-rw-r--r--  Changes.txt  445
-rw-r--r--  INSTALL.txt  17
-rw-r--r--  PKG-INFO  113
-rwxr-xr-x  bin/fsck.s3ql  23
-rwxr-xr-x  bin/mkfs.s3ql  23
-rwxr-xr-x  bin/mount.s3ql  23
-rwxr-xr-x  bin/s3qladm  23
-rwxr-xr-x  bin/s3qlcp  23
-rwxr-xr-x  bin/s3qlctrl  23
-rwxr-xr-x  bin/s3qllock  23
-rwxr-xr-x  bin/s3qlrm  23
-rwxr-xr-x  bin/s3qlstat  23
-rwxr-xr-x  bin/umount.s3ql  23
-rwxr-xr-x  contrib/benchmark.py  135
-rw-r--r--  contrib/expire_backups.1  167
-rwxr-xr-x  contrib/expire_backups.py  286
-rwxr-xr-x  contrib/make_dummy.py  119
-rw-r--r--  contrib/pcp.1  86
-rwxr-xr-x  contrib/pcp.py  100
-rw-r--r--  contrib/s3ql.conf  33
-rwxr-xr-x  contrib/s3ql_backup.sh  58
-rw-r--r--  doc/html/.buildinfo  4
-rw-r--r--  doc/html/_sources/about.txt  91
-rw-r--r--  doc/html/_sources/adm.txt  83
-rw-r--r--  doc/html/_sources/backends.txt  292
-rw-r--r--  doc/html/_sources/contrib.txt  87
-rw-r--r--  doc/html/_sources/fsck.txt  16
-rw-r--r--  doc/html/_sources/index.txt  23
-rw-r--r--  doc/html/_sources/installation.txt  101
-rw-r--r--  doc/html/_sources/issues.txt  89
-rw-r--r--  doc/html/_sources/man/adm.txt  66
-rw-r--r--  doc/html/_sources/man/cp.txt  100
-rw-r--r--  doc/html/_sources/man/ctrl.txt  69
-rw-r--r--  doc/html/_sources/man/expire_backups.txt  106
-rw-r--r--  doc/html/_sources/man/fsck.txt  44
-rw-r--r--  doc/html/_sources/man/index.txt  23
-rw-r--r--  doc/html/_sources/man/lock.txt  78
-rw-r--r--  doc/html/_sources/man/mkfs.txt  43
-rw-r--r--  doc/html/_sources/man/mount.txt  48
-rw-r--r--  doc/html/_sources/man/pcp.txt  46
-rw-r--r--  doc/html/_sources/man/rm.txt  41
-rw-r--r--  doc/html/_sources/man/stat.txt  41
-rw-r--r--  doc/html/_sources/man/umount.txt  44
-rw-r--r--  doc/html/_sources/mkfs.txt  20
-rw-r--r--  doc/html/_sources/mount.txt  175
-rw-r--r--  doc/html/_sources/resources.txt  22
-rw-r--r--  doc/html/_sources/special.txt  114
-rw-r--r--  doc/html/_sources/tips.txt  81
-rw-r--r--  doc/html/_sources/umount.txt  31
-rw-r--r--  doc/html/_static/ajax-loader.gif  bin 0 -> 673 bytes
-rw-r--r--  doc/html/_static/basic.css  528
-rw-r--r--  doc/html/_static/comment-bright.png  bin 0 -> 3500 bytes
-rw-r--r--  doc/html/_static/comment-close.png  bin 0 -> 3578 bytes
-rw-r--r--  doc/html/_static/comment.png  bin 0 -> 3445 bytes
-rw-r--r--  doc/html/_static/contents.png  bin 0 -> 202 bytes
-rw-r--r--  doc/html/_static/doctools.js  247
-rw-r--r--  doc/html/_static/down-pressed.png  bin 0 -> 368 bytes
-rw-r--r--  doc/html/_static/down.png  bin 0 -> 363 bytes
-rw-r--r--  doc/html/_static/file.png  bin 0 -> 392 bytes
-rw-r--r--  doc/html/_static/jquery.js  154
-rw-r--r--  doc/html/_static/minus.png  bin 0 -> 199 bytes
-rw-r--r--  doc/html/_static/navigation.png  bin 0 -> 218 bytes
-rw-r--r--  doc/html/_static/plus.png  bin 0 -> 199 bytes
-rw-r--r--  doc/html/_static/pygments.css  62
-rw-r--r--  doc/html/_static/searchtools.js  515
-rw-r--r--  doc/html/_static/sphinxdoc.css  340
-rw-r--r--  doc/html/_static/underscore.js  16
-rw-r--r--  doc/html/_static/up-pressed.png  bin 0 -> 372 bytes
-rw-r--r--  doc/html/_static/up.png  bin 0 -> 363 bytes
-rw-r--r--  doc/html/_static/websupport.js  808
-rw-r--r--  doc/html/about.html  196
-rw-r--r--  doc/html/adm.html  217
-rw-r--r--  doc/html/backends.html  360
-rw-r--r--  doc/html/contrib.html  237
-rw-r--r--  doc/html/fsck.html  160
-rw-r--r--  doc/html/index.html  188
-rw-r--r--  doc/html/installation.html  200
-rw-r--r--  doc/html/issues.html  191
-rw-r--r--  doc/html/man/adm.html  239
-rw-r--r--  doc/html/man/cp.html  225
-rw-r--r--  doc/html/man/ctrl.html  208
-rw-r--r--  doc/html/man/expire_backups.html  242
-rw-r--r--  doc/html/man/fsck.html  227
-rw-r--r--  doc/html/man/index.html  148
-rw-r--r--  doc/html/man/lock.html  208
-rw-r--r--  doc/html/man/mkfs.html  239
-rw-r--r--  doc/html/man/mount.html  281
-rw-r--r--  doc/html/man/pcp.html  182
-rw-r--r--  doc/html/man/rm.html  181
-rw-r--r--  doc/html/man/stat.html  180
-rw-r--r--  doc/html/man/umount.html  190
-rw-r--r--  doc/html/mkfs.html  176
-rw-r--r--  doc/html/mount.html  407
-rw-r--r--  doc/html/objects.inv  bin 0 -> 505 bytes
-rw-r--r--  doc/html/resources.html  118
-rw-r--r--  doc/html/search.html  113
-rw-r--r--  doc/html/searchindex.js  1
-rw-r--r--  doc/html/special.html  280
-rw-r--r--  doc/html/tips.html  181
-rw-r--r--  doc/html/umount.html  156
-rw-r--r--  doc/latex/Makefile  64
-rw-r--r--  doc/latex/fncychap.sty  683
-rw-r--r--  doc/latex/manual.aux  366
-rw-r--r--  doc/latex/manual.idx  0
-rw-r--r--  doc/latex/manual.log  934
-rw-r--r--  doc/latex/manual.out  58
-rw-r--r--  doc/latex/manual.tex  2761
-rw-r--r--  doc/latex/manual.toc  142
-rw-r--r--  doc/latex/python.ist  11
-rw-r--r--  doc/latex/sphinx.sty  464
-rw-r--r--  doc/latex/sphinxhowto.cls  81
-rw-r--r--  doc/latex/sphinxmanual.cls  122
-rw-r--r--  doc/latex/tabulary.sty  452
-rw-r--r--  doc/man/fsck.s3ql.1  137
-rw-r--r--  doc/man/mkfs.s3ql.1  150
-rw-r--r--  doc/man/mount.s3ql.1  195
-rw-r--r--  doc/man/s3qladm.1  156
-rw-r--r--  doc/man/s3qlcp.1  146
-rw-r--r--  doc/man/s3qlctrl.1  133
-rw-r--r--  doc/man/s3qllock.1  118
-rw-r--r--  doc/man/s3qlrm.1  90
-rw-r--r--  doc/man/s3qlstat.1  89
-rw-r--r--  doc/man/umount.s3ql.1  100
-rw-r--r--  doc/manual.pdf  bin 0 -> 289155 bytes
-rw-r--r--  rst/_static/sphinxdoc.css  340
-rw-r--r--  rst/_templates/layout.html  72
-rw-r--r--  rst/about.rst  91
-rw-r--r--  rst/adm.rst  83
-rw-r--r--  rst/backends.rst  292
-rw-r--r--  rst/conf.py  244
-rw-r--r--  rst/contrib.rst  87
-rw-r--r--  rst/fsck.rst  16
-rw-r--r--  rst/include/about.rst  11
-rw-r--r--  rst/include/backends.rst  28
-rw-r--r--  rst/include/postman.rst  22
-rw-r--r--  rst/index.rst  23
-rw-r--r--  rst/installation.rst  101
-rw-r--r--  rst/issues.rst  89
-rw-r--r--  rst/man/adm.rst  66
-rw-r--r--  rst/man/cp.rst  100
-rw-r--r--  rst/man/ctrl.rst  69
-rw-r--r--  rst/man/expire_backups.rst  106
-rw-r--r--  rst/man/fsck.rst  44
-rw-r--r--  rst/man/index.rst  23
-rw-r--r--  rst/man/lock.rst  78
-rw-r--r--  rst/man/mkfs.rst  43
-rw-r--r--  rst/man/mount.rst  48
-rw-r--r--  rst/man/pcp.rst  46
-rw-r--r--  rst/man/rm.rst  41
-rw-r--r--  rst/man/stat.rst  41
-rw-r--r--  rst/man/umount.rst  44
-rw-r--r--  rst/mkfs.rst  20
-rw-r--r--  rst/mount.rst  175
-rw-r--r--  rst/resources.rst  22
-rw-r--r--  rst/special.rst  114
-rw-r--r--  rst/tips.rst  81
-rw-r--r--  rst/umount.rst  31
-rw-r--r--  setup.cfg  5
-rwxr-xr-x  setup.py  273
-rw-r--r--  src/s3ql.egg-info/PKG-INFO  113
-rw-r--r--  src/s3ql.egg-info/SOURCES.txt  229
-rw-r--r--  src/s3ql.egg-info/dependency_links.txt  1
-rw-r--r--  src/s3ql.egg-info/entry_points.txt  12
-rw-r--r--  src/s3ql.egg-info/requires.txt  5
-rw-r--r--  src/s3ql.egg-info/top_level.txt  1
-rw-r--r--  src/s3ql.egg-info/zip-safe  1
-rw-r--r--  src/s3ql/__init__.py  16
-rw-r--r--  src/s3ql/backends/__init__.py  11
-rw-r--r--  src/s3ql/backends/boto/__init__.py  358
-rw-r--r--  src/s3ql/backends/boto/connection.py  683
-rw-r--r--  src/s3ql/backends/boto/exception.py  305
-rw-r--r--  src/s3ql/backends/boto/handler.py  49
-rw-r--r--  src/s3ql/backends/boto/pyami/__init__.py  1
-rw-r--r--  src/s3ql/backends/boto/pyami/config.py  206
-rw-r--r--  src/s3ql/backends/boto/resultset.py  145
-rw-r--r--  src/s3ql/backends/boto/s3/__init__.py  32
-rw-r--r--  src/s3ql/backends/boto/s3/acl.py  165
-rw-r--r--  src/s3ql/backends/boto/s3/bucket.py  749
-rw-r--r--  src/s3ql/backends/boto/s3/bucketlistresultset.py  102
-rw-r--r--  src/s3ql/backends/boto/s3/connection.py  360
-rw-r--r--  src/s3ql/backends/boto/s3/key.py  901
-rw-r--r--  src/s3ql/backends/boto/s3/prefix.py  38
-rw-r--r--  src/s3ql/backends/boto/s3/user.py  52
-rw-r--r--  src/s3ql/backends/boto/storage_uri.py  274
-rw-r--r--  src/s3ql/backends/boto/utils.py  565
-rw-r--r--  src/s3ql/backends/common.py  690
-rw-r--r--  src/s3ql/backends/ftp.py  27
-rw-r--r--  src/s3ql/backends/ftplib.py  1038
-rw-r--r--  src/s3ql/backends/local.py  296
-rw-r--r--  src/s3ql/backends/s3.py  382
-rw-r--r--  src/s3ql/backends/sftp.py  349
-rw-r--r--  src/s3ql/block_cache.py  547
-rw-r--r--  src/s3ql/cli/__init__.py  12
-rw-r--r--  src/s3ql/cli/adm.py  315
-rw-r--r--  src/s3ql/cli/cp.py  102
-rw-r--r--  src/s3ql/cli/ctrl.py  120
-rw-r--r--  src/s3ql/cli/fsck.py  217
-rw-r--r--  src/s3ql/cli/lock.py  74
-rw-r--r--  src/s3ql/cli/mkfs.py  147
-rw-r--r--  src/s3ql/cli/mount.py  458
-rw-r--r--  src/s3ql/cli/remove.py  75
-rw-r--r--  src/s3ql/cli/statfs.py  85
-rw-r--r--  src/s3ql/cli/umount.py  204
-rw-r--r--  src/s3ql/common.py  664
-rw-r--r--  src/s3ql/daemonize.py  94
-rw-r--r--  src/s3ql/database.py  260
-rw-r--r--  src/s3ql/fs.py  1054
-rw-r--r--  src/s3ql/fsck.py  481
-rw-r--r--  src/s3ql/inode_cache.py  286
-rw-r--r--  src/s3ql/multi_lock.py  85
-rw-r--r--  src/s3ql/ordered_dict.py  219
-rw-r--r--  src/s3ql/parse_args.py  207
-rw-r--r--  src/s3ql/thread_group.py  171
-rw-r--r--  src/s3ql/upload_manager.py  387
-rw-r--r--  tests/__init__.py  16
-rw-r--r--  tests/_common.py  86
-rw-r--r--  tests/data.tar.bz2  bin 0 -> 826340 bytes
-rw-r--r--  tests/t1_backends.py  192
-rw-r--r--  tests/t1_multi_lock.py  93
-rw-r--r--  tests/t1_ordered_dict.py  142
-rw-r--r--  tests/t2_block_cache.py  389
-rw-r--r--  tests/t3_fs_api.py  777
-rw-r--r--  tests/t3_fsck.py  329
-rw-r--r--  tests/t3_inode_cache.py  99
-rw-r--r--  tests/t4_adm.py  67
-rw-r--r--  tests/t4_fuse.py  301
-rw-r--r--  tests/t5_cli.py  74
-rw-r--r--  tests/t5_cp.py  75
-rw-r--r--  util/cmdline_lexer.py  33
-rw-r--r--  util/distribute_setup.py  485
-rw-r--r--  util/sphinx_pipeinclude.py  102
231 files changed, 40469 insertions, 0 deletions
diff --git a/Changes.txt b/Changes.txt
new file mode 100644
index 0000000..02c5446
--- /dev/null
+++ b/Changes.txt
@@ -0,0 +1,445 @@
+2011-05-20, S3QL 1.0.1
+
+ * Disabled WAL mode again for now because of unexpected problems
+ with s3qlcp, s3qllock and s3qlrm (performance dropping by orders
+ of magnitude, and a *very* large *.db-wal file in ~/.s3ql).
+
+
+2011-05-13, S3QL 1.0
+
+ * S3QL has been declared stable after two years of beta testing
+ revealed no data-critical bugs.
+
+ * Fixed occasional assertion error when calling s3qlctrl flushcache
+ or unmounting the file system.
+
+ * Fixed a race condition when a block is expired while it is in
+ transit but has already been modified again.
+
+ * expire_backups.py no longer has an --init option; the state file
+ is created automatically when doing so is safe. Instead, there
+ is a --reconstruct-state option that can be used to try to
+ reconstruct a lost state file.
+
+ * The size of symbolic links is now reported as the length of
+ the target instead of zero. This used to confuse revision
+ control systems like git and hg.
+
+ * Added man pages for all S3QL commands.
+
+2011-02-04, S3QL 0.30
+
+ * S3QL now defaults to using unencrypted HTTP connections, which
+ significantly improves performance when using the S3 backend. For
+ an encrypted file system, all data is already encrypted anyway,
+ and authentication data is never transmitted in plain text even
+ for unencrypted file systems. Therefore, the use of SSL brings
+ little gain for most users. To force SSL usage, the new --ssl
+ option can be used.
+
+ * mkfs.s3ql now has a --force option to overwrite an existing file
+ system.
+
+2010-12-30, S3QL 0.29
+
+ * The FUSE interface has been rewritten using Cython and factored
+ out into a separate package, http://code.google.com/p/python-llfuse/.
+
+ This should result in easier installation, better performance and
+ better maintainability.
+
+
+2010-12-19, S3QL 0.28
+
+ * "s3qlctrl upload-meta" now works even if the file system has been
+ mounted with --metadata-upload-interval=0.
+
+ * File system metadata is now permanently cached locally. This
+ significantly reduces the time required to mount the file system.
+
+ * The documentation is now also included in PDF format.
+
+2010-12-11, S3QL 0.27
+
+ * The authinfo file now supports passwords that include white space.
+
+ * The s3qladm command can now be used to download metadata backups.
+
+ * The --strip-meta option for mount.s3ql has been removed, redundant
+ data is now always stripped before upload.
+
+ * mount.s3ql now has a --upstart option so that it can easily run as
+ an upstart job. An example job definition is included in
+ contrib/s3ql.conf.
+
+ * s3qlctrl now has an 'upload-meta' command to trigger a metadata
+ upload while the file system is mounted.
+
+ * Fixed a bug that preserved old data when truncating a file to
+ zero. If you ever had fsck errors of the form
+
+ Size of inode [ddd] ([filename]) does not agree with number of
+ blocks
+
+ then the affected files may contain additional, old data at the
+ end that is not supposed to be there.
+
+
+2010-10-30, S3QL 0.26
+
+ * Fixed a problem with the sftp backend failing because it tries to
+ access a file locally.
+
+ * Various minor bugfixes
+
+2010-09-28, S3QL 0.25
+
+ * A critical bug that could cause backups to be deleted too early
+ and potentially break the whole backup strategy has been fixed in
+ contrib/expire_backups.py.
+
+ The new version has changed semantics and also copes
+ significantly better when backups are made at irregular
+ intervals.
+
+ * S3QL should now respond with more consistent performance when
+ accessing the file system while s3qlcp, s3qllock or s3qlrm is
+ running at the same time.
+
+ * When enabling debug output for the `UploadManager` module, S3QL
+ now logs detailed messages about de-duplication, upload and
+ compression performance.
+
+2010-09-18, S3QL 0.24
+
+ * Fixed a deadlock that could cause the mount.s3ql process to hang
+ around after umount.
+
+ * Fixed a bug that caused S3QL to consider all downloaded blocks as
+ dirty and resulted in unnecessary checksum calculations on
+ expiration.
+
+ * s3qlctrl can now change the log level at runtime.
+
+ * s3qladm delete now also deletes any local stale cache files.
+
+ * Periodic metadata upload can now be disabled completely by
+ specifying an interval of zero.
+
+
+2010-09-03, S3QL 0.23
+
+ * Fixed problem with global_lock.py not being installed by setup.py
+
+2010-08-31, S3QL 0.22
+
+ * Fixed a bug that could cause file system errors when calling
+ s3qlrm on a lot of really large files.
+
+ * The sftp backend is now significantly faster, thanks to skyang2009
+ for the patch.
+
+ * s3qlctrl can now change the cache size of a mounted file system.
+
+2010-08-09, S3QL 0.21
+
+ * The metadata is now only uploaded if there have been any changes.
+
+ * mount.s3ql now supports several parallel compression and
+ encryption threads with the --compression-threads option.
+
+ * S3QL now supports "immutable directories". This important new
+ feature can be used to protect backups from modification after
+ they have completed. See the User's Guide for details.
+
+ * Using S3 RRS is now deprecated, see User's Guide for details.
+
+ * fsck.s3ql now moves files damaged by backend data loss
+ into /lost+found/
+
+ * expire_backups is no longer installed automatically and can now be
+ found in the contrib/ directory.
+
+ * S3QL now comes with a sample backup script in contrib/s3ql_backup.sh
+
+ * Added contrib/pcp.py, an rsync wrapper to recursively copy
+ directory trees with several parallel rsync processes.
+
+2010-08-01, S3QL 0.20.1
+
+ * Hotfix for s3qladm upgrade.
+
+2010-08-01, S3QL 0.20
+
+ * Added contrib/make_dummy.py. This script creates a dummy copy of a
+ bucket that contains only the file system metadata. The resulting
+ dummy can be used for debugging.
+
+ * Mounting with the local and sftp backends is now significantly
+ faster, because the object tree is no longer traversed completely.
+
+ * Fixed a race condition that occasionally produced errors when
+ deleting large files (spanning multiple blocks).
+
+ * The file system now stays responsive to other requests while
+ compressing blocks.
+
+ * s3qlstat is now much faster since the size after de-duplication
+ does not need to be queried from the backend anymore but is stored
+ in the metadata.
+
+
+2010-07-25, S3QL 0.19
+
+ * Fixed various smaller bugs, see Mercurial changelog for details.
+
+2010-07-11, S3QL 0.18
+
+ * Added --strip-meta option to mount.s3ql
+
+ * Added --metadata-upload-interval option to mount.s3ql. This makes
+ it possible to periodically upload updated metadata even while the
+ file system is mounted.
+
+ * stat.s3ql, tune.s3ql and cp.s3ql have been renamed to s3qlstat,
+ s3qladm and s3qlcp respectively.
+
+ * sftp backend is working again
+
+ * Added the s3qlctrl command.
+
+2010-06-29, S3QL 0.17
+
+ * The local and sftp backends now spread their files into different
+ sub-directories.
+
+ * Amazon S3 Reduced Redundancy Storage (RRS) is now supported. To
+ use it, use a storage url of the form s3rr://<bucket> instead of
+ s3://<bucket>.
+
+2010-06-15, S3QL 0.16
+
+ * Fixed problem with readdir() returning entries twice or skipping
+ them if files are added or removed while readdir() is in progress.
+
+ * Fixed build problem on Gentoo.
+
+ * fsck.s3ql now does a rudimentary check if the file system is still
+ mounted to prevent checking a mounted file system.
+
+
+2010-05-28, S3QL 0.15
+
+ * Fixed test cases to handle systems with low system clock
+ resolution.
+
+ * Corrected installation instructions for Debian
+
+ * mount.s3ql: instead of --bzip2, --zlib and --no-compress, there is
+ now just one option --compress=<alg>.
+
+ * File system metadata is now uploaded with all indices. This makes
+ mounting the file system much faster. Only if LZMA compression has
+ been chosen are indices stripped for storage and regenerated on
+ the next mount.
+
+2010-05-14, S3QL 0.14
+
+ * fsck now detects if a cached block is dirty and commits only dirty
+ blocks to the backend.
+
+ * Installation in Debian and Ubuntu is now much simpler, it can be
+ done completely with aptitude.
+
+2010-05-04, S3QL 0.13
+
+ * S3QL now works with Ubuntu Karmic / 10.04 LTS
+
+ * The test command no longer produces bogus error messages after all
+ tests have completed.
+
+ * The umount.s3ql command now properly handles the 'fuser' output
+ with Kernel 2.6.32 (before it always refused to unmount, claiming
+ that the mount point was busy).
+
+ * The compression can now be specified independently from the
+ encryption, so it is possible to have an unencrypted, but
+ compressed file system.
+
+ * Apart from zlib, bzip2 and lzma, data can now also be stored
+ without any compression.
+
+ * S3QL no longer emulates the . and .. directory entries since this
+ is not required by POSIX and makes the code much simpler. This
+ also means that the st_nlink value of a directory is no longer 2
+ + number of subdirectories.
+
+ * Fixed a bug that caused files to be listed with wrong sizes under
+ certain conditions.
+
+ * Added `expire_backups` command and instructions for a simple
+ backup solution using S3QL and rsync.
+
+2010-04-27, S3QL 0.12
+
+ * fsck.s3ql now accepts a --batch option to not prompt for any user
+ input and requires a --force option to check the file system even
+ if it is marked as clean.
+
+ * Fixed a bug in cp.s3ql that caused incorrect st_nlink values in
+ the copy.
+
+ * fsck.s3ql has been even more optimized.
+
+ * Fixed a problem with running out of file descriptors when lots of
+ objects are deleted.
+
+ * Bucket encryption passwords can now also be stored in the
+ ~/.s3ql/authinfo file.
+
+ * mount.s3ql doesn't complain any more if it receives any of the
+ standard mount(8) mount options.
+
+
+2010-04-24, S3QL 0.11
+
+ * S3QL file system can now be mounted from /etc/fstab
+
+ * Metadata now takes significantly less space.
+
+ * Files with extended attributes can now be deleted.
+
+ * Extended attributes can now be listed.
+
+ * It is now possible to choose between zlib, BZip2 and LZMA
+ compression every time the file system is mounted.
+
+ * Added contrib/benchmark.py to find the optimal compression method
+ for a given network bandwidth.
+
+ * fsck.s3ql no longer uses mknod(3), since that led to errors on
+ e.g. NFS-mounted volumes under Fedora.
+
+ * File access, modification and inode change times before 1972 are
+ now supported.
+
+ * Fixed a deadlock when removing or overwriting files.
+
+2010-04-21, S3QL 0.10
+
+ * S3QL now depends on FUSE version 2.8
+
+ * Cached blocks are now flushed to the backend as soon as they have
+ not been accessed for more than 10 seconds.
+
+ * The setup script now automatically checks and, if necessary,
+ installs the Python module dependencies.
+
+ * mkfs.s3ql now creates compressed and encrypted file systems by
+ default.
+
+ * Data is now compressed with LZMA instead of Bzip2.
+
+ * Data compression and upload are now done in parallel, so the
+ full network bandwidth is used continuously, without breaks for
+ data compression.
+
+ * fsck.s3ql is now several orders of magnitude faster. The code has
+ been optimized and some special database indices are now
+ precalculated.
+
+ * When running cp.s3ql, the attributes of the target directory are
+ now immediately refreshed (so that e.g. ls immediately shows the
+ correct number of hard links).
+
+ * File removal is now significantly faster, since the network
+ transactions are carried out asynchronously as part of the cache
+ expiration.
+
+ * mount.s3ql no longer tries to create files with mknod(), since
+ that led to errors on NFS-mounted volumes under Fedora.
+
+ * This release includes a lot of new code, so it may be slightly
+ more beta-ish than usual.
+
+
+2010-04-04, S3QL 0.9
+
+ * The --cachedir, --logfile, --awskey and --credfile options are
+ gone and have been replaced by --homedir.
+
+ * ~/.awssecret is no longer read, instead there is a common file
+ with authentication data for all backends in ~/.s3ql/authinfo
+
+ * The syntax for mounting S3 buckets is now s3://bucketname instead
+ of just the bucket name
+
+ * There is now an SFTP backend. Thanks to Ron Knapp
+ <ron.siesta@gmail.com> for most of the code.
+
+
+2010-03-07, S3QL 0.8
+
+ * S3QL now uses Python's default unittest.py instead of shipping its
+ own.
+
+ * Most documentation has been moved from the Wiki into the tarball,
+ so that it always corresponds to the correct version.
+
+ * setuptools is now used for installation. This allows .egg
+ creation, dependency resolution and generation of the HTML
+ documentation.
+
+ * The S3 part of boto has been integrated into S3QL.
+
+2010-02-22, beta7
+
+ * mount.s3ql no longer chdirs into / when daemonizing but into the
+ cache directory.
+
+ * Added example backup script in contrib/backup.py
+
+ * tune.s3ql --change-passphrase is working again
+
+ * Added testcase for tune.s3ql --change-passphrase
+
+ * Internal S3 errors are now handled properly by retrying the
+ upload.
+
+2010-02-19, beta6
+
+ * tune.s3ql --copy is now *really* working properly (brrr)
+
+2010-02-19, beta5
+
+ * mkfs.s3ql now makes strict checks on the bucket name
+
+ * Removed obsolete mount.s3ql_local from distribution
+
+ * tune.s3ql --copy is now working properly
+
+2010-02-19, beta4
+
+ * tune.s3ql now has a --copy option to copy buckets
+
+ * Storage location for new buckets can be specified in mkfs.s3ql
+ and tune.s3ql with --s3-location
+
+ * Fixed a deadlock in mount.s3ql when using local buckets without
+ --fg
+
+ * The local: bucket specifier is no longer artificially slow.
+
+ * mount.s3ql: --allow_other is now working
+
+2010-02-04, beta3
+
+ * Fixed a deadlock that caused umount.s3ql to hang indefinitely
+ when mount was called without --fg
+
+ * The '.' and '..' directory entries are no longer explicitly
+ stored in the database.
+
+ * Rewrote the mechanism to handle delayed updates. Now it no
+ longer depends on a small object being propagated relatively
+ fast, but is able to handle arbitrary network outages.
diff --git a/INSTALL.txt b/INSTALL.txt
new file mode 100644
index 0000000..3a7685c
--- /dev/null
+++ b/INSTALL.txt
@@ -0,0 +1,17 @@
+Installation
+------------
+
+S3QL depends on several other programs and libraries that have to be installed
+first. The best method to satisfy these dependencies depends on your
+distribution. In some cases S3QL and all its dependencies can be installed
+with as few as three commands, while in other cases more work may be required.
+
+The S3QL Wiki contains installation instructions for quite a few different
+Linux distributions:
+
+ http://code.google.com/p/s3ql/w/list?q=label:Installation
+
+If your distribution is not included in this list, you can fall back on the
+more detailed, but also more complex General Installation Instructions in the
+S3QL User's Guide (which is included in the doc/ directory and available
+online at http://www.rath.org/s3ql-docs/).
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..015c3a4
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,113 @@
+Metadata-Version: 1.1
+Name: s3ql
+Version: 1.0.1
+Summary: a full-featured file system for online data storage
+Home-page: http://code.google.com/p/s3ql/
+Author: Nikolaus Rath
+Author-email: Nikolaus@rath.org
+License: LGPL
+Download-URL: http://code.google.com/p/s3ql/downloads/list
+Description: .. -*- mode: rst -*-
+
+ ============
+ About S3QL
+ ============
+
+ S3QL is a file system that stores all its data online. It supports
+ `Amazon S3 <http://aws.amazon.com/s3>`_ as well as arbitrary
+ SFTP servers and effectively provides you with a hard disk of dynamic,
+ infinite capacity that can be accessed from any computer with internet
+ access.
+
+ S3QL provides a standard, full-featured UNIX file system that is
+ conceptually indistinguishable from any local file system.
+ Furthermore, S3QL has additional features like compression,
+ encryption, data de-duplication, immutable trees and snapshotting
+ which make it especially suitable for online backup and archival.
+
+ S3QL is designed to favor simplicity and elegance over performance and
+ feature-creep. Care has been taken to make the source code as
+ readable and serviceable as possible. Solid error detection and error
+ handling have been included from the very first line, and S3QL comes
+ with extensive automated test cases for all its components.
+
+ Features
+ ========
+
+
+ * **Transparency.** Conceptually, S3QL is indistinguishable from a
+ local file system. For example, it supports hardlinks, symlinks,
+ ACLs and standard unix permissions, extended attributes and file
+ sizes up to 2 TB.
+
+ * **Dynamic Size.** The size of an S3QL file system grows and shrinks
+ dynamically as required.
+
+ * **Compression.** Before storage, all data may be compressed with the
+ LZMA, bzip2 or deflate (gzip) algorithm.
+
+ * **Encryption.** After compression (but before upload), all data can
+ be AES encrypted with a 256-bit key. An additional SHA256 HMAC checksum
+ is used to protect the data against manipulation.
+
+ * **Data De-duplication.** If several files have identical contents,
+ the redundant data will be stored only once. This works across all
+ files stored in the file system, and also if only some parts of the
+ files are identical while other parts differ.
+
+ * **Immutable Trees.** Directory trees can be made immutable, so that
+ their contents can no longer be changed in any way whatsoever. This
+ can be used to ensure that backups can not be modified after they
+ have been made.
+
+ * **Copy-on-Write/Snapshotting.** S3QL can replicate entire directory
+ trees without using any additional storage space. Only if one of the
+ copies is modified does the modified part of the data take up
+ additional storage space. This can be used to create
+ intelligent snapshots that preserve the state of a directory at
+ different points in time using a minimum amount of space.
+
+ * **High Performance independent of network latency.** All operations
+ that do not write or read file contents (like creating directories
+ or moving, renaming, and changing permissions of files and
+ directories) are very fast because they are carried out without any
+ network transactions.
+
+ S3QL achieves this by saving the entire file and directory structure
+ in a database. This database is locally cached and the remote
+ copy updated asynchronously.
+
+ * **Support for low bandwidth connections.** S3QL splits file contents
+ into smaller blocks and caches blocks locally. This minimizes both
+ the number of network transactions required for reading and writing
+ data, and the amount of data that has to be transferred when only
+ parts of a file are read or written.
+
+
+
+ Development Status
+ ==================
+
+ After two years of beta testing by about 93 users revealed no
+ data-critical bugs, S3QL was declared **stable** with the release of
+ version 1.0 on May 13th, 2011. Note that this does not mean that S3QL
+ is bug-free. S3QL still has several known bugs, and probably many
+ more unknown ones. However, there is a high probability that these
+ bugs, although inconvenient, will not endanger any stored data.
+
+ Please report any problems on the `mailing list
+ <http://groups.google.com/group/s3ql>`_ or the `issue tracker
+ <http://code.google.com/p/s3ql/issues/list>`_.
+
+Keywords: FUSE,backup,archival,compression,encryption,deduplication,aws,s3
+Platform: POSIX
+Platform: UNIX
+Platform: Linux
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: No Input/Output (Daemon)
+Classifier: Environment :: Console
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Topic :: Internet
+Classifier: Operating System :: POSIX
+Classifier: Topic :: System :: Archiving
+Provides: s3ql
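
The de-duplication described in the PKG-INFO text above works at the block
level: file contents are split into blocks, each block is identified by a
checksum of its contents, and blocks with identical checksums are stored
only once. The following minimal sketch illustrates the idea; it is not
S3QL's actual implementation, and all names and the block size are
illustrative:

    # Sketch of content-hash de-duplication (illustrative, not S3QL's code)
    import hashlib

    BLOCK_SIZE = 128 * 1024  # assumed block size

    class DedupStore:
        def __init__(self):
            self.blocks = {}   # checksum -> block contents, stored once
            self.files = {}    # file name -> list of block checksums

        def put(self, name, data):
            checksums = []
            for i in range(0, len(data), BLOCK_SIZE):
                block = data[i:i + BLOCK_SIZE]
                digest = hashlib.sha256(block).hexdigest()
                # Identical blocks, within or across files, share one copy
                self.blocks.setdefault(digest, block)
                checksums.append(digest)
            self.files[name] = checksums

        def get(self, name):
            return b''.join(self.blocks[c] for c in self.files[name])

The same mechanism explains the copy-on-write snapshots: a copy initially
shares all of its block checksums with the original, so it takes up no
extra storage until one of the copies is modified.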
diff --git a/bin/fsck.s3ql b/bin/fsck.s3ql
new file mode 100755
index 0000000..ddc4fb6
--- /dev/null
+++ b/bin/fsck.s3ql
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+fsck.s3ql - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.fsck
+s3ql.cli.fsck.main(sys.argv[1:]) \ No newline at end of file
diff --git a/bin/mkfs.s3ql b/bin/mkfs.s3ql
new file mode 100755
index 0000000..ed85b95
--- /dev/null
+++ b/bin/mkfs.s3ql
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+mkfs.s3ql - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.mkfs
+s3ql.cli.mkfs.main(sys.argv[1:]) \ No newline at end of file
diff --git a/bin/mount.s3ql b/bin/mount.s3ql
new file mode 100755
index 0000000..f292af6
--- /dev/null
+++ b/bin/mount.s3ql
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+mount.s3ql - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.mount
+s3ql.cli.mount.main(sys.argv[1:]) \ No newline at end of file
diff --git a/bin/s3qladm b/bin/s3qladm
new file mode 100755
index 0000000..a877af2
--- /dev/null
+++ b/bin/s3qladm
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+s3qladm - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.adm
+s3ql.cli.adm.main(sys.argv[1:])
diff --git a/bin/s3qlcp b/bin/s3qlcp
new file mode 100755
index 0000000..2dc560b
--- /dev/null
+++ b/bin/s3qlcp
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+s3qlcp - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.cp
+s3ql.cli.cp.main(sys.argv[1:])
diff --git a/bin/s3qlctrl b/bin/s3qlctrl
new file mode 100755
index 0000000..6b2535a
--- /dev/null
+++ b/bin/s3qlctrl
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+s3qlctrl - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.ctrl
+s3ql.cli.ctrl.main(sys.argv[1:])
diff --git a/bin/s3qllock b/bin/s3qllock
new file mode 100755
index 0000000..69b18ae
--- /dev/null
+++ b/bin/s3qllock
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+s3qllock - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.lock
+s3ql.cli.lock.main(sys.argv[1:])
diff --git a/bin/s3qlrm b/bin/s3qlrm
new file mode 100755
index 0000000..beabf06
--- /dev/null
+++ b/bin/s3qlrm
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+s3qlrm - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.remove
+s3ql.cli.remove.main(sys.argv[1:])
diff --git a/bin/s3qlstat b/bin/s3qlstat
new file mode 100755
index 0000000..df7a5bf
--- /dev/null
+++ b/bin/s3qlstat
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+s3qlstat - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.statfs
+s3ql.cli.statfs.main(sys.argv[1:])
diff --git a/bin/umount.s3ql b/bin/umount.s3ql
new file mode 100755
index 0000000..9db3e70
--- /dev/null
+++ b/bin/umount.s3ql
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+'''
+umount.s3ql - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import unicode_literals, division, print_function
+
+import sys
+import os.path
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+import s3ql.cli.umount
+s3ql.cli.umount.main(sys.argv[1:]) \ No newline at end of file
diff --git a/contrib/benchmark.py b/contrib/benchmark.py
new file mode 100755
index 0000000..6dcd71a
--- /dev/null
+++ b/contrib/benchmark.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+'''
+benchmark.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Benchmark compression and upload performance and recommend compression
+algorithm that maximizes throughput.
+
+---
+Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import sys
+import time
+import os
+import logging
+import lzma
+import zlib
+import bz2
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+from s3ql.backends.common import compress_encrypt_fh
+from s3ql.common import (get_backend, QuietError, setup_logging)
+from s3ql.parse_args import ArgumentParser
+import argparse
+
+log = logging.getLogger('benchmark')
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ usage="%prog [options] <storage-url> <test-file>\n"
+ "%prog --help",
+ description="Transfers and compresses the test file and gives a recommendation "
+ "for the compression algorithm to use.")
+
+ parser.add_homedir()
+ parser.add_quiet()
+ parser.add_debug()
+ parser.add_version()
+ parser.add_ssl()
+
+ parser.add_storage_url()
+ parser.add_argument('file', metavar='<file>', type=argparse.FileType(mode='rb'),
+ help='File to transfer')
+ parser.add_argument("--compression-threads", action="store", type=int,
+ default=1, metavar='<no>',
+ help='Number of parallel compression and encryption threads '
+ 'to use (default: %(default)s).')
+ return parser.parse_args(args)
+
+
+def main(args=None):
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ with get_backend(options.storage_url, options.homedir,
+ options.ssl) as (conn, bucketname):
+
+ if bucketname not in conn:
+ raise QuietError("Bucket does not exist.")
+ bucket = conn.get_bucket(bucketname)
+
+ ifh = options.file
+ ofh = open('/dev/null', 'r+b')
+ size = os.fstat(ifh.fileno()).st_size / 1024
+ log.info('Test file size: %.2f MB', (size / 1024))
+
+ log.info('Compressing with LZMA...')
+ stamp = time.time()
+ compress_encrypt_fh(ifh, ofh, 'foobar', 'nonce',
+ lzma.LZMACompressor(options={ 'level': 7 }))
+ seconds = time.time() - stamp
+ lzma_speed = size / seconds
+ log.info('done. LZMA Compression Speed: %.2f KB per second', lzma_speed)
+
+ log.info('Compressing with BZip2...')
+ ifh.seek(0)
+ stamp = time.time()
+ compress_encrypt_fh(ifh, ofh, 'foobar', 'nonce',
+ bz2.BZ2Compressor(9))
+ seconds = time.time() - stamp
+ bzip2_speed = size / seconds
+ log.info('done. Bzip2 Compression Speed: %.2f KB per second', bzip2_speed)
+
+ log.info('Compressing with zlib...')
+ ifh.seek(0)
+ stamp = time.time()
+ compress_encrypt_fh(ifh, ofh, 'foobar', 'nonce',
+ zlib.compressobj(9))
+ seconds = time.time() - stamp
+ zlib_speed = size / seconds
+ log.info('done. zlib Compression Speed: %.2f KB per second', zlib_speed)
+
+ log.info('Transferring to backend...')
+ ifh.seek(0)
+ stamp = time.time()
+ bucket.raw_store(options.file.name, ifh, dict())
+ seconds = time.time() - stamp
+ net_speed = size / seconds
+ log.info('done. Network Uplink Speed: %.2f KB per second', net_speed)
+
+
+ print('Assuming mount.s3ql will be called with --compression-threads %d'
+ % options.compression_threads)
+ lzma_speed *= options.compression_threads
+ bzip2_speed *= options.compression_threads
+ zlib_speed *= options.compression_threads
+
+ if lzma_speed > net_speed:
+ print('You should use LZMA compression.')
+ elif bzip2_speed > net_speed:
+ print('You should use BZip2 compression.')
+ elif zlib_speed > net_speed:
+ print('You should use zlib compression.')
+ else:
+ print('You should use zlib compression, but even that is not fast\n'
+ 'enough to saturate your network connection.')
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
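
A typical invocation of the benchmark, assuming it is run from an unpacked
source tree and that s3://mybucket and testfile stand in for a real bucket
and a representative sample of your data, might look like:

    ./contrib/benchmark.py s3://mybucket testfile

The recommendation logic simply scales each algorithm's single-thread
compression throughput by the number of compression threads and picks the
strongest algorithm that still outpaces the measured network uplink.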
diff --git a/contrib/expire_backups.1 b/contrib/expire_backups.1
new file mode 100644
index 0000000..a6e2b3f
--- /dev/null
+++ b/contrib/expire_backups.1
@@ -0,0 +1,167 @@
+.TH "EXPIRE_BACKUPS" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+expire_backups \- Intelligently expire old backups
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+expire_backups [options] <age> [<age> ...]
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+The \fBexpire_backups\fP command intelligently removes old backups that are no
+longer needed.
+.sp
+To define what backups you want to keep for how long, you define a
+number of \fIage ranges\fP. \fBexpire_backups\fP ensures that you
+will have at least one backup in each age range at all times. It will
+keep exactly as many backups as are required for that and delete any
+backups that become redundant.
+.sp
+Age ranges are specified by giving a list of range boundaries in terms
+of backup cycles. Every time you create a new backup, the existing
+backups age by one cycle.
+.sp
+Example: when \fBexpire_backups\fP is called with the age range
+definition \fB1 3 7 14 31\fP, it will guarantee that you always have the
+following backups available:
+.INDENT 0.0
+.IP 1. 3
+.
+A backup that is 0 to 1 cycles old (i.e., the most recent backup)
+.IP 2. 3
+.
+A backup that is 1 to 3 cycles old
+.IP 3. 3
+.
+A backup that is 3 to 7 cycles old
+.IP 4. 3
+.
+A backup that is 7 to 14 cycles old
+.IP 5. 3
+.
+A backup that is 14 to 31 cycles old
+.UNINDENT
+.IP Note
+.
+If you do backups in fixed intervals, then one cycle will be
+equivalent to the backup interval. The advantage of specifying the
+age ranges in terms of backup cycles rather than days or weeks is
+that it allows you to gracefully handle irregular backup intervals.
+Imagine that for some reason you do not turn on your computer for
+one month. Now all your backups are at least a month old, and if you
+had specified the above backup strategy in terms of absolute ages,
+they would all be deleted! Specifying age ranges in terms of backup
+cycles avoids this sort of problem.
+.RE
+.sp
+\fBexpire_backups\fP usage is simple. It requires backups to have
+names of the form \fByear\-month\-day_hour:minute:seconds\fP
+(\fBYYYY\-MM\-DD_HH:mm:ss\fP) and works on all backups in the current
+directory. So for the above backup strategy, the correct invocation
+would be:
+.sp
+.nf
+.ft C
+expire_backups.py 1 3 7 14 31
+.ft P
+.fi
+.sp
+When storing your backups on an S3QL file system, you probably want to
+specify the \fB\-\-use\-s3qlrm\fP option as well. This tells
+\fBexpire_backups\fP to use the \fIs3qlrm\fP command to
+delete directories.
+.sp
+\fBexpire_backups\fP uses a "state file" to keep track of how many
+cycles old each backup is (since this cannot be inferred from
+the dates contained in the directory names). The standard name for
+this state file is \fB.expire_backups.dat\fP. If this file gets
+damaged or deleted, \fBexpire_backups\fP no longer knows the ages
+of the backups and refuses to work. In this case you can use the
+\fB\-\-reconstruct\-state\fP option to try to reconstruct the state
+from the backup dates. However, the accuracy of this reconstruction
+depends strongly on how rigorous you have been with making backups (it
+is only completely correct if the time between subsequent backups has
+always been exactly the same), so it\(aqs generally a good idea not to
+tamper with the state file.
+.SH OPTIONS
+.sp
+The \fBexpire_backups\fP command accepts the following options:
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-version
+.
+just print program version and exit
+.TP
+.BI \-\-state \ <file>
+.
+File to save state information in (default:
+".expire_backups.dat")
+.TP
+.B \-n
+.
+Dry run. Just show which backups would be deleted.
+.TP
+.B \-\-reconstruct\-state
+.
+Try to reconstruct a missing state file from backup
+dates.
+.TP
+.B \-\-use\-s3qlrm
+.
+Use \fBs3qlrm\fP command to delete backups.
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH EXIT STATUS
+.sp
+\fBexpire_backups\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+\fBexpire_backups\fP is shipped as part of S3QL, \fI\%http://code.google.com/p/s3ql/\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/contrib/expire_backups.py b/contrib/expire_backups.py
new file mode 100755
index 0000000..971e21b
--- /dev/null
+++ b/contrib/expire_backups.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+'''
+expire_backups.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+
+import sys
+import os
+import logging
+import re
+import textwrap
+import shutil
+import cPickle as pickle
+from datetime import datetime, timedelta
+from collections import defaultdict
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+from s3ql.common import setup_logging, QuietError
+from s3ql.parse_args import ArgumentParser
+from s3ql.cli.remove import main as s3qlrm
+
+log = logging.getLogger('expire_backups')
+
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description=textwrap.dedent('''\
+ ``expire_backups.py`` is a program to intelligently remove old backups
+ that are no longer needed.
+
+ To define what backups you want to keep for how long, you define a
+ number of *age ranges*. ``expire_backups`` ensures that you will
+ have at least one backup in each age range at all times. It will keep
+ exactly as many backups as are required for that and delete any
+ backups that become redundant.
+
+ Age ranges are specified by giving a list of range boundaries in terms
+ of backup cycles. Every time you create a new backup, the existing
+ backups age by one cycle.
+
+ Please refer to the S3QL documentation for details.
+ '''))
+
+ parser.add_quiet()
+ parser.add_debug()
+ parser.add_version()
+
+ parser.add_argument('cycles', nargs='+', type=int, metavar='<age>',
+ help='Age range boundaries in terms of backup cycles')
+ parser.add_argument('--state', metavar='<file>', type=str,
+ default='.expire_backups.dat',
+ # Add quotes around default to prevent groff
+ # from choking on leading . generated by buggy
+ # docutils man page generator.
+ help='File to save state information in (default: "%(default)s")')
+ parser.add_argument("-n", action="store_true", default=False,
+ help="Dry run. Just show which backups would be deleted.")
+ parser.add_argument('--reconstruct-state', action='store_true', default=False,
+ help='Try to reconstruct a missing state file from backup dates.')
+
+ parser.add_argument("--use-s3qlrm", action="store_true",
+ help="Use `s3qlrm` command to delete backups.")
+
+ options = parser.parse_args(args)
+
+ if sorted(options.cycles) != options.cycles:
+ parser.error('Age range boundaries must be in increasing order')
+
+ return options
+
+def main(args=None):
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ # Determine available backups
+ backup_list = set(x for x in os.listdir('.')
+ if re.match(r'^\d{4}-\d\d-\d\d_\d\d:\d\d:\d\d$', x))
+
+ if not os.path.exists(options.state) and len(backup_list) > 1:
+ if not options.reconstruct_state:
+ raise QuietError('Found more than one backup but no state file! Aborting.')
+ else:
+ log.warn('Trying to reconstruct state file..')
+ state = upgrade_to_state(backup_list)
+ elif not os.path.exists(options.state):
+ log.warn('Creating state file..')
+ state = dict()
+ else:
+ log.info('Reading state...')
+ state = pickle.load(open(options.state, 'rb'))
+
+ to_delete = process_backups(backup_list, state, options.cycles)
+
+ for x in to_delete:
+ log.info('Backup %s is no longer needed, removing...', x)
+ if not options.n:
+ if options.use_s3qlrm:
+ s3qlrm([x])
+ else:
+ shutil.rmtree(x)
+
+ if options.n:
+ log.info('Dry run, not saving state.')
+ else:
+ log.info('Saving state..')
+ pickle.dump(state, open(options.state, 'wb'), pickle.HIGHEST_PROTOCOL)
+
+def upgrade_to_state(backup_list):
+ log.info('Several existing backups detected, trying to convert absolute ages to cycles')
+
+ now = datetime.now()
+ age = dict()
+ for x in sorted(backup_list):
+ age[x] = now - datetime.strptime(x, '%Y-%m-%d_%H:%M:%S')
+ log.info('Backup %s is %s old', x, age[x])
+
+ deltas = [ abs(x - y) for x in age.itervalues()
+ for y in age.itervalues() if x != y ]
+ step = min(deltas)
+ log.info('Assuming backup interval of %s', step)
+
+ state = dict()
+ for x in sorted(age):
+ state[x] = 0
+ while age[x] > timedelta(0):
+ state[x] += 1
+ age[x] -= step
+ log.info('Backup %s is %d cycles old', x, state[x])
+
+ log.info('State construction complete.')
+ return state
+
+def simulate(args):
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ state = dict()
+ backup_list = set()
+ for i in xrange(50):
+ backup_list.add('backup-%02d' % i)
+ delete = process_backups(backup_list, state, options.cycles)
+ log.info('Deleting %s', delete)
+ backup_list -= delete
+
+ log.info('Available backups on day %d:', i)
+ for x in sorted(backup_list):
+ log.info(x)
+
+def process_backups(backup_list, state, cycles):
+
+ # New backups
+ new_backups = backup_list - set(state)
+ for x in sorted(new_backups):
+ log.info('Found new backup %s', x)
+ for y in state:
+ state[y] += 1
+ state[x] = 0
+
+ for x in state:
+ log.debug('Backup %s has age %d', x, state[x])
+
+ # Missing backups
+ missing_backups = set(state) - backup_list
+ for x in missing_backups:
+ log.warn('Warning: backup %s is missing. Did you delete it manually?', x)
+ del state[x]
+
+ # Ranges
+ ranges = [ (0, cycles[0]) ]
+ for i in range(1, len(cycles)):
+ ranges.append((cycles[i-1], cycles[i]))
+
+ # Go forward in time to see what backups need to be kept
+ simstate = dict()
+ keep = set()
+ missing = defaultdict(list)
+ for step in xrange(max(cycles)):
+
+ log.debug('Considering situation after %d more backups', step)
+ for x in simstate:
+ simstate[x] += 1
+ log.debug('Backup %s now has simulated age %d', x, simstate[x])
+
+ # Add the hypothetical backup that has been made "just now"
+ if step != 0:
+ simstate[step] = 0
+
+ for (min_, max_) in ranges:
+ log.debug('Looking for backup for age range %d to %d', min_, max_)
+
+ # Look in simstate
+ found = False
+ for (backup, age) in simstate.iteritems():
+ if min_ <= age < max_:
+ found = True
+ break
+ if found:
+ # backup and age will be defined
+ #pylint: disable=W0631
+ log.debug('Using backup %s (age %d)', backup, age)
+ continue
+
+ # Look in state
+ for (backup, age) in state.iteritems():
+ age += step
+ if min_ <= age < max_:
+ log.info('Keeping backup %s (current age %d) for age range %d to %d%s',
+ backup, state[backup], min_, max_,
+ (' in %d cycles' % step) if step else '')
+ simstate[backup] = age
+ keep.add(backup)
+ break
+
+ else:
+ if step == 0:
+ log.info('Note: there is currently no backup available '
+ 'for age range %d to %d', min_, max_)
+ else:
+ missing['%d to %d' % (min_, max_)].append(step)
+
+ for range_ in sorted(missing):
+ log.info('Note: there will be no backup for age range %s '
+ 'in (forthcoming) cycle(s): %s',
+ range_, format_list(missing[range_]))
+
+ to_delete = set(state) - keep
+ for x in to_delete:
+ del state[x]
+
+ return to_delete
+
+
+def format_list(l):
+ if not l:
+ return ''
+ l = l[:]
+
+ # Append bogus end element
+ l.append(l[-1] + 2)
+
+ range_start = l.pop(0)
+ cur = range_start
+ res = list()
+ for n in l:
+ if n == cur+1:
+ pass
+ elif range_start == cur:
+ res.append('%d' % cur)
+ elif range_start == cur - 1:
+ res.append('%d' % range_start)
+ res.append('%d' % cur)
+ else:
+ res.append('%d-%d' % (range_start, cur))
+
+ if n != cur+1:
+ range_start = n
+ cur = n
+
+ if len(res) > 1:
+ return ('%s and %s' % (', '.join(res[:-1]), res[-1]))
+ else:
+ return ', '.join(res)
+
+
+if __name__ == '__main__':
+ #simulate(sys.argv[1:])
+ main(sys.argv[1:]) \ No newline at end of file
diff --git a/contrib/make_dummy.py b/contrib/make_dummy.py
new file mode 100755
index 0000000..e2b4396
--- /dev/null
+++ b/contrib/make_dummy.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+'''
+make_dummy.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Creates a dummy copy of an entire bucket. The dummy will appear to contain
+all the data of the original bucket. However, in fact only the metadata
+will be copied and all files contain just \0 bytes.
+
+---
+Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+
+from __future__ import division, print_function, absolute_import
+
+import sys
+import os
+import logging
+import tempfile
+
+# If we are running from the S3QL source directory, make sure
+# that we use the modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+from s3ql.common import (setup_logging, QuietError,
+ unlock_bucket, get_backend)
+from s3ql.backends.common import ChecksumError
+from s3ql.parse_args import ArgumentParser, storage_url_type
+
+log = logging.getLogger('make_dummy')
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description="Create a dummy-copy of the source bucket. The target will "
+ 'contain a file system with the same structure, but all files'
+ 'will just contain \\0 bytes.')
+
+ parser.add_homedir()
+ parser.add_quiet()
+ parser.add_debug_modules()
+ parser.add_version()
+ parser.add_ssl()
+
+ parser.add_argument("src", metavar='<source storage-url>',
+ type=storage_url_type,
+ help='Source storage URL')
+
+ parser.add_argument("dest", metavar='<dest storage-url>',
+ type=storage_url_type,
+ help='Destination storage URL')
+
+
+ return parser.parse_args(args)
+
+
+def main(args=None):
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ with get_backend(options.src, options.homedir,
+ options.ssl) as (src_conn, src_name):
+
+        if src_name not in src_conn:
+ raise QuietError("Source bucket does not exist.")
+ src_bucket = src_conn.get_bucket(src_name)
+
+ try:
+ unlock_bucket(options.homedir, options.src, src_bucket)
+ except ChecksumError:
+ raise QuietError('Checksum error - incorrect password?')
+
+ with get_backend(options.dest, options.homedir) as (dest_conn, dest_name):
+
+ if dest_name in dest_conn:
+ raise QuietError("Bucket already exists!\n"
+ "(you can delete an existing bucket with s3qladm --delete)\n")
+
+ dest_bucket = dest_conn.create_bucket(dest_name, compression=None)
+
+ copy_objects(src_bucket, dest_bucket)
+
+
+def copy_objects(src_bucket, dest_bucket):
+
+ log.info('Copying...')
+
+
+ for (no, key) in enumerate(src_bucket):
+ if no != 0 and no % 5000 == 0:
+ log.info('Copied %d objects so far..', no)
+
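+        # Data blocks are not copied; storing the key itself as a
+        # short dummy value is sufficient. The passphrase object is
+        # skipped (the dummy bucket is created unencrypted), and so
+        # are old metadata backups.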
+ if key.startswith('s3ql_data_'):
+ dest_bucket[key] = key
+ elif key == 's3ql_passphrase' or key.startswith('s3ql_metadata_bak'):
+ pass
+ else:
+ log.info('Copying %s..', key)
+
+ fh = tempfile.TemporaryFile()
+ meta = src_bucket.fetch_fh(key, fh, plain=True)
+ fh.seek(0)
+ dest_bucket.store_fh(key, fh, meta)
+ fh.close()
+
+ log.info('Done.')
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/contrib/pcp.1 b/contrib/pcp.1
new file mode 100644
index 0000000..9dc75ec
--- /dev/null
+++ b/contrib/pcp.1
@@ -0,0 +1,86 @@
+.TH "PCP" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+pcp \- Recursive, parallel copy of directory trees
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+pcp [options] <source> [<source> ...] <destination>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+The \fBpcp\fP command is a wrapper that starts several
+\fBrsync\fP processes to copy directory trees in parallel. This
+allows much better copying performance on file systems that have
+relatively high latency when retrieving individual files, like S3QL.
+.SH OPTIONS
+.sp
+The \fBpcp\fP command accepts the following options:
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-version
+.
+just print program version and exit
+.TP
+.B \-a
+.
+Pass \-aHAX option to rsync.
+.TP
+.BI \-\-processes \ <no>
+.
+Number of rsync processes to use (default: 10).
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH EXIT STATUS
+.sp
+\fBpcp\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+\fBpcp\fP is shipped as part of S3QL, \fI\%http://code.google.com/p/s3ql/\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/contrib/pcp.py b/contrib/pcp.py
new file mode 100755
index 0000000..fa015dc
--- /dev/null
+++ b/contrib/pcp.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+'''
+pcp.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Parallel, recursive copy of directory trees.
+
+---
+Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import sys
+import os
+import logging
+import subprocess
+
+# We are running from the S3QL source directory, make sure
+# that we use modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+from s3ql.common import (setup_logging)
+from s3ql.parse_args import ArgumentParser
+
+log = logging.getLogger('pcp')
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description='Recursively copy source(s) to destination using multiple '
+ 'parallel rsync processes.')
+
+ parser.add_quiet()
+ parser.add_debug()
+ parser.add_version()
+
+ parser.add_argument("-a", action="store_true",
+ help='Pass -aHAX option to rsync.')
+ parser.add_argument("--processes", action="store", type=int, metavar='<no>',
+ default=10,
+ help='Number of rsync processes to use (default: %(default)s).')
+
+ parser.add_argument('source', metavar='<source>', nargs='+',
+ help='Directories to copy')
+ parser.add_argument('dest', metavar='<destination>',
+ help="Target directory")
+
+ options = parser.parse_args(args)
+ options.pps = options.source + [ options.dest ]
+
+ return options
+
+def main(args=None):
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
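+    # Split the pools of leading filename characters into
+    # (processes - 1) slices of roughly equal size. Each rsync
+    # process is then restricted (via filter rules) to names starting
+    # with the characters of one slice; a final catch-all process
+    # handles all remaining names.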
+ pool = ( 'abcdefghijklmnopqrstuvwxyz',
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ '0123456789' )
+ steps = [ len(x) / (options.processes-1) for x in pool ]
+ prefixes = list()
+ for i in range(options.processes - 1):
+ parts = [ x[int(i*y):int((i+1)*y)] for (x, y) in zip(pool, steps) ]
+ prefixes.append(''.join(parts))
+
+ filters = [ '-! [%s]*' % x for x in prefixes ]
+
+ # Catch all
+ filters.append( '- [%s]*' % ''.join(prefixes))
+
+ rsync_args = [ 'rsync', '-f', '+ */' ]
+ if not options.quiet:
+ rsync_args.append('--out-format')
+ rsync_args.append('%n%L')
+ if options.a:
+ rsync_args.append('-aHAX')
+
+ processes = list()
+ for filter_ in filters:
+ cmd = rsync_args + [ '-f', filter_ ] + options.pps
+ log.debug('Calling %s', cmd)
+ processes.append(subprocess.Popen(cmd))
+
+ if all([ c.wait() == 0 for c in processes]):
+ sys.exit(0)
+ else:
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:]) \ No newline at end of file
diff --git a/contrib/s3ql.conf b/contrib/s3ql.conf
new file mode 100644
index 0000000..95e92a7
--- /dev/null
+++ b/contrib/s3ql.conf
@@ -0,0 +1,33 @@
+#
+# This file can be placed in /etc/init. It defines an upstart job that
+# takes care of mounting and unmounting an S3QL file system.
+#
+description "S3QL Backup File System"
+author "Nikolaus Rath <Nikolaus@rath.org>"
+
+# This assumes that eth0 provides your internet connection
+start on (filesystem and net-device-up IFACE=eth0)
+stop on runlevel [016]
+
+env BUCKET="s3://my-backup-bla"
+env MOUNTPOINT="/mnt/backup"
+
+expect stop
+
+script
+ # Redirect stdout and stderr into the system log
+ DIR=$(mktemp -d)
+ mkfifo "$DIR/LOG_FIFO"
+ logger -t s3ql -p local0.info < "$DIR/LOG_FIFO" &
+ exec > "$DIR/LOG_FIFO"
+ exec 2>&1
+ rm -rf "$DIR"
+
+ # Check and mount file system
+ fsck.s3ql --batch "$BUCKET"
+ exec mount.s3ql --upstart "$BUCKET" "$MOUNTPOINT"
+end script
+
+pre-stop script
+ umount.s3ql "$MOUNTPOINT"
+end script
diff --git a/contrib/s3ql_backup.sh b/contrib/s3ql_backup.sh
new file mode 100755
index 0000000..4ac5849
--- /dev/null
+++ b/contrib/s3ql_backup.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# Abort entire script if any command fails
+set -e
+
+# Backup destination (storage url)
+bucket="s3://my_backup_bucket"
+
+# Recover cache if e.g. system was shut down while fs was mounted
+fsck.s3ql --batch "$bucket"
+
+# Create a temporary mountpoint and mount file system
+mountpoint="/tmp/s3ql_backup_$$"
+mkdir "$mountpoint"
+mount.s3ql "$bucket" "$mountpoint"
+
+# Make sure the file system is unmounted when we are done
+trap "cd /; umount.s3ql '$mountpoint'; rmdir '$mountpoint'" EXIT
+
+# Figure out the most recent backup
+cd "$mountpoint"
+last_backup=`python <<EOF
+import os
+import re
+backups=sorted(x for x in os.listdir('.') if re.match(r'^[\\d-]{10}_[\\d:]{8}$', x))
+if backups:
+ print backups[-1]
+EOF`
+
+# Duplicate the most recent backup unless this is the first backup
+new_backup=`date "+%Y-%m-%d_%H:%M:%S"`
+if [ -n "$last_backup" ]; then
+ echo "Copying $last_backup to $new_backup..."
+ s3qlcp "$last_backup" "$new_backup"
+
+ # Make the last backup immutable
+ # (in case the previous backup was interrupted prematurely)
+ s3qllock "$last_backup"
+fi
+
+# ..and update the copy
+rsync -aHAXx --delete-during --delete-excluded --partial -v \
+ --exclude /.cache/ \
+ --exclude /.s3ql/ \
+ --exclude /.thumbnails/ \
+ --exclude /tmp/ \
+ "/home/my_username/" "./$new_backup/"
+
+# Make the new backup immutable
+s3qllock "$new_backup"
+
+# Expire old backups
+
+# Note that expire_backups.py comes from contrib/ and is not installed
+# by default when you install from the source tarball. If you have
+# installed an S3QL package for your distribution, this script *may*
+# be installed, and it *may* also not have the .py ending.
+expire_backups.py --use-s3qlrm 1 7 14 31 90 180 360
diff --git a/doc/html/.buildinfo b/doc/html/.buildinfo
new file mode 100644
index 0000000..7d05252
--- /dev/null
+++ b/doc/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 85f6a556ea11bab9efd893c7b6e319a1
+tags: fbb0d17656682115ca4d033fb2f83ba1
diff --git a/doc/html/_sources/about.txt b/doc/html/_sources/about.txt
new file mode 100644
index 0000000..bef3684
--- /dev/null
+++ b/doc/html/_sources/about.txt
@@ -0,0 +1,91 @@
+.. -*- mode: rst -*-
+
+============
+ About S3QL
+============
+
+S3QL is a file system that stores all its data online. It supports
+`Amazon S3 <http://aws.amazon.com/s3>`_ as well as arbitrary
+SFTP servers and effectively provides you with a hard disk of dynamic,
+infinite capacity that can be accessed from any computer with internet
+access.
+
+S3QL provides a standard, full-featured UNIX file system that is
+conceptually indistinguishable from any local file system.
+Furthermore, S3QL has additional features like compression,
+encryption, data de-duplication, immutable trees and snapshotting
+which make it especially suitable for online backup and archival.
+
+S3QL is designed to favor simplicity and elegance over performance and
+feature-creep. Care has been taken to make the source code as
+readable and serviceable as possible. Solid error detection and error
+handling have been included from the very first line, and S3QL comes
+with extensive automated test cases for all its components.
+
+Features
+========
+
+
+* **Transparency.** Conceptually, S3QL is indistinguishable from a
+ local file system. For example, it supports hardlinks, symlinks,
+ ACLs and standard unix permissions, extended attributes and file
+ sizes up to 2 TB.
+
+* **Dynamic Size.** The size of an S3QL file system grows and shrinks
+ dynamically as required.
+
+* **Compression.** Before storage, all data may be compressed with the
+ LZMA, bzip2 or deflate (gzip) algorithm.
+
+* **Encryption.** After compression (but before upload), all data can
+  be AES encrypted with a 256-bit key. An additional SHA256 HMAC checksum
+ is used to protect the data against manipulation.
+
+* **Data De-duplication.** If several files have identical contents,
+ the redundant data will be stored only once. This works across all
+ files stored in the file system, and also if only some parts of the
+ files are identical while other parts differ.
+
+* **Immutable Trees.** Directory trees can be made immutable, so that
+ their contents can no longer be changed in any way whatsoever. This
+ can be used to ensure that backups can not be modified after they
+ have been made.
+
+* **Copy-on-Write/Snapshotting.** S3QL can replicate entire directory
+ trees without using any additional storage space. Only if one of the
+ copies is modified, the part of the data that has been modified will
+ take up additional storage space. This can be used to create
+ intelligent snapshots that preserve the state of a directory at
+ different points in time using a minimum amount of space.
+
+* **High Performance independent of network latency.** All operations
+ that do not write or read file contents (like creating directories
+ or moving, renaming, and changing permissions of files and
+ directories) are very fast because they are carried out without any
+ network transactions.
+
+ S3QL achieves this by saving the entire file and directory structure
+ in a database. This database is locally cached and the remote
+ copy updated asynchronously.
+
+* **Support for low bandwidth connections.** S3QL splits file contents
+ into smaller blocks and caches blocks locally. This minimizes both
+ the number of network transactions required for reading and writing
+ data, and the amount of data that has to be transferred when only
+ parts of a file are read or written.
+
+
+
+Development Status
+==================
+
+Two years of beta testing by about 93 users did not reveal any
+data-critical bugs, and S3QL was declared **stable** with the release
+of version 1.0 on May 13th, 2011. Note that this does not mean that
+S3QL is bug-free. S3QL still has several known, and probably many more
+unknown, bugs. However, there is a high probability that these bugs
+will, while inconvenient, not endanger any stored data.
+
+Please report any problems on the `mailing list
+<http://groups.google.com/group/s3ql>`_ or the `issue tracker
+<http://code.google.com/p/s3ql/issues/list>`_.
diff --git a/doc/html/_sources/adm.txt b/doc/html/_sources/adm.txt
new file mode 100644
index 0000000..3e50f64
--- /dev/null
+++ b/doc/html/_sources/adm.txt
@@ -0,0 +1,83 @@
+.. -*- mode: rst -*-
+
+
+Managing Buckets
+=====================
+
+
+The `s3qladm` command performs various operations on S3QL buckets.
+The file system contained in the bucket *must not be mounted* when
+using `s3qladm`, or things will go badly wrong.
+
+The syntax is ::
+
+ s3qladm [options] <action> <storage-url>
+
+where :var:`action` may be one of :program:`passphrase`,
+:program:`upgrade`, :program:`delete` or :program:`download-metadata`.
+
+The :program:`s3qladm` command accepts the following general options, no
+matter what specific action is being invoked:
+
+.. pipeinclude:: ../bin/s3qladm --help
+ :start-after: show this help message and exit
+
+
+Changing the Passphrase
+-----------------------
+
+To change the passphrase of a bucket, use the `s3qladm` command::
+
+ s3qladm passphrase <storage url>
+
+The passphrase can only be changed when the bucket is not mounted.
+
+Upgrading the file system
+-------------------------
+
+If you have installed a new version of S3QL, it may sometimes be
+necessary to upgrade the file system metadata as well. Note that in
+this case the file system can no longer be accessed with older
+versions of S3QL after the upgrade.
+
+During the upgrade you have to make sure that the command is not
+interrupted, and that no one else tries to mount, check or upgrade the
+file system at the same time.
+
+To upgrade a file system from the previous to the current revision,
+execute ::
+
+ s3qladm upgrade <storage url>
+
+
+Deleting a file system
+----------------------
+
+A file system can be deleted with::
+
+ s3qladm delete <storage url>
+
+This physically deletes all the data and file system structures.
+
+
+Restoring Metadata Backups
+--------------------------
+
+If the most-recent copy of the file system metadata has been damaged
+irreparably, it is possible to restore one of the automatically
+created backup copies.
+
+The command ::
+
+ s3qladm download-metadata <storage url>
+
+will give you a list of the available metadata backups and allow you
+to download them. This will create two new files in the current
+directory, ending in ``.db`` and ``.params``. To actually use the
+downloaded backup, you need to move these files into the ``~/.s3ql/``
+directory and run ``fsck.s3ql``.
+
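+For example, if the downloaded files were named ``mybucket.db`` and
+``mybucket.params`` (the actual names depend on the bucket), you
+would run::
+
+  mv mybucket.db mybucket.params ~/.s3ql/
+  fsck.s3ql <storage url>
+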
+.. WARNING::
+
+ You should probably not use this functionality without having asked
+ for help on the mailing list first (see :ref:`resources`).
diff --git a/doc/html/_sources/backends.txt b/doc/html/_sources/backends.txt
new file mode 100644
index 0000000..480ff90
--- /dev/null
+++ b/doc/html/_sources/backends.txt
@@ -0,0 +1,292 @@
+.. -*- mode: rst -*-
+
+==================
+ Storage Backends
+==================
+
+S3QL can use different protocols to store the file system data.
+Independent of the backend that you use, the place where your file
+system data is being stored is called a *bucket*. (This is mostly for
+historical reasons, since initially S3QL supported only the Amazon S3
+backend).
+
+
+On Backend Reliability
+======================
+
+S3QL has been designed for use with a storage backend where data loss
+is so infrequent that it can be completely neglected (e.g. the Amazon
+S3 backend). If you decide to use a less reliable backend, you should
+keep the following warning in mind and read this section carefully.
+
+.. WARNING::
+
+ S3QL is not able to compensate for any failures of the backend. In
+  particular, it is not able to reconstruct any data that has been lost
+ or corrupted by the backend. The persistence and durability of data
+ stored in an S3QL file system is limited and determined by the
+ backend alone.
+
+
+On the plus side, if a backend loses or corrupts some of the stored
+data, S3QL *will* detect the problem. Missing data will be detected
+when running `fsck.s3ql` or when attempting to access the data in the
+mounted file system. In the latter case you will get an IO error, and
+on unmounting S3QL will warn you that the file system is damaged and
+you need to run `fsck.s3ql`.
+
+`fsck.s3ql` will report all the affected files and move them into the
+`/lost+found` directory of the file system.
+
+You should be aware that, because of S3QL's data de-duplication
+feature, the consequences of a data loss in the backend can be
+significantly more severe than you may expect. More concretely, a data
+loss in the backend at time *x* may cause data that is written *after*
+time *x* to be lost as well. What may happen is this:
+
+#. You store an important file in the S3QL file system.
+#. The backend loses the data blocks of this file. As long as you
+ do not access the file or run `fsck.s3ql`, S3QL
+ is not aware that the data has been lost by the backend.
+#. You save an additional copy of the important file in a different
+ location on the same S3QL file system.
+#. S3QL detects that the contents of the new file are identical to the
+ data blocks that have been stored earlier. Since at this point S3QL
+ is not aware that these blocks have been lost by the backend, it
+ does not save another copy of the file contents in the backend but
+ relies on the (presumably) existing blocks instead.
+#. Therefore, even though you saved another copy, you still do not
+ have a backup of the important file (since both copies refer to the
+ same data blocks that have been lost by the backend).
+
+As one can see, this effect becomes less important the more often
+one runs `fsck.s3ql`, since `fsck.s3ql` will make S3QL aware of any
+blocks that the backend may have lost. Figuratively, this establishes
+a "checkpoint": data loss in the backend that occurred before running
+`fsck.s3ql` can not affect any file system operations performed after
+running `fsck.s3ql`.
+
+
+Nevertheless (as said at the beginning), the recommended way to use
+S3QL is in combination with a sufficiently reliable storage backend.
+In that case none of the above will ever be a concern.
+
+
+The `authinfo` file
+===================
+
+Most backends first try to read the file `~/.s3ql/authinfo` to determine
+the username and password for connecting to the remote host. If this
+fails, both username and password are read from the terminal.
+
+The `authinfo` file has to contain entries of the form ::
+
+ backend <backend> machine <host> login <user> password <password>
+
+So to use the login `joe` with password `jibbadup` when using the SFTP
+backend to connect to the host `backups.joesdomain.com`, you would
+specify ::
+
+  backend sftp machine backups.joesdomain.com login joe password jibbadup
+
+
+Consistency Guarantees
+======================
+
+The different backends provide different types of *consistency
+guarantees*. Informally, a consistency guarantee tells you how fast
+the backend will apply changes to the stored data.
+
+S3QL defines the following three levels:
+
+* **Read-after-Write Consistency.** This is the strongest consistency
+ guarantee. If a backend offers read-after-write consistency, it
+ guarantees that as soon as you have committed any changes to the
+ backend, subsequent requests will take into account these changes.
+
+* **Read-after-Create Consistency.** If a backend provides only
+ read-after-create consistency, only the creation of a new object is
+ guaranteed to be taken into account for subsequent requests. This
+ means that, for example, if you overwrite data in an existing
+ object, subsequent requests may still return the old data for a
+ certain period of time.
+
+* **Eventual consistency.** This is the lowest consistency level.
+ Basically, any changes that you make to the backend may not be
+ visible for a certain amount of time after the change has been made.
+ However, you are guaranteed that no change will be lost. All changes
+ will *eventually* become visible.
+
+
+As long as your backend provides read-after-write or read-after-create
+consistency, you do not have to worry about consistency guarantees at
+all. However, if you plan to use a backend with only eventual
+consistency, you have to be a bit careful in some situations.
+
+
+.. _eventual_consistency:
+
+Dealing with Eventual Consistency
+---------------------------------
+
+.. NOTE::
+
+ The following applies only to storage backends that do not provide
+ read-after-create or read-after-write consistency. Currently,
+ this is only the Amazon S3 backend *if used with the US-Standard
+ storage region*. If you use a different storage backend, or the S3
+ backend with a different storage region, this section does not apply
+ to you.
+
+While the file system is mounted, S3QL is able to automatically handle
+all issues related to the weak eventual consistency guarantee.
+However, some issues may arise during the mount process and when the
+file system is checked.
+
+Suppose that you mount the file system, store some new data, delete
+some old data and unmount it again. Now remember that eventual
+consistency means that there is no guarantee that these changes will
+be visible immediately. At least in theory it is therefore possible
+that if you mount the file system again, S3QL does not see any of the
+changes that you have done and presents you an "old version" of the
+file system without them. Even worse, if you notice the problem and
+unmount the file system, S3QL will upload the old status (which S3QL
+necessarily has to consider as current) and thereby permanently
+override the newer version (even though this change may not become
+immediately visible either).
+
+The same problem applies when checking the file system. If the backend
+provides S3QL with only partially updated data, S3QL has no way to
+find out if this is a real consistency problem that needs to be fixed or
+if it is only a temporary problem that will resolve itself
+automatically (because there are still changes that have not become
+visible yet).
+
+While this may seem to be a rather big problem, the likelihood of it
+occurring is rather low. In practice, most storage providers rarely
+need more than a few seconds to apply incoming changes, so to trigger
+this problem one would have to unmount and remount the file system in
+a very short time window. Many people therefore make sure that they
+wait a few minutes between successive mounts (or file system checks)
+and decide that the remaining risk is negligible.
+
+Nevertheless, the eventual consistency guarantee does not impose an
+upper limit on the time that it may take for a change to become
+visible. Therefore there is no "totally safe" waiting time that would
+completely eliminate this problem; a theoretical possibility always
+remains.
+
+
+
+The Amazon S3 Backend
+=====================
+
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form `s3://<bucketname>`. Bucket names must conform to the `S3
+Bucket Name Restrictions`_.
+
+The S3 backend offers exceptionally strong reliability guarantees. As
+of August 2010, Amazon guarantees a durability of 99.999999999% per
+year. In other words, if you store a thousand million objects then on
+average you would lose less than one object in a hundred years.
+
+The Amazon S3 backend provides read-after-create consistency for the
+EU, Asia-Pacific and US-West storage regions. *For the US-Standard
+storage region, Amazon S3 provides only eventual consistency* (please
+refer to :ref:`eventual_consistency` for information about
+what this entails).
+
+When connecting to Amazon S3, S3QL uses an unencrypted HTTP
+connection, so if you want your data to stay confidential, you have
+to create the S3QL file system with encryption (this is also the default).
+
+When reading the authentication information for the S3 backend from
+the `authinfo` file, the `host` field is ignored, i.e. the first entry
+with `s3` as a backend will be used. For example ::
+
+ backend s3 machine any login myAWSaccessKeyId password myAwsSecretAccessKey
+
+Note that the bucket names come from a global pool, so chances are
+that your favorite name has already been taken by another S3 user.
+Usually a longer bucket name containing some random numbers, like
+`19283712_yourname_s3ql`, will work better.
+
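+For example, to create a new file system in such a bucket, you would
+run::
+
+  mkfs.s3ql s3://19283712_yourname_s3ql
+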
+If you do not already have one, you need to obtain an Amazon S3
+account from `Amazon AWS <http://aws.amazon.com/>`_. The account is
+free, you will pay only for the amount of storage that you actually
+use.
+
+Note that the login and password for accessing S3 are not the user id
+and password that you use to log into the Amazon Webpage, but the "AWS
+access key id" and "AWS secret access key" shown under `My
+Account/Access Identifiers
+<https://aws-portal.amazon.com/gp/aws/developer/account/index.html?ie=UTF8&action=access-key>`_.
+
+.. _`S3 Bucket Name Restrictions`: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
+
+.. NOTE::
+
+ S3QL also allows you to use `reduced redundancy storage
+ <http://aws.amazon.com/s3/#protecting>`_ by using ``s3rr://``
+  instead of ``s3://`` in the storage url. However, this is not
+ recommended. The reason is a combination of three factors:
+
+  * RRS has a relatively low reliability; on average you lose one
+    out of every ten-thousand objects a year. So you can expect to
+    occasionally lose some data.
+
+ * When `fsck.s3ql` asks Amazon S3 for a list of the stored objects,
+ this list includes even those objects that have been lost.
+ Therefore `fsck.s3ql` *can not detect lost objects* and lost data
+ will only become apparent when you try to actually read from a
+ file whose data has been lost. This is a (very unfortunate)
+ peculiarity of Amazon S3.
+
+ * Due to the data de-duplication feature of S3QL, unnoticed lost
+ objects may cause subsequent data loss later in time (see `On
+ Backend Reliability`_ for details).
+
+ In other words, you should really only store an S3QL file system
+ using RRS if you know exactly what you are getting into.
+
+
+
+
+The Local Backend
+=================
+
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+`local://<path>`. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. `local:///var/archive`.
+
+The local backend provides read-after-write consistency.
+
+The SFTP Backend
+================
+
+The SFTP backend uses the SFTP protocol, which is a file transfer
+protocol similar to ftp, but uses an encrypted SSH connection.
+It provides read-after-write consistency.
+
+Note that the SFTP backend is rather slow and has not been tested
+as extensively as the S3 and Local backends.
+
+The storage URL for SFTP connections has the form ::
+
+ sftp://<host>[:port]/<path>
+
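+For example, a file system on the host `backups.joesdomain.com` could
+be created with a command like (path chosen just as an example)::
+
+  mkfs.s3ql sftp://backups.joesdomain.com/home/joe/s3ql_bucket
+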
+The SFTP backend will always ask you for a password if you haven't
+defined one in `~/.s3ql/authinfo`. However, public key authentication
+is tried first and the password will only be used if the public key
+authentication fails.
+
+The public and private keys will be read from the standard files in
+`~/.ssh/`. Note that S3QL will refuse to connect to a computer with an
+unknown host key; to add the key to your local keyring you have to
+establish a connection to that computer with the standard SSH command
+line programs first.
+
+
+
diff --git a/doc/html/_sources/contrib.txt b/doc/html/_sources/contrib.txt
new file mode 100644
index 0000000..3ee2323
--- /dev/null
+++ b/doc/html/_sources/contrib.txt
@@ -0,0 +1,87 @@
+.. -*- mode: rst -*-
+
+=====================
+Contributed Programs
+=====================
+
+S3QL comes with a few contributed programs that are not part of the
+core distribution (and are therefore not installed automatically by
+default), but which may nevertheless be useful. These programs are in
+the `contrib` directory of the source distribution or in
+`/usr/share/doc/s3ql/contrib` if you installed S3QL from a package.
+
+
+benchmark.py
+============
+
+This program measures your uplink bandwidth and compression speed and
+recommends a compression algorithm for optimal throughput.
+
+
+s3_copy.py
+==========
+
+This program physically duplicates an Amazon S3 bucket. It can be used to
+migrate buckets to a different storage region or storage class
+(standard or reduced redundancy).
+
+.. _pcp:
+
+pcp.py
+======
+
+``pcp.py`` is a wrapper program that starts several rsync processes to
+copy directory trees in parallel. This is important because
+transferring files in parallel significantly enhances performance when
+copying data from an S3QL file system (see :ref:`copy_performance` for
+details).
+
+To recursively copy the directory ``/mnt/home-backup`` into
+``/home/joe`` using 8 parallel processes and preserving permissions,
+you would execute ::
+
+ pcp.py -a --processes=8 /mnt/home-backup/ /home/joe
+
+
+s3ql_backup.sh
+==============
+
+This is an example script that demonstrates how to set up a simple but
+powerful backup solution using S3QL and `rsync
+<http://samba.org/rsync>`_.
+
+The `s3ql_backup.sh` script automates the following steps:
+
+#. Mount the file system
+#. Replicate the previous backup with :ref:`s3qlcp <s3qlcp>`
+#. Update the new copy with the data from the backup source using rsync
+#. Make the new backup immutable with :ref:`s3qllock <s3qllock>`
+#. Delete old backups that are no longer needed
+#. Unmount the file system
+
+The backups are stored in directories of the form
+`YYYY-MM-DD_HH:mm:SS` and the `expire_backups.py`_ command is used to
+delete old backups.
+
+
+expire_backups.py
+=================
+
+:program:`expire_backups.py` is a program to intelligently remove old
+backups that are no longer needed.
+
+.. include:: man/expire_backups.rst
+ :start-after: begin_main_content
+ :end-before: end_main_content
+
+For a full list of available options, run :program:`expire_backups.py
+--help`.
+
+
+s3ql.conf
+=========
+
+``s3ql.conf`` is an example upstart job definition file. It defines a
+job that automatically mounts an S3QL file system on system start, and
+properly unmounts it when the system is shut down.
+
diff --git a/doc/html/_sources/fsck.txt b/doc/html/_sources/fsck.txt
new file mode 100644
index 0000000..24ee2ed
--- /dev/null
+++ b/doc/html/_sources/fsck.txt
@@ -0,0 +1,16 @@
+.. -*- mode: rst -*-
+
+
+Checking for Errors
+===================
+
+If, for some reason, the file system has not been correctly unmounted,
+or if you suspect that there might be errors, you should run the
+`fsck.s3ql` utility. It has the following syntax::
+
+ fsck.s3ql [options] <storage url>
+
+This command accepts the following options:
+
+.. pipeinclude:: ../bin/fsck.s3ql --help
+ :start-after: show this help message and exit
diff --git a/doc/html/_sources/index.txt b/doc/html/_sources/index.txt
new file mode 100644
index 0000000..f3b5b72
--- /dev/null
+++ b/doc/html/_sources/index.txt
@@ -0,0 +1,23 @@
+.. -*- mode: rst -*-
+
+===================
+ S3QL User's Guide
+===================
+
+.. toctree::
+ :maxdepth: 2
+
+ about
+ installation
+ backends
+ mkfs
+ adm
+ mount
+ special
+ umount
+ fsck
+ contrib
+ tips
+ issues
+ man/index
+ resources
diff --git a/doc/html/_sources/installation.txt b/doc/html/_sources/installation.txt
new file mode 100644
index 0000000..b57325e
--- /dev/null
+++ b/doc/html/_sources/installation.txt
@@ -0,0 +1,101 @@
+.. -*- mode: rst -*-
+
+
+==============
+ Installation
+==============
+
+S3QL depends on several other programs and libraries that have to be
+installed first. The best method to satisfy these dependencies depends
+on your distribution. In some cases S3QL and all its dependencies can
+be installed with as few as three commands, while in other cases
+more work may be required.
+
+The `S3QL Wiki <http://code.google.com/p/s3ql/w/list>`_ contains
+installation instructions for quite a few different Linux
+distributions. You should only use the generic instructions in this
+manual if your distribution is not included in the `distribution-specific
+installation instructions
+<http://code.google.com/p/s3ql/w/list?q=label:Installation>`_ on the wiki.
+
+
+Dependencies
+============
+
+The following is a list of the programs and libraries required for
+running S3QL. Generally, you should first check if your distribution
+already provides suitable packages and only install from source if
+that is not the case.
+
+* Kernel version 2.6.9 or newer. Starting with kernel 2.6.26
+ you will get significantly better write performance, so you should
+ actually use *2.6.26 or newer whenever possible*.
+
+* The `FUSE Library <http://fuse.sourceforge.net/>`_ should already be
+ installed on your system. However, you have to make sure that you
+ have at least version 2.8.0.
+
+* The `PyCrypto++ Python Module
+ <http://pypi.python.org/pypi/pycryptopp>`_. To check if this module
+ is installed, try to execute `python -c 'import pycryptopp'`.
+
+* The `argparse Python Module
+ <http://pypi.python.org/pypi/argparse>`_. To check if this module is
+ installed, try to execute `python -c 'import argparse; print
+ argparse.__version__'`. If argparse is installed, this will print
+ the version number. You need version 1.1 or later.
+
+* The `APSW Python Module <http://code.google.com/p/apsw/>`_. To check
+  which (if any) version of APSW is installed, run the command ::
+
+ python -c 'import apsw; print apsw.apswversion(), apsw.sqlitelibversion()'
+
+ If APSW is installed, this should print two version numbers which
+ both have to be at least 3.7.0.
+
+* The `PyLibLZMA Python module
+ <http://pypi.python.org/pypi/pyliblzma>`_. To check if this module
+ is installed, execute `python -c 'import lzma; print
+ lzma.__version__'`. This should print a version number. You need at
+ least version 0.5.3.
+
+* The `Python LLFUSE module
+ <http://code.google.com/p/python-llfuse/>`_. To check if this module
+ is installed, execute `python -c 'import llfuse; print
+ llfuse.__version__'`. This should print a version number. You need at
+ least version 0.29.
+
+ Note that earlier S3QL versions shipped with a builtin version of
+ this module. If you are upgrading from such a version, make sure to
+ completely remove the old S3QL version first.
+
+* If you want to use the SFTP backend, then you also need the
+ `Paramiko Python Module <http://www.lag.net/paramiko/>`_. To check
+ if this module is installed, try to execute `python -c 'import
+ paramiko'`.
+
+
+.. _inst-s3ql:
+
+Installing S3QL
+===============
+
+To install S3QL itself, proceed as follows:
+
+1. Download S3QL from http://code.google.com/p/s3ql/downloads/list
+2. Unpack it into a folder of your choice
+3. Run `python setup.py test` to run a self-test. If this fails, ask
+ for help on the `mailing list
+ <http://groups.google.com/group/s3ql>`_ or report a bug in the
+ `issue tracker <http://code.google.com/p/s3ql/issues/list>`_.
+
+Now you have three options:
+
+* You can run the S3QL commands from the `bin/` directory.
+
+* You can install S3QL system-wide for all users. To do that, you
+ have to run `sudo python setup.py install`.
+
+* You can install S3QL into `~/.local` by executing `python
+ setup.py install --user`. In this case you should make sure that
+ `~/.local/bin` is in your `$PATH` environment variable.
diff --git a/doc/html/_sources/issues.txt b/doc/html/_sources/issues.txt
new file mode 100644
index 0000000..29b76ce
--- /dev/null
+++ b/doc/html/_sources/issues.txt
@@ -0,0 +1,89 @@
+.. -*- mode: rst -*-
+
+============
+Known Issues
+============
+
+* S3QL is rather slow when an application tries to write data in
+ unreasonably small chunks. If a 1 MB file is copied in chunks of 1
+ KB, this will take more than 10 times as long as when it's copied
+ with the (recommended) chunk size of 128 KB.
+
+ This is a limitation of the FUSE library (which does not yet support
+ write caching) which will hopefully be addressed in some future FUSE
+ version.
+
+  Most applications, including e.g. GNU `cp` and `rsync`, use
+  reasonably large buffers and are therefore not affected by this
+  problem and perform very efficiently on S3QL file systems.
+
+ However, if you encounter unexpectedly slow performance with a
+ specific program, this might be due to the program using very small
+ write buffers. Although this is not really a bug in the program,
+  it might be worth asking the program's authors for help.
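+
+  For example, with a (hypothetical) S3QL mountpoint `/mnt/s3ql`, the
+  effect can be demonstrated with `dd`::
+
+    dd if=/dev/zero of=/mnt/s3ql/testfile bs=1K count=1024
+    dd if=/dev/zero of=/mnt/s3ql/testfile bs=128K count=8
+
+  The first command will take considerably longer, even though both
+  write the same amount of data.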
+
+* S3QL always updates file and directory access times as if the ``relatime``
+ mount option has been specified: the access time ("atime") is only updated
+ if it is currently earlier than either the status change time
+ ("ctime") or modification time ("mtime").
+
+* S3QL directories always have an `st_nlink` value of 1. This may confuse
+ programs that rely on directories having `st_nlink` values of *(2 +
+ number of sub directories)*.
+
+ Note that this is not a bug in S3QL. Including sub directories in
+ the `st_nlink` value is a Unix convention, but by no means a
+ requirement. If an application blindly relies on this convention
+ being followed, then this is a bug in the application.
+
+ A prominent example are early versions of GNU find, which required
+  A prominent example is early versions of GNU find, which required
+ bug has already been fixed in recent find versions.
+
+
+* In theory, S3QL is not fully compatible with NFS. Since S3QL does
+ not support *inode generation numbers*, NFS clients may (once again,
+ in theory) accidentally read or write the wrong file in the
+ following situation:
+
+ #. An S3QL file system is exported over NFS
+ #. NFS client 1 opens a file A
+ #. Another NFS client 2 (or the server itself) deletes file A (without
+ client 1 knowing about this)
+ #. A new file B is created by either of the clients or the server
+ #. NFS client 1 tries to read or write file A (which has actually already been deleted).
+
+ In this situation it is possible that NFS client 1 actually writes
+ or reads the newly created file B instead. The chances of this are 1
+ to (2^32 - *n*) where *n* is the total number of directory entries
+ in the S3QL file system (as displayed by `s3qlstat`).
+
+  Luckily enough, as long as you have fewer than about 2 thousand
+  million directory entries (2^31), the chances of this are negligible
+  and you don't have to worry about it.
+
+* The `umount` and `fusermount -u` commands will *not* block until all
+  data has been uploaded to the backend. (This is a FUSE limitation
+ that will hopefully be removed in the future, see `issue 159
+ <http://code.google.com/p/s3ql/issues/detail?id=159>`_). If you use
+ either command to unmount an S3QL file system, you have to take care
+ to explicitly wait for the `mount.s3ql` process to terminate before
+ you shut down or restart the system. Therefore it is generally not a
+ good idea to mount an S3QL file system in `/etc/fstab` (you should
+ use a dedicated init script instead).
+
+* S3QL relies on the backends not to run out of space. This is a given
+ for big storage providers like Amazon S3, but you may stumble upon
+  this if you store buckets e.g. on a small SFTP server.
+
+ If there is no space left in the backend, attempts to write more
+ data into the S3QL file system will fail and the file system will be
+ in an inconsistent state and require a file system check (and you
+ should make sure to make space available in the backend before
+ running the check).
+
+ Unfortunately, there is no way to handle insufficient space in the
+ backend without leaving the file system inconsistent. Since
+ S3QL first writes data into the cache, it can no longer return an
+ error when it later turns out that the cache can not be committed to
+ the backend.
diff --git a/doc/html/_sources/man/adm.txt b/doc/html/_sources/man/adm.txt
new file mode 100644
index 0000000..c23865e
--- /dev/null
+++ b/doc/html/_sources/man/adm.txt
@@ -0,0 +1,66 @@
+.. -*- mode: rst -*-
+
+==============================
+The :program:`s3qladm` command
+==============================
+
+Synopsis
+========
+
+::
+
+ s3qladm [options] <action> <storage url>
+
+where :var:`action` may be one of :program:`passphrase`,
+:program:`upgrade`, :program:`delete` or :program:`download-metadata`.
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command performs various operations on S3QL buckets.
+The file system contained in the bucket *must not be mounted* when
+using |command|, or things will go badly wrong.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options.
+
+.. pipeinclude:: ../../bin/s3qladm --help
+ :start-after: show this help message and exit
+
+Actions
+=======
+
+The following actions may be specified:
+
+passphrase
+ Changes the encryption passphrase of the bucket.
+
+upgrade
+ Upgrade the file system contained in the bucket to the newest revision.
+
+delete
+ Delete the bucket and all its contents.
+
+download-metadata
+ Interactively download backups of the file system metadata.
+
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :program:`s3qladm`
diff --git a/doc/html/_sources/man/cp.txt b/doc/html/_sources/man/cp.txt
new file mode 100644
index 0000000..d0cbb41
--- /dev/null
+++ b/doc/html/_sources/man/cp.txt
@@ -0,0 +1,100 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlcp` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlcp [options] <source-dir> <dest-dir>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command duplicates the directory tree :var:`source-dir`
+into :var:`dest-dir` without physically copying the file contents.
+Both source and destination must lie inside the same S3QL file system.
+
+.. begin_main_content
+
+The replication will not take any additional space. Only if one of
+the directories is modified later on will the modified data take up
+additional storage space.
+
+`s3qlcp` can only be called by the user that mounted the file system
+and (if the file system was mounted with `--allow-other` or `--allow-root`)
+the root user. This limitation might be removed in the future (see `issue 155
+<http://code.google.com/p/s3ql/issues/detail?id=155>`_).
+
+Note that:
+
+* After the replication, both source and target directory will still
+ be completely ordinary directories. You can regard `<src>` as a
+ snapshot of `<target>` or vice versa. However, the most common
+ usage of `s3qlcp` is to regularly duplicate the same source
+  directory, say `documents`, to different target directories. For
+  e.g. a monthly replication, the target directories would typically be
+  named something like `documents_January` for the replication in
+  January, `documents_February` for the replication in February etc.
+ In this case it is clear that the target directories should be
+ regarded as snapshots of the source directory.
+
+* Exactly the same effect could be achieved by an ordinary copy
+ program like `cp -a`. However, this procedure would be orders of
+  magnitude slower, because `cp` would have to read every file
+  completely (so that S3QL would have to fetch all the data over the
+  network from the backend) before writing it into the destination
+  folder.
+
+* Before starting with the replication, S3QL has to flush the local
+ cache. So if you just copied lots of new data into the file system
+ that has not yet been uploaded, replication will take longer than
+ usual.
+
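+For example, a monthly snapshot of a `documents` directory can be
+created with::
+
+  s3qlcp documents documents_January
+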
+
+
+Snapshotting vs Hardlinking
+---------------------------
+
+Snapshot support in S3QL is inspired by the hardlinking feature that
+is offered by programs like `rsync <http://www.samba.org/rsync>`_ or
+`storeBackup <http://savannah.nongnu.org/projects/storebackup>`_.
+These programs can create a hardlink instead of copying a file if an
+identical file already exists in the backup. However, using hardlinks
+has two large disadvantages:
+
+* backups and restores always have to be made with a special program
+ that takes care of the hardlinking. The backup must not be touched
+ by any other programs (they may make changes that inadvertently
+ affect other hardlinked files)
+
+* special care needs to be taken to handle files which are already
+ hardlinked (the restore program needs to know that the hardlink was
+  not just introduced by the backup program to save space)
+
+S3QL snapshots do not have these problems, and they can be used with
+any backup program.
+
+
+.. end_main_content
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qlcp --help
+ :start-after: show this help message and exit
+
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`s3qlcp`
+
diff --git a/doc/html/_sources/man/ctrl.txt b/doc/html/_sources/man/ctrl.txt
new file mode 100644
index 0000000..4afa33b
--- /dev/null
+++ b/doc/html/_sources/man/ctrl.txt
@@ -0,0 +1,69 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlctrl` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlctrl [options] <action> <mountpoint> ...
+
+where :var:`action` may be one of :program:`flushcache`,
+:program:`upload-meta`, :program:`cachesize` or
+:program:`log`.
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command performs various actions on the S3QL file system mounted
+in :var:`mountpoint`.
+
+The following actions may be specified:
+
+flushcache
+ Uploads all changed file data to the backend.
+
+upload-meta
+ Upload metadata to the backend. All file system operations will
+ block while a snapshot of the metadata is prepared for upload.
+
+cachesize
+ Changes the cache size of the file system. This action requires an
+ additional argument that specifies the new cache size in kB, so the
+ complete command line is::
+
+ s3qlctrl [options] cachesize <mountpoint> <new-cache-size>
+
+log
+ Change the amount of information that is logged into
+ :file:`~/.s3ql/mount.log` file. The complete syntax is::
+
+ s3qlctrl [options] log <mountpoint> <level> [<module> [<module> ...]]
+
+  Here :var:`level` is the desired new log level and may be one of
+  *debug*, *info* or *warn*. One or more :var:`module` arguments may
+  be specified, but only with the *debug* level; they restrict the
+  debug output to just the listed modules.
+
+
+Options
+=======
+
+The |command| command also accepts the following options, no matter
+what specific action is being invoked:
+
+.. pipeinclude:: ../../bin/s3qlctrl --help
+ :start-after: show this help message and exit
+
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`s3qlctrl`
+
diff --git a/doc/html/_sources/man/expire_backups.txt b/doc/html/_sources/man/expire_backups.txt
new file mode 100644
index 0000000..6b2f538
--- /dev/null
+++ b/doc/html/_sources/man/expire_backups.txt
@@ -0,0 +1,106 @@
+.. -*- mode: rst -*-
+
+
+=======================================
+ The :program:`expire_backups` command
+=======================================
+
+Synopsis
+========
+
+::
+
+ expire_backups [options] <age> [<age> ...]
+
+
+Description
+===========
+
+The |command| command intelligently removes old backups that are no
+longer needed.
+
+.. begin_main_content
+
+To define what backups you want to keep for how long, you define a
+number of *age ranges*. :program:`expire_backups` ensures that you
+will have at least one backup in each age range at all times. It will
+keep exactly as many backups as are required for that and delete any
+backups that become redundant.
+
+Age ranges are specified by giving a list of range boundaries in terms
+of backup cycles. Every time you create a new backup, the existing
+backups age by one cycle.
+
+Example: when :program:`expire_backups` is called with the age range
+definition ``1 3 7 14 31``, it will guarantee that you always have the
+following backups available:
+
+#. A backup that is 0 to 1 cycles old (i.e, the most recent backup)
+#. A backup that is 1 to 3 cycles old
+#. A backup that is 3 to 7 cycles old
+#. A backup that is 7 to 14 cycles old
+#. A backup that is 14 to 31 cycles old
+
+.. NOTE::
+
+ If you do backups in fixed intervals, then one cycle will be
+ equivalent to the backup interval. The advantage of specifying the
+ age ranges in terms of backup cycles rather than days or weeks is
+ that it allows you to gracefully handle irregular backup intervals.
+ Imagine that for some reason you do not turn on your computer for
+ one month. Now all your backups are at least a month old, and if you
+ had specified the above backup strategy in terms of absolute ages,
+ they would all be deleted! Specifying age ranges in terms of backup
+  cycles avoids this sort of problem.
+
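+To illustrate the selection rule, here is a deliberately simplified
+Python sketch (the real program additionally simulates forthcoming
+backup cycles and keeps its age information in a state file)::
+
+    def backups_to_keep(ages, boundaries):
+        '''Return the backup ages to keep, given range boundaries'''
+
+        # Build age ranges [min, max) from consecutive boundaries,
+        # starting at age 0
+        ranges = list(zip([0] + boundaries, boundaries))
+        keep = set()
+        for (min_, max_) in ranges:
+            # Keep the youngest backup that falls into this range
+            for age in sorted(ages):
+                if min_ <= age < max_:
+                    keep.add(age)
+                    break
+        return keep
+
+    # With boundaries 1 3 7 and backups that are 0, 1, 2, 4, 5 and 8
+    # cycles old, only the backups aged 0, 1 and 4 are needed:
+    print(sorted(backups_to_keep([0, 1, 2, 4, 5, 8], [1, 3, 7])))
+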
+:program:`expire_backups` usage is simple. It requires backups to have
+names of the form ``year-month-day_hour:minute:seconds``
+(``YYYY-MM-DD_HH:mm:ss``) and works on all backups in the current
+directory. So for the above backup strategy, the correct invocation
+would be::
+
+ expire_backups.py 1 3 7 14 31
+
+When storing your backups on an S3QL file system, you probably want to
+specify the ``--use-s3qlrm`` option as well. This tells
+:program:`expire_backups` to use the :ref:`s3qlrm <s3qlrm>` command to
+delete directories.
+
+:program:`expire_backups` uses a "state file" to keep track of which
+backups are how many cycles old (since this cannot be inferred from
+the dates contained in the directory names). The standard name for
+this state file is :file:`.expire_backups.dat`. If this file gets
+damaged or deleted, :program:`expire_backups` no longer knows the ages
+of the backups and refuses to work. In this case you can use the
+:cmdopt:`--reconstruct-state` option to try to reconstruct the state
+from the backup dates. However, the accuracy of this reconstruction
+depends strongly on how rigorous you have been with making backups (it
+is only completely correct if the time between subsequent backups has
+always been exactly the same), so it's generally a good idea not to
+tamper with the state file.
+
+.. end_main_content
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../contrib/expire_backups.py --help
+ :start-after: show this help message and exit
+
+Exit Status
+===========
+
+|command| returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+See Also
+========
+
+|command| is shipped as part of S3QL, http://code.google.com/p/s3ql/.
+
+.. |command| replace:: :command:`expire_backups`
+
diff --git a/doc/html/_sources/man/fsck.txt b/doc/html/_sources/man/fsck.txt
new file mode 100644
index 0000000..ef6ed2d
--- /dev/null
+++ b/doc/html/_sources/man/fsck.txt
@@ -0,0 +1,44 @@
+.. -*- mode: rst -*-
+
+================================
+The :program:`fsck.s3ql` command
+================================
+
+Synopsis
+========
+
+::
+
+ fsck.s3ql [options] <storage url>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command checks the file system in the location
+specified by *storage url* for errors and attempts to repair any
+problems.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options.
+
+.. pipeinclude:: ../../bin/fsck.s3ql --help
+ :start-after: show this help message and exit
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`fsck.s3ql`
diff --git a/doc/html/_sources/man/index.txt b/doc/html/_sources/man/index.txt
new file mode 100644
index 0000000..39d1154
--- /dev/null
+++ b/doc/html/_sources/man/index.txt
@@ -0,0 +1,23 @@
+
+Manpages
+========
+
+The man pages are installed with S3QL on your system and can be viewed
+with the :command:`man` command. For reference, they are also included
+here in the User's Guide.
+
+.. toctree::
+ :maxdepth: 1
+
+ mkfs
+ adm
+ mount
+ stat
+ ctrl
+ cp
+ rm
+ lock
+ umount
+ fsck
+ pcp
+ expire_backups
diff --git a/doc/html/_sources/man/lock.txt b/doc/html/_sources/man/lock.txt
new file mode 100644
index 0000000..f17bf32
--- /dev/null
+++ b/doc/html/_sources/man/lock.txt
@@ -0,0 +1,78 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qllock` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qllock [options] <directory>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The :program:`s3qllock` command makes a directory tree in an S3QL file
+system immutable. Immutable trees can no longer be changed in any way
+whatsoever. You can not add new files or directories and you can not
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the :program:`s3qlrm` command.
+
+Rationale
+=========
+
+.. begin_main_content
+
+Immutability is a feature designed for backups. Traditionally, backups
+have been made on external tape drives. Once a backup was made, the
+tape was removed and locked away on a shelf. This has the
+great advantage that the contents of the backup are now permanently
+fixed. Nothing (short of physical destruction) can change or delete
+files in the backup.
+
+In contrast, when backing up into an online storage system like S3QL,
+all backups are available every time the file system is mounted.
+Nothing prevents a file in an old backup from being changed again
+later on. In the worst case, this may make your entire backup system
+worthless. Imagine that your system gets infected by a nasty virus
+that simply deletes all files it can find -- if the virus is active
+while the backup file system is mounted, the virus will destroy all
+your old backups as well!
+
+Even if the possibility of a malicious virus or trojan horse is
+excluded, being able to change a backup after it has been made is
+generally not a good idea. A common S3QL use case is to keep the file
+system mounted at all times and periodically create backups with
+:program:`rsync -a`. This allows every user to recover her files from a
+backup without having to call the system administrator. However, this
+also allows every user to accidentally change or delete files *in* one
+of the old backups.
+
+Making a backup immutable protects you against all these problems.
+Unless you happen to run into a virus that was specifically programmed
+to attack S3QL file systems, backups can be neither deleted nor
+changed after they have been made immutable.
+
+
+.. end_main_content
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qllock --help
+ :start-after: show this help message and exit
+
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`s3qllock`
+
diff --git a/doc/html/_sources/man/mkfs.txt b/doc/html/_sources/man/mkfs.txt
new file mode 100644
index 0000000..c61270a
--- /dev/null
+++ b/doc/html/_sources/man/mkfs.txt
@@ -0,0 +1,43 @@
+.. -*- mode: rst -*-
+
+================================
+The :program:`mkfs.s3ql` command
+================================
+
+Synopsis
+========
+
+::
+
+ mkfs.s3ql [options] <storage url>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command creates a new file system in the location
+specified by *storage url*.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/mkfs.s3ql --help
+ :start-after: show this help message and exit
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`mkfs.s3ql`
diff --git a/doc/html/_sources/man/mount.txt b/doc/html/_sources/man/mount.txt
new file mode 100644
index 0000000..3905c03
--- /dev/null
+++ b/doc/html/_sources/man/mount.txt
@@ -0,0 +1,48 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`mount.s3ql` command
+=================================
+
+Synopsis
+========
+
+::
+
+ mount.s3ql [options] <storage url> <mount point>
+
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command mounts the S3QL file system stored in *storage
+url* in the directory *mount point*.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/mount.s3ql --help
+ :start-after: show this help message and exit
+
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`mount.s3ql`
+
diff --git a/doc/html/_sources/man/pcp.txt b/doc/html/_sources/man/pcp.txt
new file mode 100644
index 0000000..cd7a66c
--- /dev/null
+++ b/doc/html/_sources/man/pcp.txt
@@ -0,0 +1,46 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`pcp` command
+=================================
+
+Synopsis
+========
+
+::
+
+ pcp [options] <source> [<source> ...] <destination>
+
+
+Description
+===========
+
+The |command| command is a wrapper that starts several
+:program:`rsync` processes to copy directory trees in parallel. This
+allows much better copying performance on file systems that have
+relatively high latency when retrieving individual files, such as S3QL.
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../contrib/pcp.py --help
+ :start-after: show this help message and exit
+
+Exit Status
+===========
+
+|command| returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+See Also
+========
+
+|command| is shipped as part of S3QL, http://code.google.com/p/s3ql/.
+
+.. |command| replace:: :command:`pcp`
+
diff --git a/doc/html/_sources/man/rm.txt b/doc/html/_sources/man/rm.txt
new file mode 100644
index 0000000..0832e27
--- /dev/null
+++ b/doc/html/_sources/man/rm.txt
@@ -0,0 +1,41 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlrm` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlrm [options] <directory>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command recursively deletes files and directories on an
+S3QL file system. Although |command| is faster than using e.g.
+:command:`rm -r`, the main reason for its existence is that it allows
+you to delete immutable trees (which can be created with
+:program:`s3qllock`) as well.
+
+Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.
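+
+For example, to delete an (illustrative) immutable backup tree::
+
+  s3qlrm /mnt/s3ql/backup-2010-04-21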
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qlrm --help
+ :start-after: show this help message and exit
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`s3qlrm`
+
diff --git a/doc/html/_sources/man/stat.txt b/doc/html/_sources/man/stat.txt
new file mode 100644
index 0000000..7578e19
--- /dev/null
+++ b/doc/html/_sources/man/stat.txt
@@ -0,0 +1,41 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlstat` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlstat [options] <mountpoint>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command prints statistics about the S3QL file system mounted
+at :var:`mountpoint`.
+
+|command| can only be called by the user that mounted the file system
+and (if the file system was mounted with :cmdopt:`--allow-other` or
+:cmdopt:`--allow-root`) the root user. This limitation might be
+removed in the future (see `issue 155
+<http://code.google.com/p/s3ql/issues/detail?id=155>`_).
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qlstat --help
+ :start-after: show this help message and exit
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`s3qlstat`
+
diff --git a/doc/html/_sources/man/umount.txt b/doc/html/_sources/man/umount.txt
new file mode 100644
index 0000000..e1ef7f0
--- /dev/null
+++ b/doc/html/_sources/man/umount.txt
@@ -0,0 +1,44 @@
+.. -*- mode: rst -*-
+
+==================================
+The :program:`umount.s3ql` command
+==================================
+
+Synopsis
+========
+
+::
+
+ umount.s3ql [options] <mount point>
+
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command unmounts the S3QL file system mounted in the
+directory *mount point* and blocks until all data has been uploaded to
+the storage backend.
+
+Only the user who mounted the file system with :program:`mount.s3ql`
+is able to unmount it with |command|. If you are root and want to
+unmount an S3QL file system mounted by an ordinary user, you have to
+use the :program:`fusermount -u` or :command:`umount` command instead.
+Note that these commands do not block until all data has been
+uploaded, so if you use them instead of :program:`umount.s3ql` then
+you should manually wait for the :program:`mount.s3ql` process to
+terminate before shutting down the system.
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/umount.s3ql --help
+ :start-after: show this help message and exit
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`umount.s3ql`
diff --git a/doc/html/_sources/mkfs.txt b/doc/html/_sources/mkfs.txt
new file mode 100644
index 0000000..0b9fa97
--- /dev/null
+++ b/doc/html/_sources/mkfs.txt
@@ -0,0 +1,20 @@
+.. -*- mode: rst -*-
+
+====================
+File System Creation
+====================
+
+An S3QL file system is created with the `mkfs.s3ql` command. It has the
+following syntax::
+
+ mkfs.s3ql [options] <storage url>
+
+This command accepts the following options:
+
+.. pipeinclude:: ../bin/mkfs.s3ql --help
+ :start-after: show this help message and exit
+
+Unless you have specified the `--plain` option, `mkfs.s3ql` will ask you
+to enter an encryption password. If you do not want to enter this
+password every time that you mount the file system, you can store it
+in the `~/.s3ql/authinfo` file, see :ref:`bucket_pw`.
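+
+For example, to create an encrypted file system in a hypothetical S3
+bucket and enter the encryption password interactively::
+
+  mkfs.s3ql s3://joes_bucket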
diff --git a/doc/html/_sources/mount.txt b/doc/html/_sources/mount.txt
new file mode 100644
index 0000000..609c4a4
--- /dev/null
+++ b/doc/html/_sources/mount.txt
@@ -0,0 +1,175 @@
+.. -*- mode: rst -*-
+
+==========
+ Mounting
+==========
+
+
+An S3QL file system is mounted with the `mount.s3ql` command. It has
+the following syntax::
+
+ mount.s3ql [options] <storage url> <mountpoint>
+
+.. NOTE::
+
+ S3QL is not a network file system like `NFS
+ <http://en.wikipedia.org/wiki/Network_File_System_%28protocol%29>`_
+ or `CIFS <http://en.wikipedia.org/wiki/CIFS>`_. It can only be
+ mounted on one computer at a time.
+
+This command accepts the following options:
+
+.. pipeinclude:: ../bin/mount.s3ql --help
+ :start-after: show this help message and exit
+
+.. _bucket_pw:
+
+Storing Encryption Passwords
+============================
+
+If you are trying to mount an encrypted bucket, `mount.s3ql` will first
+try to read the password from the `~/.s3ql/authinfo` file (the same file
+that is used to read the backend authentication data) and prompt the
+user to enter the password only if this fails.
+
+The `authinfo` entries to specify bucket passwords are of the form ::
+
+ storage-url <storage-url> password <password>
+
+So to always use the password `topsecret` when mounting `s3://joes_bucket`,
+the entry would be ::
+
+ storage-url s3://joes_bucket password topsecret
+
+.. NOTE::
+
+ If you are using the local backend, the storage url will
+ always be converted to an absolute path. So if you are in the
+ `/home/john` directory and try to mount `local://bucket`, the matching
+ `authinfo` entry has to have a storage url of
+ `local:///home/john/bucket`.
+
+
+Compression Algorithms
+======================
+
+S3QL supports three compression algorithms: LZMA, Bzip2 and zlib (with
+LZMA being the default). The compression algorithm can be specified
+freely whenever the file system is mounted, since it affects only the
+compression of new data blocks.
+
+Roughly speaking, LZMA is slower but achieves better compression
+ratios than Bzip2, while Bzip2 in turn is slower but achieves better
+compression ratios than zlib.
+
+For maximum file system performance, the best algorithm therefore
+depends on your network connection speed: the compression algorithm
+should be fast enough to saturate your network connection.
+
+To find the optimal algorithm for your system, S3QL ships with a
+program called `benchmark.py` in the `contrib` directory. You should
+run this program on a file whose size is roughly equal to the block
+size of your file system and whose contents are similar. It will
+then determine the compression speeds for the different algorithms and
+the upload speeds for the specified backend and recommend the best
+algorithm that is fast enough to saturate your network connection.
+
+Obviously you should make sure that there is little other system load
+when you run `benchmark.py` (e.g., don't compile software or encode
+videos at the same time).
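+
+A typical invocation might look like the following; the storage url
+and test file are placeholders, and the exact arguments may differ,
+so consult ``benchmark.py --help``::
+
+  contrib/benchmark.py s3://joes_bucket testfile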
+
+
+Parallel Compression
+====================
+
+If you are running S3QL on a system with multiple cores, you might
+want to set ``--compression-threads`` to a value greater than one. This
+will instruct S3QL to compress and encrypt several blocks at the same
+time.
+
+If you do this in combination with the LZMA compression algorithm,
+you should keep an eye on memory usage though: every LZMA compression
+thread requires about 200 MB of RAM.
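+
+For example, to compress and encrypt four blocks in parallel (the
+bucket name and mount point are placeholders)::
+
+  mount.s3ql --compression-threads 4 s3://joes_bucket /mnt/s3ql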
+
+
+.. NOTE::
+
+ To determine the optimal compression algorithm for your network
+ connection when using multiple threads, you can pass the
+ ``--compression-threads`` option to `contrib/benchmark.py`.
+
+
+Notes about Caching
+===================
+
+S3QL maintains a local cache of the file system data to speed up
+access. The cache is block based, so it is possible that only parts of
+a file are in the cache.
+
+Maximum Number of Cache Entries
+-------------------------------
+
+The maximum size of the cache can be configured with the `--cachesize`
+option. In addition to that, the maximum number of objects in the
+cache is limited by the `--max-cache-entries` option, so it is
+possible that the cache does not grow up to the maximum cache size
+because the maximum number of cache elements has been reached. The
+reason for this limit is that each cache entry requires one open
+file descriptor, and Linux distributions usually limit the total
+number of file descriptors per process to about a thousand.
+
+If you specify a larger value for `--max-cache-entries`, you should
+therefore also configure your system to allow more open file handles.
+This can be done temporarily with the `ulimit -n` command. The method
+to permanently change this limit system-wide depends on your
+distribution.
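+
+For example, to temporarily raise the limit to 8192 open file handles
+in the current shell (the number is only an illustration)::
+
+  ulimit -n 8192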
+
+
+
+Cache Flushing and Expiration
+-----------------------------
+
+S3QL flushes changed blocks in the cache to the backend whenever a block
+has not been accessed for at least 10 seconds. Note that when a block is
+flushed, it still remains in the cache.
+
+Cache expiration (i.e., removal of blocks from the cache) is only done
+when the maximum cache size is reached. S3QL always expires the least
+recently used blocks first.
+
+
+Automatic Mounting
+==================
+
+If you want to mount and unmount an S3QL file system automatically at
+system startup and shutdown, you should do so with one dedicated S3QL
+init script for each S3QL file system.
+
+If your system is using upstart, an appropriate job can be defined
+as follows (and should be placed in `/etc/init/`):
+
+.. literalinclude:: ../contrib/s3ql.conf
+ :linenos:
+ :lines: 5-
+
+.. NOTE::
+
+ In principle, it is also possible to automatically mount an S3QL
+ file system with an appropriate entry in `/etc/fstab`. However,
+ this is not recommended for several reasons:
+
+ * file systems mounted in `/etc/fstab` will be unmounted with the
+ `umount` command, so your system will not wait until all data has
+ been uploaded but will shut down (or restart) immediately (this is a
+ FUSE limitation, see `issue 159
+ <http://code.google.com/p/s3ql/issues/detail?id=159>`_).
+
+ * There is no way to tell the system that mounting S3QL requires a
+ Python interpreter to be available, so it may attempt to run
+ `mount.s3ql` before it has mounted the volume containing the
+ Python interpreter.
+
+ * There is no standard way to tell the system that the internet
+ connection has to be up before the S3QL file system can be
+ mounted.
+
diff --git a/doc/html/_sources/resources.txt b/doc/html/_sources/resources.txt
new file mode 100644
index 0000000..2c435bf
--- /dev/null
+++ b/doc/html/_sources/resources.txt
@@ -0,0 +1,22 @@
+.. -*- mode: rst -*-
+
+.. _resources:
+
+================================
+Further Resources / Getting Help
+================================
+
+If you have questions or problems with S3QL that you weren't able to
+resolve with this manual, you might want to consider the following other resources:
+
+* The `S3QL Wiki <http://code.google.com/p/s3ql/w/list>`_
+
+* The `S3QL FAQ <http://code.google.com/p/s3ql/wiki/FAQ>`_
+
+* The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You
+ can subscribe by sending a mail to
+ `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_.
+
+
+Please report any bugs you may encounter in the `Issue Tracker
+<http://code.google.com/p/s3ql/issues/list>`_.
diff --git a/doc/html/_sources/special.txt b/doc/html/_sources/special.txt
new file mode 100644
index 0000000..c5acade
--- /dev/null
+++ b/doc/html/_sources/special.txt
@@ -0,0 +1,114 @@
+.. -*- mode: rst -*-
+
+
+========================
+ Advanced S3QL Features
+========================
+
+.. _s3qlcp:
+
+Snapshotting and Copy-on-Write
+==============================
+
+The command `s3qlcp` can be used to duplicate a directory tree without
+physically copying the file contents. This is possible due to the data
+de-duplication feature of S3QL.
+
+The syntax of `s3qlcp` is::
+
+ s3qlcp [options] <src> <target>
+
+This will replicate the contents of the directory `<src>` in the
+directory `<target>`. `<src>` has to be an existing directory and
+`<target>` must not exist. Moreover, both directories have to be
+within the same S3QL file system.
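+
+For example, to take a snapshot of a (hypothetical) directory of
+current backups::
+
+  s3qlcp /mnt/s3ql/current /mnt/s3ql/backup-2010-04-21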
+
+.. include:: man/cp.rst
+ :start-after: begin_main_content
+ :end-before: end_main_content
+
+.. _s3qlstat:
+
+Getting Statistics
+==================
+
+You can get more information about a mounted S3QL file system with the
+`s3qlstat` command. It has the following syntax::
+
+ s3qlstat [options] <mountpoint>
+
+Probably the most interesting numbers are the total size of your data,
+the total size after de-duplication, and the final size after
+de-duplication and compression.
+
+`s3qlstat` can only be called by the user that mounted the file system
+and (if the file system was mounted with `--allow-other` or `--allow-root`)
+the root user. This limitation might be removed in the future (see `issue 155
+<http://code.google.com/p/s3ql/issues/detail?id=155>`_).
+
+For a full list of available options, run `s3qlstat --help`.
+
+.. _s3qllock:
+
+Immutable Trees
+===============
+
+The command :program:`s3qllock` can be used to make a directory tree
+immutable. Immutable trees can no longer be changed in any way
+whatsoever. You cannot add new files or directories, and you cannot
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the :program:`s3qlrm` command (see
+below).
+
+For example, to make the directory tree beneath the directory
+``2010-04-21`` immutable, execute ::
+
+ s3qllock 2010-04-21
+
+.. include:: man/lock.rst
+ :start-after: begin_main_content
+ :end-before: end_main_content
+
+
+.. _s3qlrm:
+
+Fast Recursive Removal
+======================
+
+The ``s3qlrm`` command can be used to recursively delete files and
+directories on an S3QL file system. Although ``s3qlrm`` is faster than
+using e.g. ``rm -r``, the main reason for its existence is that it
+allows you to delete immutable trees as well. The syntax is rather
+simple::
+
+ s3qlrm <directory>
+
+Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.
+
+.. _s3qlctrl:
+
+Runtime Configuration
+=====================
+
+
+The `s3qlctrl` command can be used to control a mounted S3QL file
+system. Its syntax is ::
+
+ s3qlctrl [options] <action> <mountpoint> ...
+
+`<mountpoint>` must be the location of a mounted S3QL file system.
+For a list of valid options, run `s3qlctrl --help`. `<action>`
+may be one of the following (an example follows the list):
+
+ :flushcache:
+ Flush file system cache. The command blocks until the cache has
+ been flushed.
+ :log:
+ Change log level.
+ :cachesize:
+ Change file system cache size.
+ :upload-meta:
+ Trigger a metadata upload.
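+
+For example, to flush the file system cache of a file system mounted
+at a (hypothetical) mount point, one would run::
+
+  s3qlctrl flushcache /mnt/s3ql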
+
+
diff --git a/doc/html/_sources/tips.txt b/doc/html/_sources/tips.txt
new file mode 100644
index 0000000..b857f75
--- /dev/null
+++ b/doc/html/_sources/tips.txt
@@ -0,0 +1,81 @@
+.. -*- mode: rst -*-
+
+=============
+Tips & Tricks
+=============
+
+
+.. _copy_performance:
+
+
+Permanently mounted backup file system
+======================================
+
+If you use S3QL as a backup file system, it can be useful to mount the
+file system permanently (rather than just mounting it for a backup and
+unmounting it afterwards). Especially if your file system becomes
+large, this saves you the long mount and unmount times when you only
+want to restore a single file.
+
+If you decide to do so, you should make sure to
+
+* Use :ref:`s3qllock <s3qllock>` to ensure that backups are immutable
+ after they have been made.
+
+* Call :ref:`s3qlctrl upload-meta <s3qlctrl>` right after every
+ backup to make sure that the newest metadata is stored safely (if
+ you do backups often enough, this may also allow you to set the
+ :cmdopt:`--metadata-upload-interval` option of :program:`mount.s3ql`
+ to zero). A short sketch of this routine follows the list.
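+
+A minimal sketch of such a backup routine (all paths are
+illustrative)::
+
+  rsync -a /home/ /mnt/s3ql/2010-08-01/
+  s3qllock /mnt/s3ql/2010-08-01
+  s3qlctrl upload-meta /mnt/s3ql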
+
+
+
+Improving copy performance
+==========================
+
+If you want to copy a lot of small files *from* an S3QL file system
+(e.g. for a system restore), you will probably notice that the
+performance is rather bad.
+
+The reason for this is intrinsic to the way S3QL works. Whenever you
+read a file, S3QL first has to retrieve this file over the network
+from the storage backend. This takes a minimum amount of time (the
+network latency), no matter how big or small the file is. So when you
+copy lots of small files, 99% of the time is actually spent waiting
+for network data.
+
+Theoretically, this problem is easy to solve: you just have to copy
+several files at the same time. In practice, however, almost all Unix
+utilities (``cp``, ``rsync``, ``tar`` and friends) insist on copying
+data one file at a time. This makes a lot of sense when copying data
+on the local hard disk, but in case of S3QL this is really
+unfortunate.
+
+The best workaround that has been found so far is to copy files by
+starting several rsync processes at once and use exclusion rules to
+make sure that they work on different sets of files.
+
+For example, the following script will start 3 rsync instances. The
+first instance handles all filenames starting with a-f, the second the
+filenames from g-l and the third covers the rest. The ``+ */`` rule
+ensures that every instance looks into all directories. ::
+
+ #!/bin/bash
+
+ RSYNC_ARGS="-aHv /mnt/s3ql/ /home/restore/"
+
+ rsync -f "+ */" -f "-! [a-f]*" $RSYNC_ARGS &
+ rsync -f "+ */" -f "-! [g-l]*" $RSYNC_ARGS &
+ rsync -f "+ */" -f "- [a-l]*" $RSYNC_ARGS &
+
+ wait
+
+The optimum number of parallel processes depends on your network
+connection and the size of the files that you want to transfer.
+However, starting about 10 processes seems to be a good compromise
+that increases performance dramatically in almost all situations.
+
+S3QL comes with a script named ``pcp.py`` in the ``contrib`` directory
+that can be used to transfer files in parallel without having to write
+an explicit script first. See the description of :ref:`pcp` for
+details.
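+
+A minimal equivalent of the script above using ``pcp.py`` (same
+placeholder paths)::
+
+  contrib/pcp.py /mnt/s3ql/ /home/restore/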
diff --git a/doc/html/_sources/umount.txt b/doc/html/_sources/umount.txt
new file mode 100644
index 0000000..dac248e
--- /dev/null
+++ b/doc/html/_sources/umount.txt
@@ -0,0 +1,31 @@
+.. -*- mode: rst -*-
+
+==========
+Unmounting
+==========
+
+To unmount an S3QL file system, use the command::
+
+ umount.s3ql [options] <mountpoint>
+
+This will block until all data has been committed to the storage
+backend.
+
+Only the user who mounted the file system with :command:`mount.s3ql`
+is able to unmount it again. If you are root and want to unmount an
+S3QL file system mounted by an ordinary user, you have to use the
+:command:`fusermount -u` or :command:`umount` command instead. Note
+that these commands do not block until all data has been uploaded, so
+if you use them instead of `umount.s3ql` then you should manually wait
+for the `mount.s3ql` process to terminate before shutting down the
+system.
+
+The :command:`umount.s3ql` command accepts the following options:
+
+.. pipeinclude:: ../bin/umount.s3ql --help
+ :start-after: show this help message and exit
+
+If, for some reason, the `umount.s3ql` command does not work, the file
+system can also be unmounted with `fusermount -u -z`. Note that this
+command will return immediately and the file system may continue to
+upload data in the background for a while longer.
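+
+A sketch for shutting down cleanly in this situation, assuming no
+other `mount.s3ql` instances are running on the system::
+
+  fusermount -u -z /mnt/s3ql
+  while pgrep -f mount.s3ql > /dev/null; do sleep 1; done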
diff --git a/doc/html/_static/ajax-loader.gif b/doc/html/_static/ajax-loader.gif
new file mode 100644
index 0000000..61faf8c
--- /dev/null
+++ b/doc/html/_static/ajax-loader.gif
Binary files differ
diff --git a/doc/html/_static/basic.css b/doc/html/_static/basic.css
new file mode 100644
index 0000000..4b875f8
--- /dev/null
+++ b/doc/html/_static/basic.css
@@ -0,0 +1,528 @@
+/*
+ * basic.css
+ * ~~~~~~~~~
+ *
+ * Sphinx stylesheet -- basic theme.
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+/* -- main layout ----------------------------------------------------------- */
+
+div.clearer {
+ clear: both;
+}
+
+/* -- relbar ---------------------------------------------------------------- */
+
+div.related {
+ width: 100%;
+ font-size: 90%;
+}
+
+div.related h3 {
+ display: none;
+}
+
+div.related ul {
+ margin: 0;
+ padding: 0 0 0 10px;
+ list-style: none;
+}
+
+div.related li {
+ display: inline;
+}
+
+div.related li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+/* -- sidebar --------------------------------------------------------------- */
+
+div.sphinxsidebarwrapper {
+ padding: 10px 5px 0 10px;
+}
+
+div.sphinxsidebar {
+ float: left;
+ width: 230px;
+ margin-left: -100%;
+ font-size: 90%;
+}
+
+div.sphinxsidebar ul {
+ list-style: none;
+}
+
+div.sphinxsidebar ul ul,
+div.sphinxsidebar ul.want-points {
+ margin-left: 20px;
+ list-style: square;
+}
+
+div.sphinxsidebar ul ul {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+div.sphinxsidebar form {
+ margin-top: 10px;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #98dbcc;
+ font-family: sans-serif;
+ font-size: 1em;
+}
+
+img {
+ border: 0;
+}
+
+/* -- search page ----------------------------------------------------------- */
+
+ul.search {
+ margin: 10px 0 0 20px;
+ padding: 0;
+}
+
+ul.search li {
+ padding: 5px 0 5px 20px;
+ background-image: url(file.png);
+ background-repeat: no-repeat;
+ background-position: 0 7px;
+}
+
+ul.search li a {
+ font-weight: bold;
+}
+
+ul.search li div.context {
+ color: #888;
+ margin: 2px 0 0 30px;
+ text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+ font-weight: bold;
+}
+
+/* -- index page ------------------------------------------------------------ */
+
+table.contentstable {
+ width: 90%;
+}
+
+table.contentstable p.biglink {
+ line-height: 150%;
+}
+
+a.biglink {
+ font-size: 1.3em;
+}
+
+span.linkdescr {
+ font-style: italic;
+ padding-top: 5px;
+ font-size: 90%;
+}
+
+/* -- general index --------------------------------------------------------- */
+
+table.indextable {
+ width: 100%;
+}
+
+table.indextable td {
+ text-align: left;
+ vertical-align: top;
+}
+
+table.indextable dl, table.indextable dd {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+table.indextable tr.pcap {
+ height: 10px;
+}
+
+table.indextable tr.cap {
+ margin-top: 10px;
+ background-color: #f2f2f2;
+}
+
+img.toggler {
+ margin-right: 3px;
+ margin-top: 3px;
+ cursor: pointer;
+}
+
+div.modindex-jumpbox {
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+ margin: 1em 0 1em 0;
+ padding: 0.4em;
+}
+
+div.genindex-jumpbox {
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+ margin: 1em 0 1em 0;
+ padding: 0.4em;
+}
+
+/* -- general body styles --------------------------------------------------- */
+
+a.headerlink {
+ visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+ visibility: visible;
+}
+
+div.body p.caption {
+ text-align: inherit;
+}
+
+div.body td {
+ text-align: left;
+}
+
+.field-list ul {
+ padding-left: 1em;
+}
+
+.first {
+ margin-top: 0 !important;
+}
+
+p.rubric {
+ margin-top: 30px;
+ font-weight: bold;
+}
+
+img.align-left, .figure.align-left, object.align-left {
+ clear: left;
+ float: left;
+ margin-right: 1em;
+}
+
+img.align-right, .figure.align-right, object.align-right {
+ clear: right;
+ float: right;
+ margin-left: 1em;
+}
+
+img.align-center, .figure.align-center, object.align-center {
+ display: block;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+.align-left {
+ text-align: left;
+}
+
+.align-center {
+ clear: both;
+ text-align: center;
+}
+
+.align-right {
+ text-align: right;
+}
+
+/* -- sidebars -------------------------------------------------------------- */
+
+div.sidebar {
+ margin: 0 0 0.5em 1em;
+ border: 1px solid #ddb;
+ padding: 7px 7px 0 7px;
+ background-color: #ffe;
+ width: 40%;
+ float: right;
+}
+
+p.sidebar-title {
+ font-weight: bold;
+}
+
+/* -- topics ---------------------------------------------------------------- */
+
+div.topic {
+ border: 1px solid #ccc;
+ padding: 7px 7px 0 7px;
+ margin: 10px 0 10px 0;
+}
+
+p.topic-title {
+ font-size: 1.1em;
+ font-weight: bold;
+ margin-top: 10px;
+}
+
+/* -- admonitions ----------------------------------------------------------- */
+
+div.admonition {
+ margin-top: 10px;
+ margin-bottom: 10px;
+ padding: 7px;
+}
+
+div.admonition dt {
+ font-weight: bold;
+}
+
+div.admonition dl {
+ margin-bottom: 0;
+}
+
+p.admonition-title {
+ margin: 0px 10px 5px 0px;
+ font-weight: bold;
+}
+
+div.body p.centered {
+ text-align: center;
+ margin-top: 25px;
+}
+
+/* -- tables ---------------------------------------------------------------- */
+
+table.docutils {
+ border: 0;
+ border-collapse: collapse;
+}
+
+table.docutils td, table.docutils th {
+ padding: 1px 8px 1px 5px;
+ border-top: 0;
+ border-left: 0;
+ border-right: 0;
+ border-bottom: 1px solid #aaa;
+}
+
+table.field-list td, table.field-list th {
+ border: 0 !important;
+}
+
+table.footnote td, table.footnote th {
+ border: 0 !important;
+}
+
+th {
+ text-align: left;
+ padding-right: 5px;
+}
+
+table.citation {
+ border-left: solid 1px gray;
+ margin-left: 1px;
+}
+
+table.citation td {
+ border-bottom: none;
+}
+
+/* -- other body styles ----------------------------------------------------- */
+
+ol.arabic {
+ list-style: decimal;
+}
+
+ol.loweralpha {
+ list-style: lower-alpha;
+}
+
+ol.upperalpha {
+ list-style: upper-alpha;
+}
+
+ol.lowerroman {
+ list-style: lower-roman;
+}
+
+ol.upperroman {
+ list-style: upper-roman;
+}
+
+dl {
+ margin-bottom: 15px;
+}
+
+dd p {
+ margin-top: 0px;
+}
+
+dd ul, dd table {
+ margin-bottom: 10px;
+}
+
+dd {
+ margin-top: 3px;
+ margin-bottom: 10px;
+ margin-left: 30px;
+}
+
+dt:target, .highlighted {
+ background-color: #fbe54e;
+}
+
+dl.glossary dt {
+ font-weight: bold;
+ font-size: 1.1em;
+}
+
+.field-list ul {
+ margin: 0;
+ padding-left: 1em;
+}
+
+.field-list p {
+ margin: 0;
+}
+
+.refcount {
+ color: #060;
+}
+
+.optional {
+ font-size: 1.3em;
+}
+
+.versionmodified {
+ font-style: italic;
+}
+
+.system-message {
+ background-color: #fda;
+ padding: 5px;
+ border: 3px solid red;
+}
+
+.footnote:target {
+ background-color: #ffa;
+}
+
+.line-block {
+ display: block;
+ margin-top: 1em;
+ margin-bottom: 1em;
+}
+
+.line-block .line-block {
+ margin-top: 0;
+ margin-bottom: 0;
+ margin-left: 1.5em;
+}
+
+.guilabel, .menuselection {
+ font-family: sans-serif;
+}
+
+.accelerator {
+ text-decoration: underline;
+}
+
+.classifier {
+ font-style: oblique;
+}
+
+/* -- code displays --------------------------------------------------------- */
+
+pre {
+ overflow: auto;
+ overflow-y: hidden; /* fixes display issues on Chrome browsers */
+}
+
+td.linenos pre {
+ padding: 5px 0px;
+ border: 0;
+ background-color: transparent;
+ color: #aaa;
+}
+
+table.highlighttable {
+ margin-left: 0.5em;
+}
+
+table.highlighttable td {
+ padding: 0 0.5em 0 0.5em;
+}
+
+tt.descname {
+ background-color: transparent;
+ font-weight: bold;
+ font-size: 1.2em;
+}
+
+tt.descclassname {
+ background-color: transparent;
+}
+
+tt.xref, a tt {
+ background-color: transparent;
+ font-weight: bold;
+}
+
+h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
+ background-color: transparent;
+}
+
+.viewcode-link {
+ float: right;
+}
+
+.viewcode-back {
+ float: right;
+ font-family: sans-serif;
+}
+
+div.viewcode-block:target {
+ margin: -1px -10px;
+ padding: 0 10px;
+}
+
+/* -- math display ---------------------------------------------------------- */
+
+img.math {
+ vertical-align: middle;
+}
+
+div.body div.math p {
+ text-align: center;
+}
+
+span.eqno {
+ float: right;
+}
+
+/* -- printout stylesheet --------------------------------------------------- */
+
+@media print {
+ div.document,
+ div.documentwrapper,
+ div.bodywrapper {
+ margin: 0 !important;
+ width: 100%;
+ }
+
+ div.sphinxsidebar,
+ div.related,
+ div.footer,
+ #top-link {
+ display: none;
+ }
+} \ No newline at end of file
diff --git a/doc/html/_static/comment-bright.png b/doc/html/_static/comment-bright.png
new file mode 100644
index 0000000..551517b
--- /dev/null
+++ b/doc/html/_static/comment-bright.png
Binary files differ
diff --git a/doc/html/_static/comment-close.png b/doc/html/_static/comment-close.png
new file mode 100644
index 0000000..09b54be
--- /dev/null
+++ b/doc/html/_static/comment-close.png
Binary files differ
diff --git a/doc/html/_static/comment.png b/doc/html/_static/comment.png
new file mode 100644
index 0000000..92feb52
--- /dev/null
+++ b/doc/html/_static/comment.png
Binary files differ
diff --git a/doc/html/_static/contents.png b/doc/html/_static/contents.png
new file mode 100644
index 0000000..7fb8215
--- /dev/null
+++ b/doc/html/_static/contents.png
Binary files differ
diff --git a/doc/html/_static/doctools.js b/doc/html/_static/doctools.js
new file mode 100644
index 0000000..8b9bd2c
--- /dev/null
+++ b/doc/html/_static/doctools.js
@@ -0,0 +1,247 @@
+/*
+ * doctools.js
+ * ~~~~~~~~~~~
+ *
+ * Sphinx JavaScript utilties for all documentation.
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+/**
+ * select a different prefix for underscore
+ */
+$u = _.noConflict();
+
+/**
+ * make the code below compatible with browsers without
+ * an installed firebug like debugger
+if (!window.console || !console.firebug) {
+ var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
+ "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
+ "profile", "profileEnd"];
+ window.console = {};
+ for (var i = 0; i < names.length; ++i)
+ window.console[names[i]] = function() {};
+}
+ */
+
+/**
+ * small helper function to urldecode strings
+ */
+jQuery.urldecode = function(x) {
+ return decodeURIComponent(x).replace(/\+/g, ' ');
+}
+
+/**
+ * small helper function to urlencode strings
+ */
+jQuery.urlencode = encodeURIComponent;
+
+/**
+ * This function returns the parsed url parameters of the
+ * current request. Multiple values per key are supported,
+ * it will always return arrays of strings for the value parts.
+ */
+jQuery.getQueryParameters = function(s) {
+ if (typeof s == 'undefined')
+ s = document.location.search;
+ var parts = s.substr(s.indexOf('?') + 1).split('&');
+ var result = {};
+ for (var i = 0; i < parts.length; i++) {
+ var tmp = parts[i].split('=', 2);
+ var key = jQuery.urldecode(tmp[0]);
+ var value = jQuery.urldecode(tmp[1]);
+ if (key in result)
+ result[key].push(value);
+ else
+ result[key] = [value];
+ }
+ return result;
+};
+
+/**
+ * small function to check if an array contains
+ * a given item.
+ */
+jQuery.contains = function(arr, item) {
+ for (var i = 0; i < arr.length; i++) {
+ if (arr[i] == item)
+ return true;
+ }
+ return false;
+};
+
+/**
+ * highlight a given string on a jquery object by wrapping it in
+ * span elements with the given class name.
+ */
+jQuery.fn.highlightText = function(text, className) {
+ function highlight(node) {
+ if (node.nodeType == 3) {
+ var val = node.nodeValue;
+ var pos = val.toLowerCase().indexOf(text);
+ if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
+ var span = document.createElement("span");
+ span.className = className;
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ node.parentNode.insertBefore(span, node.parentNode.insertBefore(
+ document.createTextNode(val.substr(pos + text.length)),
+ node.nextSibling));
+ node.nodeValue = val.substr(0, pos);
+ }
+ }
+ else if (!jQuery(node).is("button, select, textarea")) {
+ jQuery.each(node.childNodes, function() {
+ highlight(this);
+ });
+ }
+ }
+ return this.each(function() {
+ highlight(this);
+ });
+};
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+var Documentation = {
+
+ init : function() {
+ this.fixFirefoxAnchorBug();
+ this.highlightSearchWords();
+ this.initIndexTable();
+ },
+
+ /**
+ * i18n support
+ */
+ TRANSLATIONS : {},
+ PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
+ LOCALE : 'unknown',
+
+ // gettext and ngettext don't access this so that the functions
+ // can safely bound to a different name (_ = Documentation.gettext)
+ gettext : function(string) {
+ var translated = Documentation.TRANSLATIONS[string];
+ if (typeof translated == 'undefined')
+ return string;
+ return (typeof translated == 'string') ? translated : translated[0];
+ },
+
+ ngettext : function(singular, plural, n) {
+ var translated = Documentation.TRANSLATIONS[singular];
+ if (typeof translated == 'undefined')
+ return (n == 1) ? singular : plural;
+ return translated[Documentation.PLURAL_EXPR(n)];
+ },
+
+ addTranslations : function(catalog) {
+ for (var key in catalog.messages)
+ this.TRANSLATIONS[key] = catalog.messages[key];
+ this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
+ this.LOCALE = catalog.locale;
+ },
+
+ /**
+ * add context elements like header anchor links
+ */
+ addContextElements : function() {
+ $('div[id] > :header:first').each(function() {
+ $('<a class="headerlink">\u00B6</a>').
+ attr('href', '#' + this.id).
+ attr('title', _('Permalink to this headline')).
+ appendTo(this);
+ });
+ $('dt[id]').each(function() {
+ $('<a class="headerlink">\u00B6</a>').
+ attr('href', '#' + this.id).
+ attr('title', _('Permalink to this definition')).
+ appendTo(this);
+ });
+ },
+
+ /**
+ * workaround a firefox stupidity
+ */
+ fixFirefoxAnchorBug : function() {
+ if (document.location.hash && $.browser.mozilla)
+ window.setTimeout(function() {
+ document.location.href += '';
+ }, 10);
+ },
+
+ /**
+ * highlight the search words provided in the url in the text
+ */
+ highlightSearchWords : function() {
+ var params = $.getQueryParameters();
+ var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
+ if (terms.length) {
+ var body = $('div.body');
+ window.setTimeout(function() {
+ $.each(terms, function() {
+ body.highlightText(this.toLowerCase(), 'highlighted');
+ });
+ }, 10);
+ $('<li class="highlight-link"><a href="javascript:Documentation.' +
+ 'hideSearchWords()">' + _('Hide Search Matches') + '</a></li>')
+ .appendTo($('.sidebar .this-page-menu'));
+ }
+ },
+
+ /**
+ * init the domain index toggle buttons
+ */
+ initIndexTable : function() {
+ var togglers = $('img.toggler').click(function() {
+ var src = $(this).attr('src');
+ var idnum = $(this).attr('id').substr(7);
+ $('tr.cg-' + idnum).toggle();
+ if (src.substr(-9) == 'minus.png')
+ $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
+ else
+ $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
+ }).css('display', '');
+ if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
+ togglers.click();
+ }
+ },
+
+ /**
+ * helper function to hide the search marks again
+ */
+ hideSearchWords : function() {
+ $('.sidebar .this-page-menu li.highlight-link').fadeOut(300);
+ $('span.highlighted').removeClass('highlighted');
+ },
+
+ /**
+ * make the url absolute
+ */
+ makeURL : function(relativeURL) {
+ return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
+ },
+
+ /**
+ * get the current relative url
+ */
+ getCurrentURL : function() {
+ var path = document.location.pathname;
+ var parts = path.split(/\//);
+ $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
+ if (this == '..')
+ parts.pop();
+ });
+ var url = parts.join('/');
+ return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
+ }
+};
+
+// quick alias for translations
+_ = Documentation.gettext;
+
+$(document).ready(function() {
+ Documentation.init();
+});
diff --git a/doc/html/_static/down-pressed.png b/doc/html/_static/down-pressed.png
new file mode 100644
index 0000000..6f7ad78
--- /dev/null
+++ b/doc/html/_static/down-pressed.png
Binary files differ
diff --git a/doc/html/_static/down.png b/doc/html/_static/down.png
new file mode 100644
index 0000000..3003a88
--- /dev/null
+++ b/doc/html/_static/down.png
Binary files differ
diff --git a/doc/html/_static/file.png b/doc/html/_static/file.png
new file mode 100644
index 0000000..d18082e
--- /dev/null
+++ b/doc/html/_static/file.png
Binary files differ
diff --git a/doc/html/_static/jquery.js b/doc/html/_static/jquery.js
new file mode 100644
index 0000000..7c24308
--- /dev/null
+++ b/doc/html/_static/jquery.js
@@ -0,0 +1,154 @@
+/*!
+ * jQuery JavaScript Library v1.4.2
+ * http://jquery.com/
+ *
+ * Copyright 2010, John Resig
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ * Copyright 2010, The Dojo Foundation
+ * Released under the MIT, BSD, and GPL Licenses.
+ *
+ * Date: Sat Feb 13 22:33:48 2010 -0500
+ */
+(function(A,w){function ma(){if(!c.isReady){try{s.documentElement.doScroll("left")}catch(a){setTimeout(ma,1);return}c.ready()}}function Qa(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}function X(a,b,d,f,e,j){var i=a.length;if(typeof b==="object"){for(var o in b)X(a,o,b[o],f,e,d);return a}if(d!==w){f=!j&&f&&c.isFunction(d);for(o=0;o<i;o++)e(a[o],b,f?d.call(a[o],o,e(a[o],b)):d,j);return a}return i?
+e(a[0],b):w}function J(){return(new Date).getTime()}function Y(){return false}function Z(){return true}function na(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function oa(a){var b,d=[],f=[],e=arguments,j,i,o,k,n,r;i=c.data(this,"events");if(!(a.liveFired===this||!i||!i.live||a.button&&a.type==="click")){a.liveFired=this;var u=i.live.slice(0);for(k=0;k<u.length;k++){i=u[k];i.origType.replace(O,"")===a.type?f.push(i.selector):u.splice(k--,1)}j=c(a.target).closest(f,a.currentTarget);n=0;for(r=
+j.length;n<r;n++)for(k=0;k<u.length;k++){i=u[k];if(j[n].selector===i.selector){o=j[n].elem;f=null;if(i.preType==="mouseenter"||i.preType==="mouseleave")f=c(a.relatedTarget).closest(i.selector)[0];if(!f||f!==o)d.push({elem:o,handleObj:i})}}n=0;for(r=d.length;n<r;n++){j=d[n];a.currentTarget=j.elem;a.data=j.handleObj.data;a.handleObj=j.handleObj;if(j.handleObj.origHandler.apply(j.elem,e)===false){b=false;break}}return b}}function pa(a,b){return"live."+(a&&a!=="*"?a+".":"")+b.replace(/\./g,"`").replace(/ /g,
+"&")}function qa(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function ra(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var f=c.data(a[d++]),e=c.data(this,f);if(f=f&&f.events){delete e.handle;e.events={};for(var j in f)for(var i in f[j])c.event.add(this,j,f[j][i],f[j][i].data)}}})}function sa(a,b,d){var f,e,j;b=b&&b[0]?b[0].ownerDocument||b[0]:s;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===s&&!ta.test(a[0])&&(c.support.checkClone||!ua.test(a[0]))){e=
+true;if(j=c.fragments[a[0]])if(j!==1)f=j}if(!f){f=b.createDocumentFragment();c.clean(a,b,f,d)}if(e)c.fragments[a[0]]=j?f:1;return{fragment:f,cacheable:e}}function K(a,b){var d={};c.each(va.concat.apply([],va.slice(0,b)),function(){d[this]=a});return d}function wa(a){return"scrollTo"in a&&a.document?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var c=function(a,b){return new c.fn.init(a,b)},Ra=A.jQuery,Sa=A.$,s=A.document,T,Ta=/^[^<]*(<[\w\W]+>)[^>]*$|^#([\w-]+)$/,Ua=/^.[^:#\[\.,]*$/,Va=/\S/,
+Wa=/^(\s|\u00A0)+|(\s|\u00A0)+$/g,Xa=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,P=navigator.userAgent,xa=false,Q=[],L,$=Object.prototype.toString,aa=Object.prototype.hasOwnProperty,ba=Array.prototype.push,R=Array.prototype.slice,ya=Array.prototype.indexOf;c.fn=c.prototype={init:function(a,b){var d,f;if(!a)return this;if(a.nodeType){this.context=this[0]=a;this.length=1;return this}if(a==="body"&&!b){this.context=s;this[0]=s.body;this.selector="body";this.length=1;return this}if(typeof a==="string")if((d=Ta.exec(a))&&
+(d[1]||!b))if(d[1]){f=b?b.ownerDocument||b:s;if(a=Xa.exec(a))if(c.isPlainObject(b)){a=[s.createElement(a[1])];c.fn.attr.call(a,b,true)}else a=[f.createElement(a[1])];else{a=sa([d[1]],[f]);a=(a.cacheable?a.fragment.cloneNode(true):a.fragment).childNodes}return c.merge(this,a)}else{if(b=s.getElementById(d[2])){if(b.id!==d[2])return T.find(a);this.length=1;this[0]=b}this.context=s;this.selector=a;return this}else if(!b&&/^\w+$/.test(a)){this.selector=a;this.context=s;a=s.getElementsByTagName(a);return c.merge(this,
+a)}else return!b||b.jquery?(b||T).find(a):c(b).find(a);else if(c.isFunction(a))return T.ready(a);if(a.selector!==w){this.selector=a.selector;this.context=a.context}return c.makeArray(a,this)},selector:"",jquery:"1.4.2",length:0,size:function(){return this.length},toArray:function(){return R.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this.slice(a)[0]:this[a]},pushStack:function(a,b,d){var f=c();c.isArray(a)?ba.apply(f,a):c.merge(f,a);f.prevObject=this;f.context=this.context;if(b===
+"find")f.selector=this.selector+(this.selector?" ":"")+d;else if(b)f.selector=this.selector+"."+b+"("+d+")";return f},each:function(a,b){return c.each(this,a,b)},ready:function(a){c.bindReady();if(c.isReady)a.call(s,c);else Q&&Q.push(a);return this},eq:function(a){return a===-1?this.slice(a):this.slice(a,+a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(R.apply(this,arguments),"slice",R.call(arguments).join(","))},map:function(a){return this.pushStack(c.map(this,
+function(b,d){return a.call(b,d,b)}))},end:function(){return this.prevObject||c(null)},push:ba,sort:[].sort,splice:[].splice};c.fn.init.prototype=c.fn;c.extend=c.fn.extend=function(){var a=arguments[0]||{},b=1,d=arguments.length,f=false,e,j,i,o;if(typeof a==="boolean"){f=a;a=arguments[1]||{};b=2}if(typeof a!=="object"&&!c.isFunction(a))a={};if(d===b){a=this;--b}for(;b<d;b++)if((e=arguments[b])!=null)for(j in e){i=a[j];o=e[j];if(a!==o)if(f&&o&&(c.isPlainObject(o)||c.isArray(o))){i=i&&(c.isPlainObject(i)||
+c.isArray(i))?i:c.isArray(o)?[]:{};a[j]=c.extend(f,i,o)}else if(o!==w)a[j]=o}return a};c.extend({noConflict:function(a){A.$=Sa;if(a)A.jQuery=Ra;return c},isReady:false,ready:function(){if(!c.isReady){if(!s.body)return setTimeout(c.ready,13);c.isReady=true;if(Q){for(var a,b=0;a=Q[b++];)a.call(s,c);Q=null}c.fn.triggerHandler&&c(s).triggerHandler("ready")}},bindReady:function(){if(!xa){xa=true;if(s.readyState==="complete")return c.ready();if(s.addEventListener){s.addEventListener("DOMContentLoaded",
+L,false);A.addEventListener("load",c.ready,false)}else if(s.attachEvent){s.attachEvent("onreadystatechange",L);A.attachEvent("onload",c.ready);var a=false;try{a=A.frameElement==null}catch(b){}s.documentElement.doScroll&&a&&ma()}}},isFunction:function(a){return $.call(a)==="[object Function]"},isArray:function(a){return $.call(a)==="[object Array]"},isPlainObject:function(a){if(!a||$.call(a)!=="[object Object]"||a.nodeType||a.setInterval)return false;if(a.constructor&&!aa.call(a,"constructor")&&!aa.call(a.constructor.prototype,
+"isPrototypeOf"))return false;var b;for(b in a);return b===w||aa.call(a,b)},isEmptyObject:function(a){for(var b in a)return false;return true},error:function(a){throw a;},parseJSON:function(a){if(typeof a!=="string"||!a)return null;a=c.trim(a);if(/^[\],:{}\s]*$/.test(a.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,"@").replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,"]").replace(/(?:^|:|,)(?:\s*\[)+/g,"")))return A.JSON&&A.JSON.parse?A.JSON.parse(a):(new Function("return "+
+a))();else c.error("Invalid JSON: "+a)},noop:function(){},globalEval:function(a){if(a&&Va.test(a)){var b=s.getElementsByTagName("head")[0]||s.documentElement,d=s.createElement("script");d.type="text/javascript";if(c.support.scriptEval)d.appendChild(s.createTextNode(a));else d.text=a;b.insertBefore(d,b.firstChild);b.removeChild(d)}},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,b,d){var f,e=0,j=a.length,i=j===w||c.isFunction(a);if(d)if(i)for(f in a){if(b.apply(a[f],
+d)===false)break}else for(;e<j;){if(b.apply(a[e++],d)===false)break}else if(i)for(f in a){if(b.call(a[f],f,a[f])===false)break}else for(d=a[0];e<j&&b.call(d,e,d)!==false;d=a[++e]);return a},trim:function(a){return(a||"").replace(Wa,"")},makeArray:function(a,b){b=b||[];if(a!=null)a.length==null||typeof a==="string"||c.isFunction(a)||typeof a!=="function"&&a.setInterval?ba.call(b,a):c.merge(b,a);return b},inArray:function(a,b){if(b.indexOf)return b.indexOf(a);for(var d=0,f=b.length;d<f;d++)if(b[d]===
+a)return d;return-1},merge:function(a,b){var d=a.length,f=0;if(typeof b.length==="number")for(var e=b.length;f<e;f++)a[d++]=b[f];else for(;b[f]!==w;)a[d++]=b[f++];a.length=d;return a},grep:function(a,b,d){for(var f=[],e=0,j=a.length;e<j;e++)!d!==!b(a[e],e)&&f.push(a[e]);return f},map:function(a,b,d){for(var f=[],e,j=0,i=a.length;j<i;j++){e=b(a[j],j,d);if(e!=null)f[f.length]=e}return f.concat.apply([],f)},guid:1,proxy:function(a,b,d){if(arguments.length===2)if(typeof b==="string"){d=a;a=d[b];b=w}else if(b&&
+!c.isFunction(b)){d=b;b=w}if(!b&&a)b=function(){return a.apply(d||this,arguments)};if(a)b.guid=a.guid=a.guid||b.guid||c.guid++;return b},uaMatch:function(a){a=a.toLowerCase();a=/(webkit)[ \/]([\w.]+)/.exec(a)||/(opera)(?:.*version)?[ \/]([\w.]+)/.exec(a)||/(msie) ([\w.]+)/.exec(a)||!/compatible/.test(a)&&/(mozilla)(?:.*? rv:([\w.]+))?/.exec(a)||[];return{browser:a[1]||"",version:a[2]||"0"}},browser:{}});P=c.uaMatch(P);if(P.browser){c.browser[P.browser]=true;c.browser.version=P.version}if(c.browser.webkit)c.browser.safari=
+true;if(ya)c.inArray=function(a,b){return ya.call(b,a)};T=c(s);if(s.addEventListener)L=function(){s.removeEventListener("DOMContentLoaded",L,false);c.ready()};else if(s.attachEvent)L=function(){if(s.readyState==="complete"){s.detachEvent("onreadystatechange",L);c.ready()}};(function(){c.support={};var a=s.documentElement,b=s.createElement("script"),d=s.createElement("div"),f="script"+J();d.style.display="none";d.innerHTML=" <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";
+var e=d.getElementsByTagName("*"),j=d.getElementsByTagName("a")[0];if(!(!e||!e.length||!j)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(j.getAttribute("style")),hrefNormalized:j.getAttribute("href")==="/a",opacity:/^0.55$/.test(j.style.opacity),cssFloat:!!j.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:s.createElement("select").appendChild(s.createElement("option")).selected,
+parentNode:d.removeChild(d.appendChild(s.createElement("div"))).parentNode===null,deleteExpando:true,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null};b.type="text/javascript";try{b.appendChild(s.createTextNode("window."+f+"=1;"))}catch(i){}a.insertBefore(b,a.firstChild);if(A[f]){c.support.scriptEval=true;delete A[f]}try{delete b.test}catch(o){c.support.deleteExpando=false}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function k(){c.support.noCloneEvent=
+false;d.detachEvent("onclick",k)});d.cloneNode(true).fireEvent("onclick")}d=s.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=s.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var k=s.createElement("div");k.style.width=k.style.paddingLeft="1px";s.body.appendChild(k);c.boxModel=c.support.boxModel=k.offsetWidth===2;s.body.removeChild(k).style.display="none"});a=function(k){var n=
+s.createElement("div");k="on"+k;var r=k in n;if(!r){n.setAttribute(k,"return;");r=typeof n[k]==="function"}return r};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=e=j=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var G="jQuery"+J(),Ya=0,za={};c.extend({cache:{},expando:G,noData:{embed:true,object:true,
+applet:true},data:function(a,b,d){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var f=a[G],e=c.cache;if(!f&&typeof b==="string"&&d===w)return null;f||(f=++Ya);if(typeof b==="object"){a[G]=f;e[f]=c.extend(true,{},b)}else if(!e[f]){a[G]=f;e[f]={}}a=e[f];if(d!==w)a[b]=d;return typeof b==="string"?a[b]:a}},removeData:function(a,b){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var d=a[G],f=c.cache,e=f[d];if(b){if(e){delete e[b];c.isEmptyObject(e)&&c.removeData(a)}}else{if(c.support.deleteExpando)delete a[c.expando];
+else a.removeAttribute&&a.removeAttribute(c.expando);delete f[d]}}}});c.fn.extend({data:function(a,b){if(typeof a==="undefined"&&this.length)return c.data(this[0]);else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===w){var f=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(f===w&&this.length)f=c.data(this[0],a);return f===w&&d[1]?this.data(d[0]):f}else return this.trigger("setData"+d[1]+"!",[d[0],b]).each(function(){c.data(this,
+a,b)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var f=c.data(a,b);if(!d)return f||[];if(!f||c.isArray(d))f=c.data(a,b,c.makeArray(d));else f.push(d);return f}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),f=d.shift();if(f==="inprogress")f=d.shift();if(f){b==="fx"&&d.unshift("inprogress");f.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===
+w)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var Aa=/[\n\t]/g,ca=/\s+/,Za=/\r/g,$a=/href|src|style/,ab=/(button|input)/i,bb=/(button|input|object|select|textarea)/i,
+cb=/^(a|area)$/i,Ba=/radio|checkbox/;c.fn.extend({attr:function(a,b){return X(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(n){var r=c(this);r.addClass(a.call(this,n,r.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1)if(e.className){for(var j=" "+e.className+" ",
+i=e.className,o=0,k=b.length;o<k;o++)if(j.indexOf(" "+b[o]+" ")<0)i+=" "+b[o];e.className=c.trim(i)}else e.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(k){var n=c(this);n.removeClass(a.call(this,k,n.attr("class")))});if(a&&typeof a==="string"||a===w)for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1&&e.className)if(a){for(var j=(" "+e.className+" ").replace(Aa," "),i=0,o=b.length;i<o;i++)j=j.replace(" "+b[i]+" ",
+" ");e.className=c.trim(j)}else e.className=""}return this},toggleClass:function(a,b){var d=typeof a,f=typeof b==="boolean";if(c.isFunction(a))return this.each(function(e){var j=c(this);j.toggleClass(a.call(this,e,j.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var e,j=0,i=c(this),o=b,k=a.split(ca);e=k[j++];){o=f?o:!i.hasClass(e);i[o?"addClass":"removeClass"](e)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=
+this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(Aa," ").indexOf(a)>-1)return true;return false},val:function(a){if(a===w){var b=this[0];if(b){if(c.nodeName(b,"option"))return(b.attributes.value||{}).specified?b.value:b.text;if(c.nodeName(b,"select")){var d=b.selectedIndex,f=[],e=b.options;b=b.type==="select-one";if(d<0)return null;var j=b?d:0;for(d=b?d+1:e.length;j<d;j++){var i=
+e[j];if(i.selected){a=c(i).val();if(b)return a;f.push(a)}}return f}if(Ba.test(b.type)&&!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Za,"")}return w}var o=c.isFunction(a);return this.each(function(k){var n=c(this),r=a;if(this.nodeType===1){if(o)r=a.call(this,k,n.val());if(typeof r==="number")r+="";if(c.isArray(r)&&Ba.test(this.type))this.checked=c.inArray(n.val(),r)>=0;else if(c.nodeName(this,"select")){var u=c.makeArray(r);c("option",this).each(function(){this.selected=
+c.inArray(c(this).val(),u)>=0});if(!u.length)this.selectedIndex=-1}else this.value=r}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,f){if(!a||a.nodeType===3||a.nodeType===8)return w;if(f&&b in c.attrFn)return c(a)[b](d);f=a.nodeType!==1||!c.isXMLDoc(a);var e=d!==w;b=f&&c.props[b]||b;if(a.nodeType===1){var j=$a.test(b);if(b in a&&f&&!j){if(e){b==="type"&&ab.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");
+a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:bb.test(a.nodeName)||cb.test(a.nodeName)&&a.href?0:w;return a[b]}if(!c.support.style&&f&&b==="style"){if(e)a.style.cssText=""+d;return a.style.cssText}e&&a.setAttribute(b,""+d);a=!c.support.hrefNormalized&&f&&j?a.getAttribute(b,2):a.getAttribute(b);return a===null?w:a}return c.style(a,b,d)}});var O=/\.(.*)$/,db=function(a){return a.replace(/[^\w\s\.\|`]/g,
+function(b){return"\\"+b})};c.event={add:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){if(a.setInterval&&a!==A&&!a.frameElement)a=A;var e,j;if(d.handler){e=d;d=e.handler}if(!d.guid)d.guid=c.guid++;if(j=c.data(a)){var i=j.events=j.events||{},o=j.handle;if(!o)j.handle=o=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(o.elem,arguments):w};o.elem=a;b=b.split(" ");for(var k,n=0,r;k=b[n++];){j=e?c.extend({},e):{handler:d,data:f};if(k.indexOf(".")>-1){r=k.split(".");
+k=r.shift();j.namespace=r.slice(0).sort().join(".")}else{r=[];j.namespace=""}j.type=k;j.guid=d.guid;var u=i[k],z=c.event.special[k]||{};if(!u){u=i[k]=[];if(!z.setup||z.setup.call(a,f,r,o)===false)if(a.addEventListener)a.addEventListener(k,o,false);else a.attachEvent&&a.attachEvent("on"+k,o)}if(z.add){z.add.call(a,j);if(!j.handler.guid)j.handler.guid=d.guid}u.push(j);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){var e,j=0,i,o,k,n,r,u,z=c.data(a),
+C=z&&z.events;if(z&&C){if(b&&b.type){d=b.handler;b=b.type}if(!b||typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(e in C)c.event.remove(a,e+b)}else{for(b=b.split(" ");e=b[j++];){n=e;i=e.indexOf(".")<0;o=[];if(!i){o=e.split(".");e=o.shift();k=new RegExp("(^|\\.)"+c.map(o.slice(0).sort(),db).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(r=C[e])if(d){n=c.event.special[e]||{};for(B=f||0;B<r.length;B++){u=r[B];if(d.guid===u.guid){if(i||k.test(u.namespace)){f==null&&r.splice(B--,1);n.remove&&n.remove.call(a,u)}if(f!=
+null)break}}if(r.length===0||f!=null&&r.length===1){if(!n.teardown||n.teardown.call(a,o)===false)Ca(a,e,z.handle);delete C[e]}}else for(var B=0;B<r.length;B++){u=r[B];if(i||k.test(u.namespace)){c.event.remove(a,n,u.handler,B);r.splice(B--,1)}}}if(c.isEmptyObject(C)){if(b=z.handle)b.elem=null;delete z.events;delete z.handle;c.isEmptyObject(z)&&c.removeData(a)}}}}},trigger:function(a,b,d,f){var e=a.type||a;if(!f){a=typeof a==="object"?a[G]?a:c.extend(c.Event(e),a):c.Event(e);if(e.indexOf("!")>=0){a.type=
+e=e.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[e]&&c.each(c.cache,function(){this.events&&this.events[e]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return w;a.result=w;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(f=c.data(d,"handle"))&&f.apply(d,b);f=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+e]&&d["on"+e].apply(d,b)===false)a.result=false}catch(j){}if(!a.isPropagationStopped()&&
+f)c.event.trigger(a,b,f,true);else if(!a.isDefaultPrevented()){f=a.target;var i,o=c.nodeName(f,"a")&&e==="click",k=c.event.special[e]||{};if((!k._default||k._default.call(d,a)===false)&&!o&&!(f&&f.nodeName&&c.noData[f.nodeName.toLowerCase()])){try{if(f[e]){if(i=f["on"+e])f["on"+e]=null;c.event.triggered=true;f[e]()}}catch(n){}if(i)f["on"+e]=i;c.event.triggered=false}}},handle:function(a){var b,d,f,e;a=arguments[0]=c.event.fix(a||A.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;
+if(!b){d=a.type.split(".");a.type=d.shift();f=new RegExp("(^|\\.)"+d.slice(0).sort().join("\\.(?:.*\\.)?")+"(\\.|$)")}e=c.data(this,"events");d=e[a.type];if(e&&d){d=d.slice(0);e=0;for(var j=d.length;e<j;e++){var i=d[e];if(b||f.test(i.namespace)){a.handler=i.handler;a.data=i.data;a.handleObj=i;i=i.handler.apply(this,arguments);if(i!==w){a.result=i;if(i===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
+fix:function(a){if(a[G])return a;var b=a;a=c.Event(b);for(var d=this.props.length,f;d;){f=this.props[--d];a[f]=b[f]}if(!a.target)a.target=a.srcElement||s;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=s.documentElement;d=s.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
+d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(!a.which&&(a.charCode||a.charCode===0?a.charCode:a.keyCode))a.which=a.charCode||a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==w)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,a.origType,c.extend({},a,{handler:oa}))},remove:function(a){var b=true,d=a.origType.replace(O,"");c.each(c.data(this,
+"events").live||[],function(){if(d===this.origType.replace(O,""))return b=false});b&&c.event.remove(this,a.origType,oa)}},beforeunload:{setup:function(a,b,d){if(this.setInterval)this.onbeforeunload=d;return false},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};var Ca=s.removeEventListener?function(a,b,d){a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=
+a;this.type=a.type}else this.type=a;this.timeStamp=J();this[G]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=Z;var a=this.originalEvent;if(a){a.preventDefault&&a.preventDefault();a.returnValue=false}},stopPropagation:function(){this.isPropagationStopped=Z;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=Z;this.stopPropagation()},isDefaultPrevented:Y,isPropagationStopped:Y,
+isImmediatePropagationStopped:Y};var Da=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},Ea=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?Ea:Da,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?Ea:Da)}}});if(!c.support.submitBubbles)c.event.special.submit=
+{setup:function(){if(this.nodeName.toLowerCase()!=="form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length)return na("submit",this,arguments)});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13)return na("submit",this,arguments)})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};
+if(!c.support.changeBubbles){var da=/textarea|input|select/i,ea,Fa=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(f){return f.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},fa=function(a,b){var d=a.target,f,e;if(!(!da.test(d.nodeName)||d.readOnly)){f=c.data(d,"_change_data");e=Fa(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",
+e);if(!(f===w||e===f))if(f!=null||e){a.type="change";return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:fa,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return fa.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return fa.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,
+"_change_data",Fa(a))}},setup:function(){if(this.type==="file")return false;for(var a in ea)c.event.add(this,a+".specialChange",ea[a]);return da.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return da.test(this.nodeName)}};ea=c.event.special.change.filters}s.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(f){f=c.event.fix(f);f.type=b;return c.event.handle.call(this,f)}c.event.special[b]={setup:function(){this.addEventListener(a,
+d,true)},teardown:function(){this.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,f,e){if(typeof d==="object"){for(var j in d)this[b](j,f,d[j],e);return this}if(c.isFunction(f)){e=f;f=w}var i=b==="one"?c.proxy(e,function(k){c(this).unbind(k,i);return e.apply(this,arguments)}):e;if(d==="unload"&&b!=="one")this.one(d,f,e);else{j=0;for(var o=this.length;j<o;j++)c.event.add(this[j],d,i,f)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&
+!a.preventDefault)for(var d in a)this.unbind(d,a[d]);else{d=0;for(var f=this.length;d<f;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,f){return this.live(b,d,f,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){a=c.Event(a);a.preventDefault();a.stopPropagation();c.event.trigger(a,b,this[0]);return a.result}},
+toggle:function(a){for(var b=arguments,d=1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(f){var e=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,e+1);f.preventDefault();return b[e].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var Ga={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,f,e,j){var i,o=0,k,n,r=j||this.selector,
+u=j?this:c(this.context);if(c.isFunction(f)){e=f;f=w}for(d=(d||"").split(" ");(i=d[o++])!=null;){j=O.exec(i);k="";if(j){k=j[0];i=i.replace(O,"")}if(i==="hover")d.push("mouseenter"+k,"mouseleave"+k);else{n=i;if(i==="focus"||i==="blur"){d.push(Ga[i]+k);i+=k}else i=(Ga[i]||i)+k;b==="live"?u.each(function(){c.event.add(this,pa(i,r),{data:f,selector:r,handler:e,origType:i,origHandler:e,preType:n})}):u.unbind(pa(i,r),e)}}return this}});c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),
+function(a,b){c.fn[b]=function(d){return d?this.bind(b,d):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});A.attachEvent&&!A.addEventListener&&A.attachEvent("onunload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});(function(){function a(g){for(var h="",l,m=0;g[m];m++){l=g[m];if(l.nodeType===3||l.nodeType===4)h+=l.nodeValue;else if(l.nodeType!==8)h+=a(l.childNodes)}return h}function b(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];
+if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1&&!p){t.sizcache=l;t.sizset=q}if(t.nodeName.toLowerCase()===h){y=t;break}t=t[g]}m[q]=y}}}function d(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1){if(!p){t.sizcache=l;t.sizset=q}if(typeof h!=="string"){if(t===h){y=true;break}}else if(k.filter(h,[t]).length>0){y=t;break}}t=t[g]}m[q]=y}}}var f=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
+e=0,j=Object.prototype.toString,i=false,o=true;[0,0].sort(function(){o=false;return 0});var k=function(g,h,l,m){l=l||[];var q=h=h||s;if(h.nodeType!==1&&h.nodeType!==9)return[];if(!g||typeof g!=="string")return l;for(var p=[],v,t,y,S,H=true,M=x(h),I=g;(f.exec(""),v=f.exec(I))!==null;){I=v[3];p.push(v[1]);if(v[2]){S=v[3];break}}if(p.length>1&&r.exec(g))if(p.length===2&&n.relative[p[0]])t=ga(p[0]+p[1],h);else for(t=n.relative[p[0]]?[h]:k(p.shift(),h);p.length;){g=p.shift();if(n.relative[g])g+=p.shift();
+t=ga(g,t)}else{if(!m&&p.length>1&&h.nodeType===9&&!M&&n.match.ID.test(p[0])&&!n.match.ID.test(p[p.length-1])){v=k.find(p.shift(),h,M);h=v.expr?k.filter(v.expr,v.set)[0]:v.set[0]}if(h){v=m?{expr:p.pop(),set:z(m)}:k.find(p.pop(),p.length===1&&(p[0]==="~"||p[0]==="+")&&h.parentNode?h.parentNode:h,M);t=v.expr?k.filter(v.expr,v.set):v.set;if(p.length>0)y=z(t);else H=false;for(;p.length;){var D=p.pop();v=D;if(n.relative[D])v=p.pop();else D="";if(v==null)v=h;n.relative[D](y,v,M)}}else y=[]}y||(y=t);y||k.error(D||
+g);if(j.call(y)==="[object Array]")if(H)if(h&&h.nodeType===1)for(g=0;y[g]!=null;g++){if(y[g]&&(y[g]===true||y[g].nodeType===1&&E(h,y[g])))l.push(t[g])}else for(g=0;y[g]!=null;g++)y[g]&&y[g].nodeType===1&&l.push(t[g]);else l.push.apply(l,y);else z(y,l);if(S){k(S,q,l,m);k.uniqueSort(l)}return l};k.uniqueSort=function(g){if(B){i=o;g.sort(B);if(i)for(var h=1;h<g.length;h++)g[h]===g[h-1]&&g.splice(h--,1)}return g};k.matches=function(g,h){return k(g,null,null,h)};k.find=function(g,h,l){var m,q;if(!g)return[];
+for(var p=0,v=n.order.length;p<v;p++){var t=n.order[p];if(q=n.leftMatch[t].exec(g)){var y=q[1];q.splice(1,1);if(y.substr(y.length-1)!=="\\"){q[1]=(q[1]||"").replace(/\\/g,"");m=n.find[t](q,h,l);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=h.getElementsByTagName("*"));return{set:m,expr:g}};k.filter=function(g,h,l,m){for(var q=g,p=[],v=h,t,y,S=h&&h[0]&&x(h[0]);g&&h.length;){for(var H in n.filter)if((t=n.leftMatch[H].exec(g))!=null&&t[2]){var M=n.filter[H],I,D;D=t[1];y=false;t.splice(1,1);if(D.substr(D.length-
+1)!=="\\"){if(v===p)p=[];if(n.preFilter[H])if(t=n.preFilter[H](t,v,l,p,m,S)){if(t===true)continue}else y=I=true;if(t)for(var U=0;(D=v[U])!=null;U++)if(D){I=M(D,t,U,v);var Ha=m^!!I;if(l&&I!=null)if(Ha)y=true;else v[U]=false;else if(Ha){p.push(D);y=true}}if(I!==w){l||(v=p);g=g.replace(n.match[H],"");if(!y)return[];break}}}if(g===q)if(y==null)k.error(g);else break;q=g}return v};k.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=k.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF-]|\\.)+)/,
+CLASS:/\.((?:[\w\u00c0-\uFFFF-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},
+relative:{"+":function(g,h){var l=typeof h==="string",m=l&&!/\W/.test(h);l=l&&!m;if(m)h=h.toLowerCase();m=0;for(var q=g.length,p;m<q;m++)if(p=g[m]){for(;(p=p.previousSibling)&&p.nodeType!==1;);g[m]=l||p&&p.nodeName.toLowerCase()===h?p||false:p===h}l&&k.filter(h,g,true)},">":function(g,h){var l=typeof h==="string";if(l&&!/\W/.test(h)){h=h.toLowerCase();for(var m=0,q=g.length;m<q;m++){var p=g[m];if(p){l=p.parentNode;g[m]=l.nodeName.toLowerCase()===h?l:false}}}else{m=0;for(q=g.length;m<q;m++)if(p=g[m])g[m]=
+l?p.parentNode:p.parentNode===h;l&&k.filter(h,g,true)}},"":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("parentNode",h,m,g,p,l)},"~":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("previousSibling",h,m,g,p,l)}},find:{ID:function(g,h,l){if(typeof h.getElementById!=="undefined"&&!l)return(g=h.getElementById(g[1]))?[g]:[]},NAME:function(g,h){if(typeof h.getElementsByName!=="undefined"){var l=[];
+h=h.getElementsByName(g[1]);for(var m=0,q=h.length;m<q;m++)h[m].getAttribute("name")===g[1]&&l.push(h[m]);return l.length===0?null:l}},TAG:function(g,h){return h.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,h,l,m,q,p){g=" "+g[1].replace(/\\/g,"")+" ";if(p)return g;p=0;for(var v;(v=h[p])!=null;p++)if(v)if(q^(v.className&&(" "+v.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))l||m.push(v);else if(l)h[p]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},
+CHILD:function(g){if(g[1]==="nth"){var h=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=h[1]+(h[2]||1)-0;g[3]=h[3]-0}g[0]=e++;return g},ATTR:function(g,h,l,m,q,p){h=g[1].replace(/\\/g,"");if(!p&&n.attrMap[h])g[1]=n.attrMap[h];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,h,l,m,q){if(g[1]==="not")if((f.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=k(g[3],null,null,h);else{g=k.filter(g[3],h,l,true^q);l||m.push.apply(m,
+g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,h,l){return!!k(l[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},
+text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},
+setFilters:{first:function(g,h){return h===0},last:function(g,h,l,m){return h===m.length-1},even:function(g,h){return h%2===0},odd:function(g,h){return h%2===1},lt:function(g,h,l){return h<l[3]-0},gt:function(g,h,l){return h>l[3]-0},nth:function(g,h,l){return l[3]-0===h},eq:function(g,h,l){return l[3]-0===h}},filter:{PSEUDO:function(g,h,l,m){var q=h[1],p=n.filters[q];if(p)return p(g,l,h,m);else if(q==="contains")return(g.textContent||g.innerText||a([g])||"").indexOf(h[3])>=0;else if(q==="not"){h=
+h[3];l=0;for(m=h.length;l<m;l++)if(h[l]===g)return false;return true}else k.error("Syntax error, unrecognized expression: "+q)},CHILD:function(g,h){var l=h[1],m=g;switch(l){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(l==="first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":l=h[2];var q=h[3];if(l===1&&q===0)return true;h=h[0];var p=g.parentNode;if(p&&(p.sizcache!==h||!g.nodeIndex)){var v=0;for(m=p.firstChild;m;m=
+m.nextSibling)if(m.nodeType===1)m.nodeIndex=++v;p.sizcache=h}g=g.nodeIndex-q;return l===0?g===0:g%l===0&&g/l>=0}},ID:function(g,h){return g.nodeType===1&&g.getAttribute("id")===h},TAG:function(g,h){return h==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===h},CLASS:function(g,h){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(h)>-1},ATTR:function(g,h){var l=h[1];g=n.attrHandle[l]?n.attrHandle[l](g):g[l]!=null?g[l]:g.getAttribute(l);l=g+"";var m=h[2];h=h[4];return g==null?m==="!=":m===
+"="?l===h:m==="*="?l.indexOf(h)>=0:m==="~="?(" "+l+" ").indexOf(h)>=0:!h?l&&g!==false:m==="!="?l!==h:m==="^="?l.indexOf(h)===0:m==="$="?l.substr(l.length-h.length)===h:m==="|="?l===h||l.substr(0,h.length+1)===h+"-":false},POS:function(g,h,l,m){var q=n.setFilters[h[2]];if(q)return q(g,l,h,m)}}},r=n.match.POS;for(var u in n.match){n.match[u]=new RegExp(n.match[u].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[u]=new RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[u].source.replace(/\\(\d+)/g,function(g,
+h){return"\\"+(h-0+1)}))}var z=function(g,h){g=Array.prototype.slice.call(g,0);if(h){h.push.apply(h,g);return h}return g};try{Array.prototype.slice.call(s.documentElement.childNodes,0)}catch(C){z=function(g,h){h=h||[];if(j.call(g)==="[object Array]")Array.prototype.push.apply(h,g);else if(typeof g.length==="number")for(var l=0,m=g.length;l<m;l++)h.push(g[l]);else for(l=0;g[l];l++)h.push(g[l]);return h}}var B;if(s.documentElement.compareDocumentPosition)B=function(g,h){if(!g.compareDocumentPosition||
+!h.compareDocumentPosition){if(g==h)i=true;return g.compareDocumentPosition?-1:1}g=g.compareDocumentPosition(h)&4?-1:g===h?0:1;if(g===0)i=true;return g};else if("sourceIndex"in s.documentElement)B=function(g,h){if(!g.sourceIndex||!h.sourceIndex){if(g==h)i=true;return g.sourceIndex?-1:1}g=g.sourceIndex-h.sourceIndex;if(g===0)i=true;return g};else if(s.createRange)B=function(g,h){if(!g.ownerDocument||!h.ownerDocument){if(g==h)i=true;return g.ownerDocument?-1:1}var l=g.ownerDocument.createRange(),m=
+h.ownerDocument.createRange();l.setStart(g,0);l.setEnd(g,0);m.setStart(h,0);m.setEnd(h,0);g=l.compareBoundaryPoints(Range.START_TO_END,m);if(g===0)i=true;return g};(function(){var g=s.createElement("div"),h="script"+(new Date).getTime();g.innerHTML="<a name='"+h+"'/>";var l=s.documentElement;l.insertBefore(g,l.firstChild);if(s.getElementById(h)){n.find.ID=function(m,q,p){if(typeof q.getElementById!=="undefined"&&!p)return(q=q.getElementById(m[1]))?q.id===m[1]||typeof q.getAttributeNode!=="undefined"&&
+q.getAttributeNode("id").nodeValue===m[1]?[q]:w:[]};n.filter.ID=function(m,q){var p=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&p&&p.nodeValue===q}}l.removeChild(g);l=g=null})();(function(){var g=s.createElement("div");g.appendChild(s.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(h,l){l=l.getElementsByTagName(h[1]);if(h[1]==="*"){h=[];for(var m=0;l[m];m++)l[m].nodeType===1&&h.push(l[m]);l=h}return l};g.innerHTML="<a href='#'></a>";
+if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(h){return h.getAttribute("href",2)};g=null})();s.querySelectorAll&&function(){var g=k,h=s.createElement("div");h.innerHTML="<p class='TEST'></p>";if(!(h.querySelectorAll&&h.querySelectorAll(".TEST").length===0)){k=function(m,q,p,v){q=q||s;if(!v&&q.nodeType===9&&!x(q))try{return z(q.querySelectorAll(m),p)}catch(t){}return g(m,q,p,v)};for(var l in g)k[l]=g[l];h=null}}();
+(function(){var g=s.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(h,l,m){if(typeof l.getElementsByClassName!=="undefined"&&!m)return l.getElementsByClassName(h[1])};g=null}}})();var E=s.compareDocumentPosition?function(g,h){return!!(g.compareDocumentPosition(h)&16)}:
+function(g,h){return g!==h&&(g.contains?g.contains(h):true)},x=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false},ga=function(g,h){var l=[],m="",q;for(h=h.nodeType?[h]:h;q=n.match.PSEUDO.exec(g);){m+=q[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;q=0;for(var p=h.length;q<p;q++)k(g,h[q],l);return k.filter(m,l)};c.find=k;c.expr=k.selectors;c.expr[":"]=c.expr.filters;c.unique=k.uniqueSort;c.text=a;c.isXMLDoc=x;c.contains=E})();var eb=/Until$/,fb=/^(?:parents|prevUntil|prevAll)/,
+gb=/,/;R=Array.prototype.slice;var Ia=function(a,b,d){if(c.isFunction(b))return c.grep(a,function(e,j){return!!b.call(e,j,e)===d});else if(b.nodeType)return c.grep(a,function(e){return e===b===d});else if(typeof b==="string"){var f=c.grep(a,function(e){return e.nodeType===1});if(Ua.test(b))return c.filter(b,f,!d);else b=c.filter(b,f)}return c.grep(a,function(e){return c.inArray(e,b)>=0===d})};c.fn.extend({find:function(a){for(var b=this.pushStack("","find",a),d=0,f=0,e=this.length;f<e;f++){d=b.length;
+c.find(a,this[f],b);if(f>0)for(var j=d;j<b.length;j++)for(var i=0;i<d;i++)if(b[i]===b[j]){b.splice(j--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,f=b.length;d<f;d++)if(c.contains(this,b[d]))return true})},not:function(a){return this.pushStack(Ia(this,a,false),"not",a)},filter:function(a){return this.pushStack(Ia(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a,b){if(c.isArray(a)){var d=[],f=this[0],e,j=
+{},i;if(f&&a.length){e=0;for(var o=a.length;e<o;e++){i=a[e];j[i]||(j[i]=c.expr.match.POS.test(i)?c(i,b||this.context):i)}for(;f&&f.ownerDocument&&f!==b;){for(i in j){e=j[i];if(e.jquery?e.index(f)>-1:c(f).is(e)){d.push({selector:i,elem:f});delete j[i]}}f=f.parentNode}}return d}var k=c.expr.match.POS.test(a)?c(a,b||this.context):null;return this.map(function(n,r){for(;r&&r.ownerDocument&&r!==b;){if(k?k.index(r)>-1:c(r).is(a))return r;r=r.parentNode}return null})},index:function(a){if(!a||typeof a===
+"string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){a=typeof a==="string"?c(a,b||this.context):c.makeArray(a);b=c.merge(this.get(),a);return this.pushStack(qa(a[0])||qa(b[0])?b:c.unique(b))},andSelf:function(){return this.add(this.prevObject)}});c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",
+d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?
+a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,f){var e=c.map(this,b,d);eb.test(a)||(f=d);if(f&&typeof f==="string")e=c.filter(f,e);e=this.length>1?c.unique(e):e;if((this.length>1||gb.test(f))&&fb.test(a))e=e.reverse();return this.pushStack(e,a,R.call(arguments).join(","))}});c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return c.find.matches(a,b)},dir:function(a,b,d){var f=[];for(a=a[b];a&&a.nodeType!==9&&(d===w||a.nodeType!==1||!c(a).is(d));){a.nodeType===
+1&&f.push(a);a=a[b]}return f},nth:function(a,b,d){b=b||1;for(var f=0;a;a=a[d])if(a.nodeType===1&&++f===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var Ja=/ jQuery\d+="(?:\d+|null)"/g,V=/^\s+/,Ka=/(<([\w:]+)[^>]*?)\/>/g,hb=/^(?:area|br|col|embed|hr|img|input|link|meta|param)$/i,La=/<([\w:]+)/,ib=/<tbody/i,jb=/<|&#?\w+;/,ta=/<script|<object|<embed|<option|<style/i,ua=/checked\s*(?:[^=]|=\s*.checked.)/i,Ma=function(a,b,d){return hb.test(d)?
+a:b+"></"+d+">"},F={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]};F.optgroup=F.option;F.tbody=F.tfoot=F.colgroup=F.caption=F.thead;F.th=F.td;if(!c.support.htmlSerialize)F._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=
+c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==w)return this.empty().append((this[0]&&this[0].ownerDocument||s).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},
+wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},
+prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,
+this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,f;(f=this[d])!=null;d++)if(!a||c.filter(a,[f]).length){if(!b&&f.nodeType===1){c.cleanData(f.getElementsByTagName("*"));c.cleanData([f])}f.parentNode&&f.parentNode.removeChild(f)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);
+return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,f=this.ownerDocument;if(!d){d=f.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(Ja,"").replace(/=([^="'>\s]+\/)>/g,'="$1">').replace(V,"")],f)[0]}else return this.cloneNode(true)});if(a===true){ra(this,b);ra(this.find("*"),b.find("*"))}return b},html:function(a){if(a===w)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(Ja,
+""):null;else if(typeof a==="string"&&!ta.test(a)&&(c.support.leadingWhitespace||!V.test(a))&&!F[(La.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Ka,Ma);try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(f){this.empty().append(a)}}else c.isFunction(a)?this.each(function(e){var j=c(this),i=j.html();j.empty().append(function(){return a.call(this,e,i)})}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&
+this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=c(this),f=d.html();d.replaceWith(a.call(this,b,f))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a,true)},domManip:function(a,b,d){function f(u){return c.nodeName(u,"table")?u.getElementsByTagName("tbody")[0]||
+u.appendChild(u.ownerDocument.createElement("tbody")):u}var e,j,i=a[0],o=[],k;if(!c.support.checkClone&&arguments.length===3&&typeof i==="string"&&ua.test(i))return this.each(function(){c(this).domManip(a,b,d,true)});if(c.isFunction(i))return this.each(function(u){var z=c(this);a[0]=i.call(this,u,b?z.html():w);z.domManip(a,b,d)});if(this[0]){e=i&&i.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:sa(a,this,o);k=e.fragment;if(j=k.childNodes.length===
+1?(k=k.firstChild):k.firstChild){b=b&&c.nodeName(j,"tr");for(var n=0,r=this.length;n<r;n++)d.call(b?f(this[n],j):this[n],n>0||e.cacheable||this.length>1?k.cloneNode(true):k)}o.length&&c.each(o,Qa)}return this}});c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var f=[];d=c(d);var e=this.length===1&&this[0].parentNode;if(e&&e.nodeType===11&&e.childNodes.length===1&&d.length===1){d[b](this[0]);
+return this}else{e=0;for(var j=d.length;e<j;e++){var i=(e>0?this.clone(true):this).get();c.fn[b].apply(c(d[e]),i);f=f.concat(i)}return this.pushStack(f,a,d.selector)}}});c.extend({clean:function(a,b,d,f){b=b||s;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||s;for(var e=[],j=0,i;(i=a[j])!=null;j++){if(typeof i==="number")i+="";if(i){if(typeof i==="string"&&!jb.test(i))i=b.createTextNode(i);else if(typeof i==="string"){i=i.replace(Ka,Ma);var o=(La.exec(i)||["",
+""])[1].toLowerCase(),k=F[o]||F._default,n=k[0],r=b.createElement("div");for(r.innerHTML=k[1]+i+k[2];n--;)r=r.lastChild;if(!c.support.tbody){n=ib.test(i);o=o==="table"&&!n?r.firstChild&&r.firstChild.childNodes:k[1]==="<table>"&&!n?r.childNodes:[];for(k=o.length-1;k>=0;--k)c.nodeName(o[k],"tbody")&&!o[k].childNodes.length&&o[k].parentNode.removeChild(o[k])}!c.support.leadingWhitespace&&V.test(i)&&r.insertBefore(b.createTextNode(V.exec(i)[0]),r.firstChild);i=r.childNodes}if(i.nodeType)e.push(i);else e=
+c.merge(e,i)}}if(d)for(j=0;e[j];j++)if(f&&c.nodeName(e[j],"script")&&(!e[j].type||e[j].type.toLowerCase()==="text/javascript"))f.push(e[j].parentNode?e[j].parentNode.removeChild(e[j]):e[j]);else{e[j].nodeType===1&&e.splice.apply(e,[j+1,0].concat(c.makeArray(e[j].getElementsByTagName("script"))));d.appendChild(e[j])}return e},cleanData:function(a){for(var b,d,f=c.cache,e=c.event.special,j=c.support.deleteExpando,i=0,o;(o=a[i])!=null;i++)if(d=o[c.expando]){b=f[d];if(b.events)for(var k in b.events)e[k]?
+c.event.remove(o,k):Ca(o,k,b.handle);if(j)delete o[c.expando];else o.removeAttribute&&o.removeAttribute(c.expando);delete f[d]}}});var kb=/z-?index|font-?weight|opacity|zoom|line-?height/i,Na=/alpha\([^)]*\)/,Oa=/opacity=([^)]*)/,ha=/float/i,ia=/-([a-z])/ig,lb=/([A-Z])/g,mb=/^-?\d+(?:px)?$/i,nb=/^-?\d/,ob={position:"absolute",visibility:"hidden",display:"block"},pb=["Left","Right"],qb=["Top","Bottom"],rb=s.defaultView&&s.defaultView.getComputedStyle,Pa=c.support.cssFloat?"cssFloat":"styleFloat",ja=
+function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){return X(this,a,b,true,function(d,f,e){if(e===w)return c.curCSS(d,f);if(typeof e==="number"&&!kb.test(f))e+="px";c.style(d,f,e)})};c.extend({style:function(a,b,d){if(!a||a.nodeType===3||a.nodeType===8)return w;if((b==="width"||b==="height")&&parseFloat(d)<0)d=w;var f=a.style||a,e=d!==w;if(!c.support.opacity&&b==="opacity"){if(e){f.zoom=1;b=parseInt(d,10)+""==="NaN"?"":"alpha(opacity="+d*100+")";a=f.filter||c.curCSS(a,"filter")||"";f.filter=
+Na.test(a)?a.replace(Na,b):b}return f.filter&&f.filter.indexOf("opacity=")>=0?parseFloat(Oa.exec(f.filter)[1])/100+"":""}if(ha.test(b))b=Pa;b=b.replace(ia,ja);if(e)f[b]=d;return f[b]},css:function(a,b,d,f){if(b==="width"||b==="height"){var e,j=b==="width"?pb:qb;function i(){e=b==="width"?a.offsetWidth:a.offsetHeight;f!=="border"&&c.each(j,function(){f||(e-=parseFloat(c.curCSS(a,"padding"+this,true))||0);if(f==="margin")e+=parseFloat(c.curCSS(a,"margin"+this,true))||0;else e-=parseFloat(c.curCSS(a,
+"border"+this+"Width",true))||0})}a.offsetWidth!==0?i():c.swap(a,ob,i);return Math.max(0,Math.round(e))}return c.curCSS(a,b,d)},curCSS:function(a,b,d){var f,e=a.style;if(!c.support.opacity&&b==="opacity"&&a.currentStyle){f=Oa.test(a.currentStyle.filter||"")?parseFloat(RegExp.$1)/100+"":"";return f===""?"1":f}if(ha.test(b))b=Pa;if(!d&&e&&e[b])f=e[b];else if(rb){if(ha.test(b))b="float";b=b.replace(lb,"-$1").toLowerCase();e=a.ownerDocument.defaultView;if(!e)return null;if(a=e.getComputedStyle(a,null))f=
+a.getPropertyValue(b);if(b==="opacity"&&f==="")f="1"}else if(a.currentStyle){d=b.replace(ia,ja);f=a.currentStyle[b]||a.currentStyle[d];if(!mb.test(f)&&nb.test(f)){b=e.left;var j=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;e.left=d==="fontSize"?"1em":f||0;f=e.pixelLeft+"px";e.left=b;a.runtimeStyle.left=j}}return f},swap:function(a,b,d){var f={};for(var e in b){f[e]=a.style[e];a.style[e]=b[e]}d.call(a);for(e in b)a.style[e]=f[e]}});if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=
+a.offsetWidth,d=a.offsetHeight,f=a.nodeName.toLowerCase()==="tr";return b===0&&d===0&&!f?true:b>0&&d>0&&!f?false:c.curCSS(a,"display")==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var sb=J(),tb=/<script(.|\s)*?\/script>/gi,ub=/select|textarea/i,vb=/color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week/i,N=/=\?(&|$)/,ka=/\?/,wb=/(\?|&)_=.*?(&|$)/,xb=/^(\w+:)?\/\/([^\/?#]+)/,yb=/%20/g,zb=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!==
+"string")return zb.call(this,a);else if(!this.length)return this;var f=a.indexOf(" ");if(f>=0){var e=a.slice(f,a.length);a=a.slice(0,f)}f="GET";if(b)if(c.isFunction(b)){d=b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);f="POST"}var j=this;c.ajax({url:a,type:f,dataType:"html",data:b,complete:function(i,o){if(o==="success"||o==="notmodified")j.html(e?c("<div />").append(i.responseText.replace(tb,"")).find(e):i.responseText);d&&j.each(d,[i.responseText,o,i])}});return this},
+serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||ub.test(this.nodeName)||vb.test(this.type))}).map(function(a,b){a=c(this).val();return a==null?null:c.isArray(a)?c.map(a,function(d){return{name:b.name,value:d}}):{name:b.name,value:a}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),
+function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:f})},getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:f})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,
+global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:A.XMLHttpRequest&&(A.location.protocol!=="file:"||!A.ActiveXObject)?function(){return new A.XMLHttpRequest}:function(){try{return new A.ActiveXObject("Microsoft.XMLHTTP")}catch(a){}},accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},etag:{},ajax:function(a){function b(){e.success&&
+e.success.call(k,o,i,x);e.global&&f("ajaxSuccess",[x,e])}function d(){e.complete&&e.complete.call(k,x,i);e.global&&f("ajaxComplete",[x,e]);e.global&&!--c.active&&c.event.trigger("ajaxStop")}function f(q,p){(e.context?c(e.context):c.event).trigger(q,p)}var e=c.extend(true,{},c.ajaxSettings,a),j,i,o,k=a&&a.context||e,n=e.type.toUpperCase();if(e.data&&e.processData&&typeof e.data!=="string")e.data=c.param(e.data,e.traditional);if(e.dataType==="jsonp"){if(n==="GET")N.test(e.url)||(e.url+=(ka.test(e.url)?
+"&":"?")+(e.jsonp||"callback")+"=?");else if(!e.data||!N.test(e.data))e.data=(e.data?e.data+"&":"")+(e.jsonp||"callback")+"=?";e.dataType="json"}if(e.dataType==="json"&&(e.data&&N.test(e.data)||N.test(e.url))){j=e.jsonpCallback||"jsonp"+sb++;if(e.data)e.data=(e.data+"").replace(N,"="+j+"$1");e.url=e.url.replace(N,"="+j+"$1");e.dataType="script";A[j]=A[j]||function(q){o=q;b();d();A[j]=w;try{delete A[j]}catch(p){}z&&z.removeChild(C)}}if(e.dataType==="script"&&e.cache===null)e.cache=false;if(e.cache===
+false&&n==="GET"){var r=J(),u=e.url.replace(wb,"$1_="+r+"$2");e.url=u+(u===e.url?(ka.test(e.url)?"&":"?")+"_="+r:"")}if(e.data&&n==="GET")e.url+=(ka.test(e.url)?"&":"?")+e.data;e.global&&!c.active++&&c.event.trigger("ajaxStart");r=(r=xb.exec(e.url))&&(r[1]&&r[1]!==location.protocol||r[2]!==location.host);if(e.dataType==="script"&&n==="GET"&&r){var z=s.getElementsByTagName("head")[0]||s.documentElement,C=s.createElement("script");C.src=e.url;if(e.scriptCharset)C.charset=e.scriptCharset;if(!j){var B=
+false;C.onload=C.onreadystatechange=function(){if(!B&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){B=true;b();d();C.onload=C.onreadystatechange=null;z&&C.parentNode&&z.removeChild(C)}}}z.insertBefore(C,z.firstChild);return w}var E=false,x=e.xhr();if(x){e.username?x.open(n,e.url,e.async,e.username,e.password):x.open(n,e.url,e.async);try{if(e.data||a&&a.contentType)x.setRequestHeader("Content-Type",e.contentType);if(e.ifModified){c.lastModified[e.url]&&x.setRequestHeader("If-Modified-Since",
+c.lastModified[e.url]);c.etag[e.url]&&x.setRequestHeader("If-None-Match",c.etag[e.url])}r||x.setRequestHeader("X-Requested-With","XMLHttpRequest");x.setRequestHeader("Accept",e.dataType&&e.accepts[e.dataType]?e.accepts[e.dataType]+", */*":e.accepts._default)}catch(ga){}if(e.beforeSend&&e.beforeSend.call(k,x,e)===false){e.global&&!--c.active&&c.event.trigger("ajaxStop");x.abort();return false}e.global&&f("ajaxSend",[x,e]);var g=x.onreadystatechange=function(q){if(!x||x.readyState===0||q==="abort"){E||
+d();E=true;if(x)x.onreadystatechange=c.noop}else if(!E&&x&&(x.readyState===4||q==="timeout")){E=true;x.onreadystatechange=c.noop;i=q==="timeout"?"timeout":!c.httpSuccess(x)?"error":e.ifModified&&c.httpNotModified(x,e.url)?"notmodified":"success";var p;if(i==="success")try{o=c.httpData(x,e.dataType,e)}catch(v){i="parsererror";p=v}if(i==="success"||i==="notmodified")j||b();else c.handleError(e,x,i,p);d();q==="timeout"&&x.abort();if(e.async)x=null}};try{var h=x.abort;x.abort=function(){x&&h.call(x);
+g("abort")}}catch(l){}e.async&&e.timeout>0&&setTimeout(function(){x&&!E&&g("timeout")},e.timeout);try{x.send(n==="POST"||n==="PUT"||n==="DELETE"?e.data:null)}catch(m){c.handleError(e,x,null,m);d()}e.async||g();return x}},handleError:function(a,b,d,f){if(a.error)a.error.call(a.context||a,b,d,f);if(a.global)(a.context?c(a.context):c.event).trigger("ajaxError",[b,a,f])},active:0,httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===
+1223||a.status===0}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),f=a.getResponseHeader("Etag");if(d)c.lastModified[b]=d;if(f)c.etag[b]=f;return a.status===304||a.status===0},httpData:function(a,b,d){var f=a.getResponseHeader("content-type")||"",e=b==="xml"||!b&&f.indexOf("xml")>=0;a=e?a.responseXML:a.responseText;e&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b===
+"json"||!b&&f.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&f.indexOf("javascript")>=0)c.globalEval(a);return a},param:function(a,b){function d(i,o){if(c.isArray(o))c.each(o,function(k,n){b||/\[\]$/.test(i)?f(i,n):d(i+"["+(typeof n==="object"||c.isArray(n)?k:"")+"]",n)});else!b&&o!=null&&typeof o==="object"?c.each(o,function(k,n){d(i+"["+k+"]",n)}):f(i,o)}function f(i,o){o=c.isFunction(o)?o():o;e[e.length]=encodeURIComponent(i)+"="+encodeURIComponent(o)}var e=[];if(b===w)b=c.ajaxSettings.traditional;
+if(c.isArray(a)||a.jquery)c.each(a,function(){f(this.name,this.value)});else for(var j in a)d(j,a[j]);return e.join("&").replace(yb,"+")}});var la={},Ab=/toggle|show|hide/,Bb=/^([+-]=)?([\d+-.]+)(.*)$/,W,va=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b){if(a||a===0)return this.animate(K("show",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");
+this[a].style.display=d||"";if(c.css(this[a],"display")==="none"){d=this[a].nodeName;var f;if(la[d])f=la[d];else{var e=c("<"+d+" />").appendTo("body");f=e.css("display");if(f==="none")f="block";e.remove();la[d]=f}c.data(this[a],"olddisplay",f)}}a=0;for(b=this.length;a<b;a++)this[a].style.display=c.data(this[a],"olddisplay")||"";return this}},hide:function(a,b){if(a||a===0)return this.animate(K("hide",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");!d&&d!=="none"&&c.data(this[a],
+"olddisplay",c.css(this[a],"display"))}a=0;for(b=this.length;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b){var d=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||d?this.each(function(){var f=d?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(K("toggle",3),a,b);return this},fadeTo:function(a,b,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d)},
+animate:function(a,b,d,f){var e=c.speed(b,d,f);if(c.isEmptyObject(a))return this.each(e.complete);return this[e.queue===false?"each":"queue"](function(){var j=c.extend({},e),i,o=this.nodeType===1&&c(this).is(":hidden"),k=this;for(i in a){var n=i.replace(ia,ja);if(i!==n){a[n]=a[i];delete a[i];i=n}if(a[i]==="hide"&&o||a[i]==="show"&&!o)return j.complete.call(this);if((i==="height"||i==="width")&&this.style){j.display=c.css(this,"display");j.overflow=this.style.overflow}if(c.isArray(a[i])){(j.specialEasing=
+j.specialEasing||{})[i]=a[i][1];a[i]=a[i][0]}}if(j.overflow!=null)this.style.overflow="hidden";j.curAnim=c.extend({},a);c.each(a,function(r,u){var z=new c.fx(k,j,r);if(Ab.test(u))z[u==="toggle"?o?"show":"hide":u](a);else{var C=Bb.exec(u),B=z.cur(true)||0;if(C){u=parseFloat(C[2]);var E=C[3]||"px";if(E!=="px"){k.style[r]=(u||1)+E;B=(u||1)/z.cur(true)*B;k.style[r]=B+E}if(C[1])u=(C[1]==="-="?-1:1)*u+B;z.custom(B,u,E)}else z.custom(B,u,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);
+this.each(function(){for(var f=d.length-1;f>=0;f--)if(d[f].elem===this){b&&d[f](true);d.splice(f,1)}});b||this.dequeue();return this}});c.each({slideDown:K("show",1),slideUp:K("hide",1),slideToggle:K("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,f){return this.animate(b,d,f)}});c.extend({speed:function(a,b,d){var f=a&&typeof a==="object"?a:{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};f.duration=c.fx.off?0:typeof f.duration===
+"number"?f.duration:c.fx.speeds[f.duration]||c.fx.speeds._default;f.old=f.complete;f.complete=function(){f.queue!==false&&c(this).dequeue();c.isFunction(f.old)&&f.old.call(this)};return f},easing:{linear:function(a,b,d,f){return d+f*a},swing:function(a,b,d,f){return(-Math.cos(a*Math.PI)/2+0.5)*f+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||
+c.fx.step._default)(this);if((this.prop==="height"||this.prop==="width")&&this.elem.style)this.elem.style.display="block"},cur:function(a){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];return(a=parseFloat(c.css(this.elem,this.prop,a)))&&a>-10000?a:parseFloat(c.curCSS(this.elem,this.prop))||0},custom:function(a,b,d){function f(j){return e.step(j)}this.startTime=J();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;
+this.pos=this.state=0;var e=this;f.elem=this.elem;if(f()&&c.timers.push(f)&&!W)W=setInterval(c.fx.tick,13)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true;this.custom(this.cur(),0)},step:function(a){var b=J(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=
+this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var f in this.options.curAnim)if(this.options.curAnim[f]!==true)d=false;if(d){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;a=c.data(this.elem,"olddisplay");this.elem.style.display=a?a:this.options.display;if(c.css(this.elem,"display")==="none")this.elem.style.display="block"}this.options.hide&&c(this.elem).hide();if(this.options.hide||this.options.show)for(var e in this.options.curAnim)c.style(this.elem,
+e,this.options.orig[e]);this.options.complete.call(this.elem)}return false}else{e=b-this.startTime;this.state=e/this.options.duration;a=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||a](this.state,e,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a=c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||
+c.fx.stop()},stop:function(){clearInterval(W);W=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a===b.elem}).length};c.fn.offset="getBoundingClientRect"in s.documentElement?
+function(a){var b=this[0];if(a)return this.each(function(e){c.offset.setOffset(this,a,e)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);var d=b.getBoundingClientRect(),f=b.ownerDocument;b=f.body;f=f.documentElement;return{top:d.top+(self.pageYOffset||c.support.boxModel&&f.scrollTop||b.scrollTop)-(f.clientTop||b.clientTop||0),left:d.left+(self.pageXOffset||c.support.boxModel&&f.scrollLeft||b.scrollLeft)-(f.clientLeft||b.clientLeft||0)}}:function(a){var b=
+this[0];if(a)return this.each(function(r){c.offset.setOffset(this,a,r)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d=b.offsetParent,f=b,e=b.ownerDocument,j,i=e.documentElement,o=e.body;f=(e=e.defaultView)?e.getComputedStyle(b,null):b.currentStyle;for(var k=b.offsetTop,n=b.offsetLeft;(b=b.parentNode)&&b!==o&&b!==i;){if(c.offset.supportsFixedPosition&&f.position==="fixed")break;j=e?e.getComputedStyle(b,null):b.currentStyle;
+k-=b.scrollTop;n-=b.scrollLeft;if(b===d){k+=b.offsetTop;n+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&/^t(able|d|h)$/i.test(b.nodeName))){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=d;d=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&j.overflow!=="visible"){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=j}if(f.position==="relative"||f.position==="static"){k+=o.offsetTop;n+=o.offsetLeft}if(c.offset.supportsFixedPosition&&
+f.position==="fixed"){k+=Math.max(i.scrollTop,o.scrollTop);n+=Math.max(i.scrollLeft,o.scrollLeft)}return{top:k,left:n}};c.offset={initialize:function(){var a=s.body,b=s.createElement("div"),d,f,e,j=parseFloat(c.curCSS(a,"marginTop",true))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";
+a.insertBefore(b,a.firstChild);d=b.firstChild;f=d.firstChild;e=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=f.offsetTop!==5;this.doesAddBorderForTableAndCells=e.offsetTop===5;f.style.position="fixed";f.style.top="20px";this.supportsFixedPosition=f.offsetTop===20||f.offsetTop===15;f.style.position=f.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=f.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==j;a.removeChild(b);
+c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.curCSS(a,"marginTop",true))||0;d+=parseFloat(c.curCSS(a,"marginLeft",true))||0}return{top:b,left:d}},setOffset:function(a,b,d){if(/static/.test(c.curCSS(a,"position")))a.style.position="relative";var f=c(a),e=f.offset(),j=parseInt(c.curCSS(a,"top",true),10)||0,i=parseInt(c.curCSS(a,"left",true),10)||0;if(c.isFunction(b))b=b.call(a,
+d,e);d={top:b.top-e.top+j,left:b.left-e.left+i};"using"in b?b.using.call(a,d):f.css(d)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),f=/^body|html$/i.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.curCSS(a,"marginTop",true))||0;d.left-=parseFloat(c.curCSS(a,"marginLeft",true))||0;f.top+=parseFloat(c.curCSS(b[0],"borderTopWidth",true))||0;f.left+=parseFloat(c.curCSS(b[0],"borderLeftWidth",true))||0;return{top:d.top-
+f.top,left:d.left-f.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||s.body;a&&!/^body|html$/i.test(a.nodeName)&&c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(f){var e=this[0],j;if(!e)return null;if(f!==w)return this.each(function(){if(j=wa(this))j.scrollTo(!a?f:c(j).scrollLeft(),a?f:c(j).scrollTop());else this[d]=f});else return(j=wa(e))?"pageXOffset"in j?j[a?"pageYOffset":
+"pageXOffset"]:c.support.boxModel&&j.document.documentElement[d]||j.document.body[d]:e[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase();c.fn["inner"+b]=function(){return this[0]?c.css(this[0],d,false,"padding"):null};c.fn["outer"+b]=function(f){return this[0]?c.css(this[0],d,false,f?"margin":"border"):null};c.fn[d]=function(f){var e=this[0];if(!e)return f==null?null:this;if(c.isFunction(f))return this.each(function(j){var i=c(this);i[d](f.call(this,j,i[d]()))});return"scrollTo"in
+e&&e.document?e.document.compatMode==="CSS1Compat"&&e.document.documentElement["client"+b]||e.document.body["client"+b]:e.nodeType===9?Math.max(e.documentElement["client"+b],e.body["scroll"+b],e.documentElement["scroll"+b],e.body["offset"+b],e.documentElement["offset"+b]):f===w?c.css(e,d):this.css(d,typeof f==="string"?f:f+"px")}});A.jQuery=A.$=c})(window);
diff --git a/doc/html/_static/minus.png b/doc/html/_static/minus.png
new file mode 100644
index 0000000..da1c562
--- /dev/null
+++ b/doc/html/_static/minus.png
Binary files differ
diff --git a/doc/html/_static/navigation.png b/doc/html/_static/navigation.png
new file mode 100644
index 0000000..1081dc1
--- /dev/null
+++ b/doc/html/_static/navigation.png
Binary files differ
diff --git a/doc/html/_static/plus.png b/doc/html/_static/plus.png
new file mode 100644
index 0000000..b3cb374
--- /dev/null
+++ b/doc/html/_static/plus.png
Binary files differ
diff --git a/doc/html/_static/pygments.css b/doc/html/_static/pygments.css
new file mode 100644
index 0000000..c30fc0b
--- /dev/null
+++ b/doc/html/_static/pygments.css
@@ -0,0 +1,62 @@
+.highlight .hll { background-color: #ffffcc }
+.highlight { background: #ffffff; }
+.highlight .c { color: #808080 } /* Comment */
+.highlight .err { color: #F00000; background-color: #F0A0A0 } /* Error */
+.highlight .k { color: #008000; font-weight: bold } /* Keyword */
+.highlight .o { color: #303030 } /* Operator */
+.highlight .cm { color: #808080 } /* Comment.Multiline */
+.highlight .cp { color: #507090 } /* Comment.Preproc */
+.highlight .c1 { color: #808080 } /* Comment.Single */
+.highlight .cs { color: #cc0000; font-weight: bold } /* Comment.Special */
+.highlight .gd { color: #A00000 } /* Generic.Deleted */
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .gr { color: #FF0000 } /* Generic.Error */
+.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.highlight .gi { color: #00A000 } /* Generic.Inserted */
+.highlight .go { color: #808080 } /* Generic.Output */
+.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.highlight .gt { color: #0040D0 } /* Generic.Traceback */
+.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.highlight .kp { color: #003080; font-weight: bold } /* Keyword.Pseudo */
+.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.highlight .kt { color: #303090; font-weight: bold } /* Keyword.Type */
+.highlight .m { color: #6000E0; font-weight: bold } /* Literal.Number */
+.highlight .s { background-color: #fff0f0 } /* Literal.String */
+.highlight .na { color: #0000C0 } /* Name.Attribute */
+.highlight .nb { color: #007020 } /* Name.Builtin */
+.highlight .nc { color: #B00060; font-weight: bold } /* Name.Class */
+.highlight .no { color: #003060; font-weight: bold } /* Name.Constant */
+.highlight .nd { color: #505050; font-weight: bold } /* Name.Decorator */
+.highlight .ni { color: #800000; font-weight: bold } /* Name.Entity */
+.highlight .ne { color: #F00000; font-weight: bold } /* Name.Exception */
+.highlight .nf { color: #0060B0; font-weight: bold } /* Name.Function */
+.highlight .nl { color: #907000; font-weight: bold } /* Name.Label */
+.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
+.highlight .nt { color: #007000 } /* Name.Tag */
+.highlight .nv { color: #906030 } /* Name.Variable */
+.highlight .ow { color: #000000; font-weight: bold } /* Operator.Word */
+.highlight .w { color: #bbbbbb } /* Text.Whitespace */
+.highlight .mf { color: #6000E0; font-weight: bold } /* Literal.Number.Float */
+.highlight .mh { color: #005080; font-weight: bold } /* Literal.Number.Hex */
+.highlight .mi { color: #0000D0; font-weight: bold } /* Literal.Number.Integer */
+.highlight .mo { color: #4000E0; font-weight: bold } /* Literal.Number.Oct */
+.highlight .sb { background-color: #fff0f0 } /* Literal.String.Backtick */
+.highlight .sc { color: #0040D0 } /* Literal.String.Char */
+.highlight .sd { color: #D04020 } /* Literal.String.Doc */
+.highlight .s2 { background-color: #fff0f0 } /* Literal.String.Double */
+.highlight .se { color: #606060; font-weight: bold; background-color: #fff0f0 } /* Literal.String.Escape */
+.highlight .sh { background-color: #fff0f0 } /* Literal.String.Heredoc */
+.highlight .si { background-color: #e0e0e0 } /* Literal.String.Interpol */
+.highlight .sx { color: #D02000; background-color: #fff0f0 } /* Literal.String.Other */
+.highlight .sr { color: #000000; background-color: #fff0ff } /* Literal.String.Regex */
+.highlight .s1 { background-color: #fff0f0 } /* Literal.String.Single */
+.highlight .ss { color: #A06000 } /* Literal.String.Symbol */
+.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
+.highlight .vc { color: #306090 } /* Name.Variable.Class */
+.highlight .vg { color: #d07000; font-weight: bold } /* Name.Variable.Global */
+.highlight .vi { color: #3030B0 } /* Name.Variable.Instance */
+.highlight .il { color: #0000D0; font-weight: bold } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/doc/html/_static/searchtools.js b/doc/html/_static/searchtools.js
new file mode 100644
index 0000000..f474eb4
--- /dev/null
+++ b/doc/html/_static/searchtools.js
@@ -0,0 +1,515 @@
+/*
+ * searchtools.js_t
+ * ~~~~~~~~~~~~~~~~
+ *
+ * Sphinx JavaScript utilities for the full-text search.
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+/**
+ * Helper function to return a node containing the
+ * search summary for a given text. keywords is a list
+ * of stemmed words, hlwords is the list of normal, unstemmed
+ * words. The former is used to find the occurrence, the
+ * latter for highlighting it.
+ */
+
+jQuery.makeSearchSummary = function(text, keywords, hlwords) {
+ var textLower = text.toLowerCase();
+ var start = 0;
+ $.each(keywords, function() {
+ var i = textLower.indexOf(this.toLowerCase());
+ if (i > -1)
+ start = i;
+ });
+ start = Math.max(start - 120, 0);
+ var excerpt = ((start > 0) ? '...' : '') +
+ $.trim(text.substr(start, 240)) +
+    ((start + 240 < text.length) ? '...' : '');
+ var rv = $('<div class="context"></div>').text(excerpt);
+ $.each(hlwords, function() {
+ rv = rv.highlightText(this, 'highlighted');
+ });
+ return rv;
+}
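+
+// Editor's sketch (not part of upstream Sphinx): an illustrative call with a
+// hypothetical page text, searching for "stores" (stemmed to "store"):
+//
+//   var summary = jQuery.makeSearchSummary(
+//       'S3QL stores all its data online.',  // hypothetical page text
+//       ['store'],                           // stemmed search terms
+//       ['stores']);                         // unstemmed words to highlight
+//   // summary is a <div class="context"> whose excerpt has "stores"
+//   // wrapped in <span class="highlighted"> by highlightText().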
+
+
+/**
+ * Porter Stemmer
+ */
+var Stemmer = function() {
+
+ var step2list = {
+ ational: 'ate',
+ tional: 'tion',
+ enci: 'ence',
+ anci: 'ance',
+ izer: 'ize',
+ bli: 'ble',
+ alli: 'al',
+ entli: 'ent',
+ eli: 'e',
+ ousli: 'ous',
+ ization: 'ize',
+ ation: 'ate',
+ ator: 'ate',
+ alism: 'al',
+ iveness: 'ive',
+ fulness: 'ful',
+ ousness: 'ous',
+ aliti: 'al',
+ iviti: 'ive',
+ biliti: 'ble',
+ logi: 'log'
+ };
+
+ var step3list = {
+ icate: 'ic',
+ ative: '',
+ alize: 'al',
+ iciti: 'ic',
+ ical: 'ic',
+ ful: '',
+ ness: ''
+ };
+
+ var c = "[^aeiou]"; // consonant
+ var v = "[aeiouy]"; // vowel
+ var C = c + "[^aeiouy]*"; // consonant sequence
+ var V = v + "[aeiou]*"; // vowel sequence
+
+ var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
+ var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
+ var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
+ var s_v = "^(" + C + ")?" + v; // vowel in stem
+
+ this.stemWord = function (w) {
+ var stem;
+ var suffix;
+ var firstch;
+ var origword = w;
+
+ if (w.length < 3)
+ return w;
+
+ var re;
+ var re2;
+ var re3;
+ var re4;
+
+ firstch = w.substr(0,1);
+ if (firstch == "y")
+ w = firstch.toUpperCase() + w.substr(1);
+
+ // Step 1a
+ re = /^(.+?)(ss|i)es$/;
+ re2 = /^(.+?)([^s])s$/;
+
+ if (re.test(w))
+ w = w.replace(re,"$1$2");
+ else if (re2.test(w))
+ w = w.replace(re2,"$1$2");
+
+ // Step 1b
+ re = /^(.+?)eed$/;
+ re2 = /^(.+?)(ed|ing)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ re = new RegExp(mgr0);
+ if (re.test(fp[1])) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1];
+ re2 = new RegExp(s_v);
+ if (re2.test(stem)) {
+ w = stem;
+ re2 = /(at|bl|iz)$/;
+ re3 = new RegExp("([^aeiouylsz])\\1$");
+ re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re2.test(w))
+ w = w + "e";
+ else if (re3.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ else if (re4.test(w))
+ w = w + "e";
+ }
+ }
+
+ // Step 1c
+ re = /^(.+?)y$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(s_v);
+ if (re.test(stem))
+ w = stem + "i";
+ }
+
+ // Step 2
+ re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step2list[suffix];
+ }
+
+ // Step 3
+ re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step3list[suffix];
+ }
+
+ // Step 4
+ re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+ re2 = /^(.+?)(s|t)(ion)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ if (re.test(stem))
+ w = stem;
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1] + fp[2];
+ re2 = new RegExp(mgr1);
+ if (re2.test(stem))
+ w = stem;
+ }
+
+ // Step 5
+ re = /^(.+?)e$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ re2 = new RegExp(meq1);
+ re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
+ w = stem;
+ }
+ re = /ll$/;
+ re2 = new RegExp(mgr1);
+ if (re.test(w) && re2.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+
+ // and turn initial Y back to y
+ if (firstch == "y")
+ w = firstch.toLowerCase() + w.substr(1);
+ return w;
+ }
+}
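+
+// Editor's sketch (not part of upstream Sphinx): the stemmer maps inflected
+// forms onto a common stem so that index lookups match, e.g.:
+//
+//   var stemmer = new Stemmer();
+//   stemmer.stemWord('relational');  // -> 'relat' (step 2, then step 5)
+//   stemmer.stemWord('ponies');      // -> 'poni'  (step 1a)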
+
+
+/**
+ * Search Module
+ */
+var Search = {
+
+ _index : null,
+ _queued_query : null,
+ _pulse_status : -1,
+
+ init : function() {
+ var params = $.getQueryParameters();
+ if (params.q) {
+ var query = params.q[0];
+ $('input[name="q"]')[0].value = query;
+ this.performSearch(query);
+ }
+ },
+
+ loadIndex : function(url) {
+ $.ajax({type: "GET", url: url, data: null, success: null,
+ dataType: "script", cache: true});
+ },
+
+ setIndex : function(index) {
+ var q;
+ this._index = index;
+ if ((q = this._queued_query) !== null) {
+ this._queued_query = null;
+ Search.query(q);
+ }
+ },
+
+ hasIndex : function() {
+ return this._index !== null;
+ },
+
+ deferQuery : function(query) {
+ this._queued_query = query;
+ },
+
+ stopPulse : function() {
+ this._pulse_status = 0;
+ },
+
+ startPulse : function() {
+ if (this._pulse_status >= 0)
+ return;
+ function pulse() {
+ Search._pulse_status = (Search._pulse_status + 1) % 4;
+ var dotString = '';
+ for (var i = 0; i < Search._pulse_status; i++)
+ dotString += '.';
+ Search.dots.text(dotString);
+ if (Search._pulse_status > -1)
+ window.setTimeout(pulse, 500);
+ };
+ pulse();
+ },
+
+ /**
+ * perform a search for something
+ */
+ performSearch : function(query) {
+ // create the required interface elements
+ this.out = $('#search-results');
+ this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
+ this.dots = $('<span></span>').appendTo(this.title);
+ this.status = $('<p style="display: none"></p>').appendTo(this.out);
+ this.output = $('<ul class="search"/>').appendTo(this.out);
+
+ $('#search-progress').text(_('Preparing search...'));
+ this.startPulse();
+
+ // index already loaded, the browser was quick!
+ if (this.hasIndex())
+ this.query(query);
+ else
+ this.deferQuery(query);
+ },
+
+ query : function(query) {
+ var stopwords = ["and","then","into","it","as","are","in","if","for","no","there","their","was","is","be","to","that","but","they","not","such","with","by","a","on","these","of","will","this","near","the","or","at"];
+
+ // Stem the searchterms and add them to the correct list
+ var stemmer = new Stemmer();
+ var searchterms = [];
+ var excluded = [];
+ var hlterms = [];
+ var tmp = query.split(/\s+/);
+ var object = (tmp.length == 1) ? tmp[0].toLowerCase() : null;
+ for (var i = 0; i < tmp.length; i++) {
+ if ($u.indexOf(stopwords, tmp[i]) != -1 || tmp[i].match(/^\d+$/) ||
+ tmp[i] == "") {
+ // skip this "word"
+ continue;
+ }
+ // stem the word
+ var word = stemmer.stemWord(tmp[i]).toLowerCase();
+ // select the correct list
+ if (word[0] == '-') {
+ var toAppend = excluded;
+ word = word.substr(1);
+ }
+ else {
+ var toAppend = searchterms;
+ hlterms.push(tmp[i].toLowerCase());
+ }
+ // only add if not already in the list
+ if (!$.contains(toAppend, word))
+ toAppend.push(word);
+ };
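+      // Editor's note (illustrative, not part of upstream Sphinx): the query
+      // 'mounting -ssl' yields searchterms = ['mount'], excluded = ['ssl']
+      // and hlterms = ['mounting'].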
+ var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
+
+ // console.debug('SEARCH: searching for:');
+ // console.info('required: ', searchterms);
+ // console.info('excluded: ', excluded);
+
+ // prepare search
+ var filenames = this._index.filenames;
+ var titles = this._index.titles;
+ var terms = this._index.terms;
+ var objects = this._index.objects;
+ var objtypes = this._index.objtypes;
+ var objnames = this._index.objnames;
+ var fileMap = {};
+ var files = null;
+ // different result priorities
+ var importantResults = [];
+ var objectResults = [];
+ var regularResults = [];
+ var unimportantResults = [];
+ $('#search-progress').empty();
+
+ // lookup as object
+ if (object != null) {
+ for (var prefix in objects) {
+ for (var name in objects[prefix]) {
+ var fullname = (prefix ? prefix + '.' : '') + name;
+ if (fullname.toLowerCase().indexOf(object) > -1) {
+ match = objects[prefix][name];
+ descr = objnames[match[1]] + _(', in ') + titles[match[0]];
+ // XXX the generated anchors are not generally correct
+ // XXX there may be custom prefixes
+ result = [filenames[match[0]], fullname, '#'+fullname, descr];
+ switch (match[2]) {
+ case 1: objectResults.push(result); break;
+ case 0: importantResults.push(result); break;
+ case 2: unimportantResults.push(result); break;
+ }
+ }
+ }
+ }
+ }
+
+ // sort results descending
+ objectResults.sort(function(a, b) {
+ return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
+ });
+
+ importantResults.sort(function(a, b) {
+ return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
+ });
+
+ unimportantResults.sort(function(a, b) {
+ return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
+ });
+
+
+ // perform the search on the required terms
+ for (var i = 0; i < searchterms.length; i++) {
+ var word = searchterms[i];
+ // no match but word was a required one
+ if ((files = terms[word]) == null)
+ break;
+ if (files.length == undefined) {
+ files = [files];
+ }
+ // create the mapping
+ for (var j = 0; j < files.length; j++) {
+ var file = files[j];
+ if (file in fileMap)
+ fileMap[file].push(word);
+ else
+ fileMap[file] = [word];
+ }
+ }
+
+ // now check if the files don't contain excluded terms
+ for (var file in fileMap) {
+ var valid = true;
+
+ // check if all requirements are matched
+ if (fileMap[file].length != searchterms.length)
+ continue;
+
+ // ensure that none of the excluded terms is in the
+ // search result.
+ for (var i = 0; i < excluded.length; i++) {
+ if (terms[excluded[i]] == file ||
+ $.contains(terms[excluded[i]] || [], file)) {
+ valid = false;
+ break;
+ }
+ }
+
+ // if we have still a valid result we can add it
+ // to the result list
+ if (valid)
+ regularResults.push([filenames[file], titles[file], '', null]);
+ }
+
+      // drop references that are no longer needed so the memory can be
+      // reclaimed while the result list is displayed ('delete' has no
+      // effect on local variables, so assign null instead)
+      filenames = titles = terms = null;
+
+ // now sort the regular results descending by title
+ regularResults.sort(function(a, b) {
+ var left = a[1].toLowerCase();
+ var right = b[1].toLowerCase();
+ return (left > right) ? -1 : ((left < right) ? 1 : 0);
+ });
+
+ // combine all results
+ var results = unimportantResults.concat(regularResults)
+ .concat(objectResults).concat(importantResults);
+
+ // print the results
+ var resultCount = results.length;
+ function displayNextItem() {
+ // results left, load the summary and display it
+ if (results.length) {
+ var item = results.pop();
+ var listItem = $('<li style="display:none"></li>');
+ if (DOCUMENTATION_OPTIONS.FILE_SUFFIX == '') {
+ // dirhtml builder
+ var dirname = item[0] + '/';
+ if (dirname.match(/\/index\/$/)) {
+ dirname = dirname.substring(0, dirname.length-6);
+ } else if (dirname == 'index/') {
+ dirname = '';
+ }
+ listItem.append($('<a/>').attr('href',
+ DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
+ highlightstring + item[2]).html(item[1]));
+ } else {
+ // normal html builders
+ listItem.append($('<a/>').attr('href',
+ item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
+ highlightstring + item[2]).html(item[1]));
+ }
+ if (item[3]) {
+ listItem.append($('<span> (' + item[3] + ')</span>'));
+ Search.output.append(listItem);
+ listItem.slideDown(5, function() {
+ displayNextItem();
+ });
+ } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
+ $.get(DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' +
+ item[0] + '.txt', function(data) {
+ if (data != '') {
+ listItem.append($.makeSearchSummary(data, searchterms, hlterms));
+ Search.output.append(listItem);
+ }
+ listItem.slideDown(5, function() {
+ displayNextItem();
+ });
+ });
+ } else {
+ // no source available, just display title
+ Search.output.append(listItem);
+ listItem.slideDown(5, function() {
+ displayNextItem();
+ });
+ }
+ }
+ // search finished, update title and status message
+ else {
+ Search.stopPulse();
+ Search.title.text(_('Search Results'));
+ if (!resultCount)
+ Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
+ else
+ Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
+ Search.status.fadeIn(500);
+ }
+ }
+ displayNextItem();
+ }
+}
+
+$(document).ready(function() {
+ Search.init();
+});
\ No newline at end of file
diff --git a/doc/html/_static/sphinxdoc.css b/doc/html/_static/sphinxdoc.css
new file mode 100644
index 0000000..38ca95a
--- /dev/null
+++ b/doc/html/_static/sphinxdoc.css
@@ -0,0 +1,340 @@
+/**
+ * Sphinx stylesheet -- sphinxdoc theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ letter-spacing: -0.01em;
+ line-height: 150%;
+ text-align: center;
+ background-color: #BFD1D4;
+ color: black;
+ padding: 0;
+ border: 1px solid #aaa;
+
+ margin: 0px 80px 0px 80px;
+ min-width: 740px;
+}
+
+div.document {
+ background-color: white;
+ text-align: left;
+ background-image: url(contents.png);
+ background-repeat: repeat-x;
+}
+
+div.bodywrapper {
+ margin: 0 240px 0 0;
+ border-right: 1px solid #ccc;
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+ font-size: 1em;
+}
+
+div.related ul {
+ background-image: url(navigation.png);
+ height: 2em;
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+}
+
+div.related ul li {
+ margin: 0;
+ padding: 0;
+ height: 2em;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: #EE9816;
+}
+
+div.related ul li a:hover {
+ color: #3CA8E7;
+}
+
+div.sphinxsidebarwrapper {
+ padding: 0;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0.5em 15px 15px 0;
+ width: 210px;
+ float: right;
+ font-size: 1em;
+ text-align: left;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin: 1em 0 0.5em 0;
+ font-size: 1em;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border: 1px solid #86989B;
+ background-color: #AFC1C4;
+}
+
+div.sphinxsidebar h3 a {
+ color: white;
+}
+
+div.sphinxsidebar ul {
+ padding-left: 1.5em;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+}
+
+div.footer {
+ background-color: #E3EFF1;
+ color: #86989B;
+ padding: 3px 8px 3px 0;
+ clear: both;
+ font-size: 0.8em;
+ text-align: right;
+}
+
+div.footer a {
+ color: #86989B;
+ text-decoration: underline;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: #CA7900;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #2491CF;
+}
+
+/* div.body a {
+ text-decoration: underline;
+} */
+
+h1 {
+ margin: 0;
+ padding: 0.7em 0 0.3em 0;
+ font-size: 1.5em;
+ color: #11557C;
+}
+
+h2 {
+ margin: 1.3em 0 0.2em 0;
+ font-size: 1.35em;
+ padding: 0;
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.1em;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+ display: none;
+ margin: 0 0 0 0.3em;
+ padding: 0 0.2em 0 0.2em;
+ color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+ display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+ color: #777;
+ background-color: #eee;
+}
+
+a.headerlink {
+ color: #c60f0f!important;
+ font-size: 1em;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none!important;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'Deja Vu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.01em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border-bottom: 1px solid #ddd;
+ color: #333;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+a tt {
+ border: 0;
+ color: #CA7900;
+}
+
+a tt:hover {
+ color: #2491CF;
+}
+
+pre {
+ font-family: 'Deja Vu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 2px 7px;
+ border: 1px solid #ccc;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ background-color: #f7f7f7;
+ padding: 0;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin: 0;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border-bottom: 1px solid #86989B;
+ font-weight: bold;
+ background-color: #AFC1C4;
+}
+
+div.warning {
+ border: 1px solid #940000;
+}
+
+div.warning p.admonition-title {
+ background-color: #CF0000;
+ border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+div.versioninfo {
+ margin: 1em 0 0 0;
+ border: 1px solid #ccc;
+ background-color: #DDEAF0;
+ padding: 8px;
+ line-height: 1.3em;
+ font-size: 0.9em;
+}
+
+/* Niko's Styles */
+
+div.body li p {
+ margin-bottom: 0.8em;
+ margin-top: 0.8em;
+}
+
+table.option-list td,
+table.option-list th {
+ border: 0px;
+}
+
+strong.program {
+ font-weight: normal;
+ font-style: italic;
+}
diff --git a/doc/html/_static/underscore.js b/doc/html/_static/underscore.js
new file mode 100644
index 0000000..9146e08
--- /dev/null
+++ b/doc/html/_static/underscore.js
@@ -0,0 +1,16 @@
+(function(){var j=this,n=j._,i=function(a){this._wrapped=a},m=typeof StopIteration!=="undefined"?StopIteration:"__break__",b=j._=function(a){return new i(a)};if(typeof exports!=="undefined")exports._=b;var k=Array.prototype.slice,o=Array.prototype.unshift,p=Object.prototype.toString,q=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;b.VERSION="0.5.5";b.each=function(a,c,d){try{if(a.forEach)a.forEach(c,d);else if(b.isArray(a)||b.isArguments(a))for(var e=0,f=a.length;e<f;e++)c.call(d,
+a[e],e,a);else{var g=b.keys(a);f=g.length;for(e=0;e<f;e++)c.call(d,a[g[e]],g[e],a)}}catch(h){if(h!=m)throw h;}return a};b.map=function(a,c,d){if(a&&b.isFunction(a.map))return a.map(c,d);var e=[];b.each(a,function(f,g,h){e.push(c.call(d,f,g,h))});return e};b.reduce=function(a,c,d,e){if(a&&b.isFunction(a.reduce))return a.reduce(b.bind(d,e),c);b.each(a,function(f,g,h){c=d.call(e,c,f,g,h)});return c};b.reduceRight=function(a,c,d,e){if(a&&b.isFunction(a.reduceRight))return a.reduceRight(b.bind(d,e),c);
+var f=b.clone(b.toArray(a)).reverse();b.each(f,function(g,h){c=d.call(e,c,g,h,a)});return c};b.detect=function(a,c,d){var e;b.each(a,function(f,g,h){if(c.call(d,f,g,h)){e=f;b.breakLoop()}});return e};b.select=function(a,c,d){if(a&&b.isFunction(a.filter))return a.filter(c,d);var e=[];b.each(a,function(f,g,h){c.call(d,f,g,h)&&e.push(f)});return e};b.reject=function(a,c,d){var e=[];b.each(a,function(f,g,h){!c.call(d,f,g,h)&&e.push(f)});return e};b.all=function(a,c,d){c=c||b.identity;if(a&&b.isFunction(a.every))return a.every(c,
+d);var e=true;b.each(a,function(f,g,h){(e=e&&c.call(d,f,g,h))||b.breakLoop()});return e};b.any=function(a,c,d){c=c||b.identity;if(a&&b.isFunction(a.some))return a.some(c,d);var e=false;b.each(a,function(f,g,h){if(e=c.call(d,f,g,h))b.breakLoop()});return e};b.include=function(a,c){if(b.isArray(a))return b.indexOf(a,c)!=-1;var d=false;b.each(a,function(e){if(d=e===c)b.breakLoop()});return d};b.invoke=function(a,c){var d=b.rest(arguments,2);return b.map(a,function(e){return(c?e[c]:e).apply(e,d)})};b.pluck=
+function(a,c){return b.map(a,function(d){return d[c]})};b.max=function(a,c,d){if(!c&&b.isArray(a))return Math.max.apply(Math,a);var e={computed:-Infinity};b.each(a,function(f,g,h){g=c?c.call(d,f,g,h):f;g>=e.computed&&(e={value:f,computed:g})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);var e={computed:Infinity};b.each(a,function(f,g,h){g=c?c.call(d,f,g,h):f;g<e.computed&&(e={value:f,computed:g})});return e.value};b.sortBy=function(a,c,d){return b.pluck(b.map(a,
+function(e,f,g){return{value:e,criteria:c.call(d,e,f,g)}}).sort(function(e,f){e=e.criteria;f=f.criteria;return e<f?-1:e>f?1:0}),"value")};b.sortedIndex=function(a,c,d){d=d||b.identity;for(var e=0,f=a.length;e<f;){var g=e+f>>1;d(a[g])<d(c)?(e=g+1):(f=g)}return e};b.toArray=function(a){if(!a)return[];if(a.toArray)return a.toArray();if(b.isArray(a))return a;if(b.isArguments(a))return k.call(a);return b.values(a)};b.size=function(a){return b.toArray(a).length};b.first=function(a,c,d){return c&&!d?k.call(a,
+0,c):a[0]};b.rest=function(a,c,d){return k.call(a,b.isUndefined(c)||d?1:c)};b.last=function(a){return a[a.length-1]};b.compact=function(a){return b.select(a,function(c){return!!c})};b.flatten=function(a){return b.reduce(a,[],function(c,d){if(b.isArray(d))return c.concat(b.flatten(d));c.push(d);return c})};b.without=function(a){var c=b.rest(arguments);return b.select(a,function(d){return!b.include(c,d)})};b.uniq=function(a,c){return b.reduce(a,[],function(d,e,f){if(0==f||(c===true?b.last(d)!=e:!b.include(d,
+e)))d.push(e);return d})};b.intersect=function(a){var c=b.rest(arguments);return b.select(b.uniq(a),function(d){return b.all(c,function(e){return b.indexOf(e,d)>=0})})};b.zip=function(){for(var a=b.toArray(arguments),c=b.max(b.pluck(a,"length")),d=new Array(c),e=0;e<c;e++)d[e]=b.pluck(a,String(e));return d};b.indexOf=function(a,c){if(a.indexOf)return a.indexOf(c);for(var d=0,e=a.length;d<e;d++)if(a[d]===c)return d;return-1};b.lastIndexOf=function(a,c){if(a.lastIndexOf)return a.lastIndexOf(c);for(var d=
+a.length;d--;)if(a[d]===c)return d;return-1};b.range=function(a,c,d){var e=b.toArray(arguments),f=e.length<=1;a=f?0:e[0];c=f?e[0]:e[1];d=e[2]||1;e=Math.ceil((c-a)/d);if(e<=0)return[];e=new Array(e);f=a;for(var g=0;1;f+=d){if((d>0?f-c:c-f)>=0)return e;e[g++]=f}};b.bind=function(a,c){var d=b.rest(arguments,2);return function(){return a.apply(c||j,d.concat(b.toArray(arguments)))}};b.bindAll=function(a){var c=b.rest(arguments);if(c.length==0)c=b.functions(a);b.each(c,function(d){a[d]=b.bind(a[d],a)});
+return a};b.delay=function(a,c){var d=b.rest(arguments,2);return setTimeout(function(){return a.apply(a,d)},c)};b.defer=function(a){return b.delay.apply(b,[a,1].concat(b.rest(arguments)))};b.wrap=function(a,c){return function(){var d=[a].concat(b.toArray(arguments));return c.apply(c,d)}};b.compose=function(){var a=b.toArray(arguments);return function(){for(var c=b.toArray(arguments),d=a.length-1;d>=0;d--)c=[a[d].apply(this,c)];return c[0]}};b.keys=function(a){if(b.isArray(a))return b.range(0,a.length);
+var c=[];for(var d in a)q.call(a,d)&&c.push(d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=function(a){return b.select(b.keys(a),function(c){return b.isFunction(a[c])}).sort()};b.extend=function(a,c){for(var d in c)a[d]=c[d];return a};b.clone=function(a){if(b.isArray(a))return a.slice(0);return b.extend({},a)};b.tap=function(a,c){c(a);return a};b.isEqual=function(a,c){if(a===c)return true;var d=typeof a;if(d!=typeof c)return false;if(a==c)return true;if(!a&&c||a&&!c)return false;
+if(a.isEqual)return a.isEqual(c);if(b.isDate(a)&&b.isDate(c))return a.getTime()===c.getTime();if(b.isNaN(a)&&b.isNaN(c))return true;if(b.isRegExp(a)&&b.isRegExp(c))return a.source===c.source&&a.global===c.global&&a.ignoreCase===c.ignoreCase&&a.multiline===c.multiline;if(d!=="object")return false;if(a.length&&a.length!==c.length)return false;d=b.keys(a);var e=b.keys(c);if(d.length!=e.length)return false;for(var f in a)if(!b.isEqual(a[f],c[f]))return false;return true};b.isEmpty=function(a){return b.keys(a).length==
+0};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=function(a){return!!(a&&a.concat&&a.unshift)};b.isArguments=function(a){return a&&b.isNumber(a.length)&&!b.isArray(a)&&!r.call(a,"length")};b.isFunction=function(a){return!!(a&&a.constructor&&a.call&&a.apply)};b.isString=function(a){return!!(a===""||a&&a.charCodeAt&&a.substr)};b.isNumber=function(a){return p.call(a)==="[object Number]"};b.isDate=function(a){return!!(a&&a.getTimezoneOffset&&a.setUTCFullYear)};b.isRegExp=function(a){return!!(a&&
+a.test&&a.exec&&(a.ignoreCase||a.ignoreCase===false))};b.isNaN=function(a){return b.isNumber(a)&&isNaN(a)};b.isNull=function(a){return a===null};b.isUndefined=function(a){return typeof a=="undefined"};b.noConflict=function(){j._=n;return this};b.identity=function(a){return a};b.breakLoop=function(){throw m;};var s=0;b.uniqueId=function(a){var c=s++;return a?a+c:c};b.template=function(a,c){a=new Function("obj","var p=[],print=function(){p.push.apply(p,arguments);};with(obj){p.push('"+a.replace(/[\r\t\n]/g,
+" ").replace(/'(?=[^%]*%>)/g,"\t").split("'").join("\\'").split("\t").join("'").replace(/<%=(.+?)%>/g,"',$1,'").split("<%").join("');").split("%>").join("p.push('")+"');}return p.join('');");return c?a(c):a};b.forEach=b.each;b.foldl=b.inject=b.reduce;b.foldr=b.reduceRight;b.filter=b.select;b.every=b.all;b.some=b.any;b.head=b.first;b.tail=b.rest;b.methods=b.functions;var l=function(a,c){return c?b(a).chain():a};b.each(b.functions(b),function(a){var c=b[a];i.prototype[a]=function(){var d=b.toArray(arguments);
+o.call(d,this._wrapped);return l(c.apply(b,d),this._chain)}});b.each(["pop","push","reverse","shift","sort","splice","unshift"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){c.apply(this._wrapped,arguments);return l(this._wrapped,this._chain)}});b.each(["concat","join","slice"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){return l(c.apply(this._wrapped,arguments),this._chain)}});i.prototype.chain=function(){this._chain=true;return this};i.prototype.value=function(){return this._wrapped}})();
diff --git a/doc/html/_static/up-pressed.png b/doc/html/_static/up-pressed.png
new file mode 100644
index 0000000..8bd587a
--- /dev/null
+++ b/doc/html/_static/up-pressed.png
Binary files differ
diff --git a/doc/html/_static/up.png b/doc/html/_static/up.png
new file mode 100644
index 0000000..b946256
--- /dev/null
+++ b/doc/html/_static/up.png
Binary files differ
diff --git a/doc/html/_static/websupport.js b/doc/html/_static/websupport.js
new file mode 100644
index 0000000..cbb6092
--- /dev/null
+++ b/doc/html/_static/websupport.js
@@ -0,0 +1,808 @@
+/*
+ * websupport.js
+ * ~~~~~~~~~~~~~
+ *
+ * sphinx.websupport utilities for all documentation.
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+(function($) {
+ $.fn.autogrow = function() {
+ return this.each(function() {
+ var textarea = this;
+
+ $.fn.autogrow.resize(textarea);
+
+ $(textarea)
+ .focus(function() {
+ textarea.interval = setInterval(function() {
+ $.fn.autogrow.resize(textarea);
+ }, 500);
+ })
+ .blur(function() {
+ clearInterval(textarea.interval);
+ });
+ });
+ };
+
+ $.fn.autogrow.resize = function(textarea) {
+ var lineHeight = parseInt($(textarea).css('line-height'), 10);
+ var lines = textarea.value.split('\n');
+ var columns = textarea.cols;
+ var lineCount = 0;
+ $.each(lines, function() {
+ lineCount += Math.ceil(this.length / columns) || 1;
+ });
+ var height = lineHeight * (lineCount + 1);
+ $(textarea).css('height', height);
+ };
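+
+  // Editor's note (worked example, not part of upstream Sphinx): a textarea
+  // with cols = 40 and line-height 16px holding one 95-character line gives
+  // lineCount = ceil(95 / 40) = 3, hence height = 16 * (3 + 1) = 64px.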
+})(jQuery);
+
+(function($) {
+ var comp, by;
+
+ function init() {
+ initEvents();
+ initComparator();
+ }
+
+ function initEvents() {
+ $('a.comment-close').live("click", function(event) {
+ event.preventDefault();
+ hide($(this).attr('id').substring(2));
+ });
+ $('a.vote').live("click", function(event) {
+ event.preventDefault();
+ handleVote($(this));
+ });
+ $('a.reply').live("click", function(event) {
+ event.preventDefault();
+ openReply($(this).attr('id').substring(2));
+ });
+ $('a.close-reply').live("click", function(event) {
+ event.preventDefault();
+ closeReply($(this).attr('id').substring(2));
+ });
+ $('a.sort-option').live("click", function(event) {
+ event.preventDefault();
+ handleReSort($(this));
+ });
+ $('a.show-proposal').live("click", function(event) {
+ event.preventDefault();
+ showProposal($(this).attr('id').substring(2));
+ });
+ $('a.hide-proposal').live("click", function(event) {
+ event.preventDefault();
+ hideProposal($(this).attr('id').substring(2));
+ });
+ $('a.show-propose-change').live("click", function(event) {
+ event.preventDefault();
+ showProposeChange($(this).attr('id').substring(2));
+ });
+ $('a.hide-propose-change').live("click", function(event) {
+ event.preventDefault();
+ hideProposeChange($(this).attr('id').substring(2));
+ });
+ $('a.accept-comment').live("click", function(event) {
+ event.preventDefault();
+ acceptComment($(this).attr('id').substring(2));
+ });
+ $('a.delete-comment').live("click", function(event) {
+ event.preventDefault();
+ deleteComment($(this).attr('id').substring(2));
+ });
+ $('a.comment-markup').live("click", function(event) {
+ event.preventDefault();
+ toggleCommentMarkupBox($(this).attr('id').substring(2));
+ });
+ }
+
+ /**
+ * Set comp, which is a comparator function used for sorting and
+ * inserting comments into the list.
+ */
+ function setComparator() {
+ // If the first three letters are "asc", sort in ascending order
+ // and remove the prefix.
+ if (by.substring(0,3) == 'asc') {
+ var i = by.substring(3);
+ comp = function(a, b) { return a[i] - b[i]; };
+ } else {
+ // Otherwise sort in descending order.
+ comp = function(a, b) { return b[by] - a[by]; };
+ }
+
+ // Reset link styles and format the selected sort option.
+ $('a.sel').attr('href', '#').removeClass('sel');
+ $('a.by' + by).removeAttr('href').addClass('sel');
+ }
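+
+  // Editor's sketch (not part of upstream Sphinx): with by = 'rating' the
+  // comparator sorts descending; an 'asc' prefix flips the order, e.g.:
+  //
+  //   by = 'ascage'; setComparator();
+  //   [{age: 3}, {age: 1}].sort(comp);  // -> [{age: 1}, {age: 3}]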
+
+ /**
+ * Create a comp function. If the user has preferences stored in
+ * the sortBy cookie, use those, otherwise use the default.
+ */
+ function initComparator() {
+ by = 'rating'; // Default to sort by rating.
+ // If the sortBy cookie is set, use that instead.
+ if (document.cookie.length > 0) {
+ var start = document.cookie.indexOf('sortBy=');
+ if (start != -1) {
+ start = start + 7;
+ var end = document.cookie.indexOf(";", start);
+      if (end == -1)
+        end = document.cookie.length;
+      // read the value whether or not sortBy is the last cookie
+      by = unescape(document.cookie.substring(start, end));
+ }
+ }
+ setComparator();
+ }
+
+ /**
+ * Show a comment div.
+ */
+ function show(id) {
+ $('#ao' + id).hide();
+ $('#ah' + id).show();
+ var context = $.extend({id: id}, opts);
+ var popup = $(renderTemplate(popupTemplate, context)).hide();
+ popup.find('textarea[name="proposal"]').hide();
+ popup.find('a.by' + by).addClass('sel');
+ var form = popup.find('#cf' + id);
+ form.submit(function(event) {
+ event.preventDefault();
+ addComment(form);
+ });
+ $('#s' + id).after(popup);
+ popup.slideDown('fast', function() {
+ getComments(id);
+ });
+ }
+
+ /**
+ * Hide a comment div.
+ */
+ function hide(id) {
+ $('#ah' + id).hide();
+ $('#ao' + id).show();
+ var div = $('#sc' + id);
+ div.slideUp('fast', function() {
+ div.remove();
+ });
+ }
+
+ /**
+ * Perform an ajax request to get comments for a node
+ * and insert the comments into the comments tree.
+ */
+ function getComments(id) {
+ $.ajax({
+ type: 'GET',
+ url: opts.getCommentsURL,
+ data: {node: id},
+ success: function(data, textStatus, request) {
+ var ul = $('#cl' + id);
+ var speed = 100;
+ $('#cf' + id)
+ .find('textarea[name="proposal"]')
+ .data('source', data.source);
+
+ if (data.comments.length === 0) {
+ ul.html('<li>No comments yet.</li>');
+ ul.data('empty', true);
+ } else {
+ // If there are comments, sort them and put them in the list.
+ var comments = sortComments(data.comments);
+ speed = data.comments.length * 100;
+ appendComments(comments, ul);
+ ul.data('empty', false);
+ }
+ $('#cn' + id).slideUp(speed + 200);
+ ul.slideDown(speed);
+ },
+ error: function(request, textStatus, error) {
+ showError('Oops, there was a problem retrieving the comments.');
+ },
+ dataType: 'json'
+ });
+ }
+
+ /**
+ * Add a comment via ajax and insert the comment into the comment tree.
+ */
+ function addComment(form) {
+ var node_id = form.find('input[name="node"]').val();
+ var parent_id = form.find('input[name="parent"]').val();
+ var text = form.find('textarea[name="comment"]').val();
+ var proposal = form.find('textarea[name="proposal"]').val();
+
+ if (text == '') {
+ showError('Please enter a comment.');
+ return;
+ }
+
+ // Disable the form that is being submitted.
+ form.find('textarea,input').attr('disabled', 'disabled');
+
+ // Send the comment to the server.
+ $.ajax({
+ type: "POST",
+ url: opts.addCommentURL,
+ dataType: 'json',
+ data: {
+ node: node_id,
+ parent: parent_id,
+ text: text,
+ proposal: proposal
+ },
+ success: function(data, textStatus, error) {
+ // Reset the form.
+ if (node_id) {
+ hideProposeChange(node_id);
+ }
+ form.find('textarea')
+ .val('')
+ .add(form.find('input'))
+ .removeAttr('disabled');
+ var ul = $('#cl' + (node_id || parent_id));
+ if (ul.data('empty')) {
+ $(ul).empty();
+ ul.data('empty', false);
+ }
+ insertComment(data.comment);
+ var ao = $('#ao' + node_id);
+ ao.find('img').attr({'src': opts.commentBrightImage});
+ if (node_id) {
+ // if this was a "root" comment, remove the commenting box
+ // (the user can get it back by reopening the comment popup)
+ $('#ca' + node_id).slideUp();
+ }
+ },
+ error: function(request, textStatus, error) {
+ form.find('textarea,input').removeAttr('disabled');
+ showError('Oops, there was a problem adding the comment.');
+ }
+ });
+ }
+
+ /**
+ * Recursively append comments to the main comment list and children
+ * lists, creating the comment tree.
+ */
+ function appendComments(comments, ul) {
+ $.each(comments, function() {
+ var div = createCommentDiv(this);
+ ul.append($(document.createElement('li')).html(div));
+ appendComments(this.children, div.find('ul.comment-children'));
+      // To avoid stale data, don't store the comment's children in data.
+ this.children = null;
+ div.data('comment', this);
+ });
+ }
+
+ /**
+ * After adding a new comment, it must be inserted in the correct
+ * location in the comment tree.
+ */
+ function insertComment(comment) {
+ var div = createCommentDiv(comment);
+
+    // To avoid stale data, don't store the comment's children in data.
+ comment.children = null;
+ div.data('comment', comment);
+
+ var ul = $('#cl' + (comment.node || comment.parent));
+ var siblings = getChildren(ul);
+
+ var li = $(document.createElement('li'));
+ li.hide();
+
+    // Determine where in the parent's children list to insert this comment.
+ for(i=0; i < siblings.length; i++) {
+ if (comp(comment, siblings[i]) <= 0) {
+ $('#cd' + siblings[i].id)
+ .parent()
+ .before(li.html(div));
+ li.slideDown('fast');
+ return;
+ }
+ }
+
+ // If we get here, this comment rates lower than all the others,
+ // or it is the only comment in the list.
+ ul.append(li.html(div));
+ li.slideDown('fast');
+ }
+
+ function acceptComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.acceptCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ $('#cm' + id).fadeOut('fast');
+ $('#cd' + id).removeClass('moderate');
+ },
+ error: function(request, textStatus, error) {
+ showError('Oops, there was a problem accepting the comment.');
+ }
+ });
+ }
+
+ function deleteComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.deleteCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ var div = $('#cd' + id);
+ if (data == 'delete') {
+ // Moderator mode: remove the comment and all children immediately
+ div.slideUp('fast', function() {
+ div.remove();
+ });
+ return;
+ }
+ // User mode: only mark the comment as deleted
+ div
+ .find('span.user-id:first')
+ .text('[deleted]').end()
+ .find('div.comment-text:first')
+ .text('[deleted]').end()
+ .find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
+ ', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
+ .remove();
+ var comment = div.data('comment');
+ comment.username = '[deleted]';
+ comment.text = '[deleted]';
+ div.data('comment', comment);
+ },
+ error: function(request, textStatus, error) {
+ showError('Oops, there was a problem deleting the comment.');
+ }
+ });
+ }
+
+ function showProposal(id) {
+ $('#sp' + id).hide();
+ $('#hp' + id).show();
+ $('#pr' + id).slideDown('fast');
+ }
+
+ function hideProposal(id) {
+ $('#hp' + id).hide();
+ $('#sp' + id).show();
+ $('#pr' + id).slideUp('fast');
+ }
+
+ function showProposeChange(id) {
+ $('#pc' + id).hide();
+ $('#hc' + id).show();
+ var textarea = $('#pt' + id);
+ textarea.val(textarea.data('source'));
+ $.fn.autogrow.resize(textarea[0]);
+ textarea.slideDown('fast');
+ }
+
+ function hideProposeChange(id) {
+ $('#hc' + id).hide();
+ $('#pc' + id).show();
+ var textarea = $('#pt' + id);
+ textarea.val('').removeAttr('disabled');
+ textarea.slideUp('fast');
+ }
+
+ function toggleCommentMarkupBox(id) {
+ $('#mb' + id).toggle();
+ }
+
+ /** Handle when the user clicks on a sort by link. */
+ function handleReSort(link) {
+ var classes = link.attr('class').split(/\s+/);
+ for (var i=0; i<classes.length; i++) {
+ if (classes[i] != 'sort-option') {
+ by = classes[i].substring(2);
+ }
+ }
+ setComparator();
+ // Save/update the sortBy cookie.
+ var expiration = new Date();
+ expiration.setDate(expiration.getDate() + 365);
+ document.cookie= 'sortBy=' + escape(by) +
+ ';expires=' + expiration.toUTCString();
+ $('ul.comment-ul').each(function(index, ul) {
+ var comments = getChildren($(ul), true);
+ comments = sortComments(comments);
+ appendComments(comments, $(ul).empty());
+ });
+ }
+
+ /**
+ * Function to process a vote when a user clicks an arrow.
+ */
+ function handleVote(link) {
+ if (!opts.voting) {
+ showError("You'll need to login to vote.");
+ return;
+ }
+
+ var id = link.attr('id');
+ if (!id) {
+ // Didn't click on one of the voting arrows.
+ return;
+ }
+ // If it is an unvote, the new vote value is 0,
+ // Otherwise it's 1 for an upvote, or -1 for a downvote.
+ var value = 0;
+ if (id.charAt(1) != 'u') {
+ value = id.charAt(0) == 'u' ? 1 : -1;
+ }
+ // The data to be sent to the server.
+ var d = {
+ comment_id: id.substring(2),
+ value: value
+ };
+
+ // Swap the vote and unvote links.
+ link.hide();
+ $('#' + id.charAt(0) + (id.charAt(1) == 'u' ? 'v' : 'u') + d.comment_id)
+ .show();
+
+ // The div the comment is displayed in.
+ var div = $('div#cd' + d.comment_id);
+ var data = div.data('comment');
+
+ // If this is not an unvote, and the other vote arrow has
+ // already been pressed, unpress it.
+ if ((d.value !== 0) && (data.vote === d.value * -1)) {
+ $('#' + (d.value == 1 ? 'd' : 'u') + 'u' + d.comment_id).hide();
+ $('#' + (d.value == 1 ? 'd' : 'u') + 'v' + d.comment_id).show();
+ }
+
+    // Update the comment's rating in the local data.
+ data.rating += (data.vote === 0) ? d.value : (d.value - data.vote);
+ data.vote = d.value;
+ div.data('comment', data);
+
+ // Change the rating text.
+ div.find('.rating:first')
+ .text(data.rating + ' point' + (data.rating == 1 ? '' : 's'));
+
+ // Send the vote information to the server.
+ $.ajax({
+ type: "POST",
+ url: opts.processVoteURL,
+ data: d,
+ error: function(request, textStatus, error) {
+ showError('Oops, there was a problem casting that vote.');
+ }
+ });
+ }
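+
+  // Editor's note (illustrative, not part of upstream Sphinx): the link id
+  // encodes the action and the comment id, e.g. 'uv42' is an upvote on
+  // comment 42 (value 1), 'dv42' a downvote (value -1), and 'uu42'/'du42'
+  // retract an existing vote (value 0).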
+
+ /**
+ * Open a reply form used to reply to an existing comment.
+ */
+ function openReply(id) {
+ // Swap out the reply link for the hide link
+ $('#rl' + id).hide();
+ $('#cr' + id).show();
+
+ // Add the reply li to the children ul.
+ var div = $(renderTemplate(replyTemplate, {id: id})).hide();
+ $('#cl' + id)
+ .prepend(div)
+ // Setup the submit handler for the reply form.
+ .find('#rf' + id)
+ .submit(function(event) {
+ event.preventDefault();
+ addComment($('#rf' + id));
+ closeReply(id);
+ })
+ .find('input[type=button]')
+ .click(function() {
+ closeReply(id);
+ });
+ div.slideDown('fast', function() {
+ $('#rf' + id).find('textarea').focus();
+ });
+ }
+
+ /**
+ * Close the reply form opened with openReply.
+ */
+ function closeReply(id) {
+ // Remove the reply div from the DOM.
+ $('#rd' + id).slideUp('fast', function() {
+ $(this).remove();
+ });
+
+ // Swap out the hide link for the reply link
+ $('#cr' + id).hide();
+ $('#rl' + id).show();
+ }
+
+ /**
+ * Recursively sort a tree of comments using the comp comparator.
+ */
+ function sortComments(comments) {
+ comments.sort(comp);
+ $.each(comments, function() {
+ this.children = sortComments(this.children);
+ });
+ return comments;
+ }
+
+ /**
+ * Get the children comments from a ul. If recursive is true,
+ * recursively include childrens' children.
+ */
+ function getChildren(ul, recursive) {
+ var children = [];
+ ul.children().children("[id^='cd']")
+ .each(function() {
+ var comment = $(this).data('comment');
+ if (recursive)
+ comment.children = getChildren($(this).find('#cl' + comment.id), true);
+ children.push(comment);
+ });
+ return children;
+ }
+
+ /** Create a div to display a comment in. */
+ function createCommentDiv(comment) {
+ if (!comment.displayed && !opts.moderator) {
+ return $('<div class="moderate">Thank you! Your comment will show up '
+        + 'once it has been approved by a moderator.</div>');
+ }
+ // Prettify the comment rating.
+ comment.pretty_rating = comment.rating + ' point' +
+ (comment.rating == 1 ? '' : 's');
+ // Make a class (for displaying not yet moderated comments differently)
+ comment.css_class = comment.displayed ? '' : ' moderate';
+ // Create a div for this comment.
+ var context = $.extend({}, opts, comment);
+ var div = $(renderTemplate(commentTemplate, context));
+
+    // If the user has voted on this comment, highlight the correct arrow.
+ if (comment.vote) {
+ var direction = (comment.vote == 1) ? 'u' : 'd';
+ div.find('#' + direction + 'v' + comment.id).hide();
+ div.find('#' + direction + 'u' + comment.id).show();
+ }
+
+ if (opts.moderator || comment.text != '[deleted]') {
+ div.find('a.reply').show();
+ if (comment.proposal_diff)
+ div.find('#sp' + comment.id).show();
+ if (opts.moderator && !comment.displayed)
+ div.find('#cm' + comment.id).show();
+ if (opts.moderator || (opts.username == comment.username))
+ div.find('#dc' + comment.id).show();
+ }
+ return div;
+ }
+
+ /**
+ * A simple template renderer. Placeholders such as <%id%> are replaced
+ * by context['id'] with items being escaped. Placeholders such as <#id#>
+ * are not escaped.
+ */
+ function renderTemplate(template, context) {
+ var esc = $(document.createElement('div'));
+
+ function handle(ph, escape) {
+ var cur = context;
+ $.each(ph.split('.'), function() {
+ cur = cur[this];
+ });
+ return escape ? esc.text(cur || "").html() : cur;
+ }
+
+ return template.replace(/<([%#])([\w\.]*)\1>/g, function() {
+ return handle(arguments[2], arguments[1] == '%' ? true : false);
+ });
+ }
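+
+  // Editor's sketch (not part of upstream Sphinx):
+  //
+  //   renderTemplate('<p id="cd<%id%>"><#text#></p>',
+  //                  {id: 7, text: '<b>hi</b>'});
+  //   // -> '<p id="cd7"><b>hi</b></p>': id is HTML-escaped,
+  //   //    text is inserted verbatim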
+
+ /** Flash an error message briefly. */
+ function showError(message) {
+ $(document.createElement('div')).attr({'class': 'popup-error'})
+ .append($(document.createElement('div'))
+ .attr({'class': 'error-message'}).text(message))
+ .appendTo('body')
+ .fadeIn("slow")
+ .delay(2000)
+ .fadeOut("slow");
+ }
+
+  /** Add a link the user can use to open the comments popup. */
+ $.fn.comment = function() {
+ return this.each(function() {
+ var id = $(this).attr('id').substring(1);
+ var count = COMMENT_METADATA[id];
+ var title = count + ' comment' + (count == 1 ? '' : 's');
+ var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
+ var addcls = count == 0 ? ' nocomment' : '';
+ $(this)
+ .append(
+ $(document.createElement('a')).attr({
+ href: '#',
+ 'class': 'sphinx-comment-open' + addcls,
+ id: 'ao' + id
+ })
+ .append($(document.createElement('img')).attr({
+ src: image,
+ alt: 'comment',
+ title: title
+ }))
+ .click(function(event) {
+ event.preventDefault();
+ show($(this).attr('id').substring(2));
+ })
+ )
+ .append(
+ $(document.createElement('a')).attr({
+ href: '#',
+ 'class': 'sphinx-comment-close hidden',
+ id: 'ah' + id
+ })
+ .append($(document.createElement('img')).attr({
+ src: opts.closeCommentImage,
+ alt: 'close',
+ title: 'close'
+ }))
+ .click(function(event) {
+ event.preventDefault();
+ hide($(this).attr('id').substring(2));
+ })
+ );
+ });
+ };
+
+ var opts = {
+ processVoteURL: '/_process_vote',
+ addCommentURL: '/_add_comment',
+ getCommentsURL: '/_get_comments',
+ acceptCommentURL: '/_accept_comment',
+ deleteCommentURL: '/_delete_comment',
+ commentImage: '/static/_static/comment.png',
+ closeCommentImage: '/static/_static/comment-close.png',
+ loadingImage: '/static/_static/ajax-loader.gif',
+ commentBrightImage: '/static/_static/comment-bright.png',
+ upArrow: '/static/_static/up.png',
+ downArrow: '/static/_static/down.png',
+ upArrowPressed: '/static/_static/up-pressed.png',
+ downArrowPressed: '/static/_static/down-pressed.png',
+ voting: false,
+ moderator: false
+ };
+
+ if (typeof COMMENT_OPTIONS != "undefined") {
+ opts = jQuery.extend(opts, COMMENT_OPTIONS);
+ }
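+
+  // Editor's sketch (not part of upstream Sphinx): a page can override the
+  // defaults by defining COMMENT_OPTIONS before this script loads, e.g.:
+  //
+  //   var COMMENT_OPTIONS = {voting: true, processVoteURL: '/vote'};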
+
+ var popupTemplate = '\
+ <div class="sphinx-comments" id="sc<%id%>">\
+ <p class="sort-options">\
+ Sort by:\
+ <a href="#" class="sort-option byrating">best rated</a>\
+ <a href="#" class="sort-option byascage">newest</a>\
+ <a href="#" class="sort-option byage">oldest</a>\
+ </p>\
+ <div class="comment-header">Comments</div>\
+ <div class="comment-loading" id="cn<%id%>">\
+ loading comments... <img src="<%loadingImage%>" alt="" /></div>\
+ <ul id="cl<%id%>" class="comment-ul"></ul>\
+ <div id="ca<%id%>">\
+ <p class="add-a-comment">Add a comment\
+ (<a href="#" class="comment-markup" id="ab<%id%>">markup</a>):</p>\
+ <div class="comment-markup-box" id="mb<%id%>">\
+ reStructured text markup: <i>*emph*</i>, <b>**strong**</b>, \
+ <tt>``code``</tt>, \
+ code blocks: <tt>::</tt> and an indented block after blank line</div>\
+ <form method="post" id="cf<%id%>" class="comment-form" action="">\
+ <textarea name="comment" cols="80"></textarea>\
+ <p class="propose-button">\
+ <a href="#" id="pc<%id%>" class="show-propose-change">\
+ Propose a change &#9657;\
+ </a>\
+ <a href="#" id="hc<%id%>" class="hide-propose-change">\
+ Propose a change &#9663;\
+ </a>\
+ </p>\
+ <textarea name="proposal" id="pt<%id%>" cols="80"\
+ spellcheck="false"></textarea>\
+ <input type="submit" value="Add comment" />\
+ <input type="hidden" name="node" value="<%id%>" />\
+ <input type="hidden" name="parent" value="" />\
+ </form>\
+ </div>\
+ </div>';
+
+ var commentTemplate = '\
+ <div id="cd<%id%>" class="sphinx-comment<%css_class%>">\
+ <div class="vote">\
+ <div class="arrow">\
+ <a href="#" id="uv<%id%>" class="vote" title="vote up">\
+ <img src="<%upArrow%>" />\
+ </a>\
+ <a href="#" id="uu<%id%>" class="un vote" title="vote up">\
+ <img src="<%upArrowPressed%>" />\
+ </a>\
+ </div>\
+ <div class="arrow">\
+ <a href="#" id="dv<%id%>" class="vote" title="vote down">\
+ <img src="<%downArrow%>" id="da<%id%>" />\
+ </a>\
+ <a href="#" id="du<%id%>" class="un vote" title="vote down">\
+ <img src="<%downArrowPressed%>" />\
+ </a>\
+ </div>\
+ </div>\
+ <div class="comment-content">\
+ <p class="tagline comment">\
+ <span class="user-id"><%username%></span>\
+ <span class="rating"><%pretty_rating%></span>\
+ <span class="delta"><%time.delta%></span>\
+ </p>\
+ <div class="comment-text comment"><#text#></div>\
+ <p class="comment-opts comment">\
+ <a href="#" class="reply hidden" id="rl<%id%>">reply &#9657;</a>\
+ <a href="#" class="close-reply" id="cr<%id%>">reply &#9663;</a>\
+ <a href="#" id="sp<%id%>" class="show-proposal">proposal &#9657;</a>\
+ <a href="#" id="hp<%id%>" class="hide-proposal">proposal &#9663;</a>\
+ <a href="#" id="dc<%id%>" class="delete-comment hidden">delete</a>\
+ <span id="cm<%id%>" class="moderation hidden">\
+ <a href="#" id="ac<%id%>" class="accept-comment">accept</a>\
+ </span>\
+ </p>\
+ <pre class="proposal" id="pr<%id%>">\
+<#proposal_diff#>\
+ </pre>\
+ <ul class="comment-children" id="cl<%id%>"></ul>\
+ </div>\
+ <div class="clearleft"></div>\
+ </div>\
+ </div>';
+
+ var replyTemplate = '\
+ <li>\
+ <div class="reply-div" id="rd<%id%>">\
+ <form id="rf<%id%>">\
+ <textarea name="comment" cols="80"></textarea>\
+ <input type="submit" value="Add reply" />\
+ <input type="button" value="Cancel" />\
+ <input type="hidden" name="parent" value="<%id%>" />\
+ <input type="hidden" name="node" value="" />\
+ </form>\
+ </div>\
+ </li>';
+
+ $(document).ready(function() {
+ init();
+ });
+})(jQuery);
+
+$(document).ready(function() {
+ // add comment anchors for all paragraphs that are commentable
+ $('.sphinx-has-comment').comment();
+
+ // highlight search words in search results
+ $("div.context").each(function() {
+ var params = $.getQueryParameters();
+ var terms = (params.q) ? params.q[0].split(/\s+/) : [];
+ var result = $(this);
+ $.each(terms, function() {
+ result.highlightText(this.toLowerCase(), 'highlighted');
+ });
+ });
+
+ // directly open comment window if requested
+ var anchor = document.location.hash;
+ if (anchor.substring(0, 9) == '#comment-') {
+ $('#ao' + anchor.substring(9)).click();
+ document.location.hash = '#s' + anchor.substring(9);
+ }
+});
diff --git a/doc/html/about.html b/doc/html/about.html
new file mode 100644
index 0000000..f2af2ae
--- /dev/null
+++ b/doc/html/about.html
@@ -0,0 +1,196 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>About S3QL &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="#" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Installation" href="installation.html" />
+ <link rel="prev" title="S3QL User’s Guide" href="index.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="installation.html" title="Installation"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="index.html" title="S3QL User’s Guide"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1 current"><a class="current reference internal" href="">About S3QL</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#features">Features</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#development-status">Development Status</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="about-s3ql">
+<h1>About S3QL<a class="headerlink" href="#about-s3ql" title="Permalink to this headline">¶</a></h1>
+<p>S3QL is a file system that stores all its data online. It supports
+<a class="reference external" href="http://aws.amazon.com/s3AmazonS3">Amazon S3</a> as well as arbitrary
+SFTP servers and effectively provides you with a hard disk of dynamic,
+infinite capacity that can be accessed from any computer with internet
+access.</p>
+<p>S3QL provides a standard, full-featured UNIX file system that is
+conceptually indistinguishable from any local file system.
+Furthermore, S3QL has additional features like compression,
+encryption, data de-duplication, immutable trees and snapshotting
+which make it especially suitable for online backup and archival.</p>
+<p>S3QL is designed to favor simplicity and elegance over performance and
+feature-creep. Care has been taken to make the source code as
+readable and serviceable as possible. Solid error detection and error
+handling have been included from the very first line, and S3QL comes
+with extensive automated test cases for all its components.</p>
+<div class="section" id="features">
+<h2>Features<a class="headerlink" href="#features" title="Permalink to this headline">¶</a></h2>
+<ul>
+<li><p class="first"><strong>Transparency.</strong> Conceptually, S3QL is indistinguishable from a
+local file system. For example, it supports hardlinks, symlinks,
+ACLs and standard unix permissions, extended attributes and file
+sizes up to 2 TB.</p>
+</li>
+<li><p class="first"><strong>Dynamic Size.</strong> The size of an S3QL file system grows and shrinks
+dynamically as required.</p>
+</li>
+<li><p class="first"><strong>Compression.</strong> Before storage, all data may compressed with the
+LZMA, bzip2 or deflate (gzip) algorithm.</p>
+</li>
+<li><p class="first"><strong>Encryption.</strong> After compression (but before upload), all data can
+AES encrypted with a 256 bit key. An additional SHA256 HMAC checksum
+is used to protect the data against manipulation.</p>
+</li>
+<li><p class="first"><strong>Data De-duplication.</strong> If several files have identical contents,
+the redundant data will be stored only once. This works across all
+files stored in the file system, and also if only some parts of the
+files are identical while other parts differ.</p>
+</li>
+<li><p class="first"><strong>Immutable Trees.</strong> Directory trees can be made immutable, so that
+their contents can no longer be changed in any way whatsoever. This
+can be used to ensure that backups can not be modified after they
+have been made.</p>
+</li>
+<li><p class="first"><strong>Copy-on-Write/Snapshotting.</strong> S3QL can replicate entire directory
+trees without using any additional storage space. Only if one of the
+copies is modified does the modified part of the data take up
+additional storage space. This can be used to create
+intelligent snapshots that preserve the state of a directory at
+different points in time using a minimum amount of space.</p>
+</li>
+<li><p class="first"><strong>High Performance independent of network latency.</strong> All operations
+that do not write or read file contents (like creating directories
+or moving, renaming, and changing permissions of files and
+directories) are very fast because they are carried out without any
+network transactions.</p>
+<p>S3QL achieves this by saving the entire file and directory structure
+in a database. This database is locally cached and the remote
+copy updated asynchronously.</p>
+</li>
+<li><p class="first"><strong>Support for low bandwidth connections.</strong> S3QL splits file contents
+into smaller blocks and caches blocks locally. This minimizes both
+the number of network transactions required for reading and writing
+data, and the amount of data that has to be transferred when only
+parts of a file are read or written.</p>
+</li>
+</ul>
+</div>
+<div class="section" id="development-status">
+<h2>Development Status<a class="headerlink" href="#development-status" title="Permalink to this headline">¶</a></h2>
+<p>After two years of beta-testing by about 93 users revealed no
+data-critical bugs, S3QL was declared <strong>stable</strong> with the release of
+version 1.0 on May 13th, 2011. Note that this does not mean that S3QL
+is bug-free. S3QL still has several known bugs, and probably many more
+unknown ones. However, there is a high probability that these bugs,
+while inconvenient, will not endanger any stored data.</p>
+<p>Please report any problems on the <a class="reference external" href="http://groups.google.com/group/s3ql">mailing list</a> or the <a class="reference external" href="http://code.google.com/p/s3ql/issues/list">issue tracker</a>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="installation.html" title="Installation"
+ >next</a></li>
+ <li class="right" >
+ <a href="index.html" title="S3QL User’s Guide"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/adm.html b/doc/html/adm.html
new file mode 100644
index 0000000..cda4433
--- /dev/null
+++ b/doc/html/adm.html
@@ -0,0 +1,217 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Managing Buckets &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Mounting" href="mount.html" />
+ <link rel="prev" title="File System Creation" href="mkfs.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mount.html" title="Mounting"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="mkfs.html" title="File System Creation"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Managing Buckets</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#changing-the-passphrase">Changing the Passphrase</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#upgrading-the-file-system">Upgrading the file system</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#deleting-a-file-system">Deleting a file system</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#restoring-metadata-backups">Restoring Metadata Backups</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="managing-buckets">
+<h1>Managing Buckets<a class="headerlink" href="#managing-buckets" title="Permalink to this headline">¶</a></h1>
+<p>The <tt class=" docutils literal"><span class="pre">s3qladm</span></tt> command performs various operations on S3QL buckets.
+The file system contained in the bucket <em>must not be mounted</em> when
+using <tt class=" docutils literal"><span class="pre">s3qladm</span></tt> or things will go wrong badly.</p>
+<p>The syntax is</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;action&gt;</span><span class="l"> </span><span class="nv">&lt;storage-url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>where <tt class="var docutils literal"><span class="pre">action</span></tt> may be either of <strong class="program">passphrase</strong>,
+<strong class="program">upgrade</strong>, <strong class="program">delete</strong> or <strong class="program">download-metadata</strong>.</p>
+<p>The <strong class="program">s3qladm</strong> command accepts the following general
+options, regardless of which action is invoked:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to get
+debug messages from all modules. This option can be
+specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication info.
+(default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+<p>Hint: run <tt class=" docutils literal"><span class="pre">s3qladm</span> <span class="pre">&lt;action&gt;</span> <span class="pre">--help</span></tt> to get help on the additional
+arguments that the different actions take.</p>
+<div class="section" id="changing-the-passphrase">
+<h2>Changing the Passphrase<a class="headerlink" href="#changing-the-passphrase" title="Permalink to this headline">¶</a></h2>
+<p>To change the passphrase of a bucket, use the <tt class=" docutils literal"><span class="pre">s3qladm</span></tt> command:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm passphrase </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>The passphrase can only be changed when the bucket is not mounted.</p>
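+<p>For example, for a file system stored in the (hypothetical) bucket
+<tt class=" docutils literal"><span class="pre">s3://my_backup_bucket</span></tt>, you would run:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm passphrase s3://my_backup_bucket</span>
+</pre></div>
+</div>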
+</div>
+<div class="section" id="upgrading-the-file-system">
+<h2>Upgrading the file system<a class="headerlink" href="#upgrading-the-file-system" title="Permalink to this headline">¶</a></h2>
+<p>If you have installed a new version of S3QL, it may sometimes be
+necessary to upgrade the file system metadata as well. Note that in
+this case the file system can no longer be accessed with older
+versions of S3QL after the upgrade.</p>
+<p>During the upgrade you have to make sure that the command is not
+interrupted, and that no one else tries to mount, check or upgrade the
+file system at the same time.</p>
+<p>To upgrade a file system from the previous to the current revision,
+execute</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm upgrade </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="deleting-a-file-system">
+<h2>Deleting a file system<a class="headerlink" href="#deleting-a-file-system" title="Permalink to this headline">¶</a></h2>
+<p>A file system can be deleted with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm delete </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>This physically deletes all the data and file system structures.</p>
+</div>
+<div class="section" id="restoring-metadata-backups">
+<h2>Restoring Metadata Backups<a class="headerlink" href="#restoring-metadata-backups" title="Permalink to this headline">¶</a></h2>
+<p>If the most-recent copy of the file system metadata has been damaged
+irreparably, it is possible to restore one of the automatically
+created backup copies.</p>
+<p>The command</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm download-metadata </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>will give you a list of the available metadata backups and allow you
+to download them. This will create two new files in the current
+directory, ending in <tt class="docutils literal"><span class="pre">.db</span></tt> and <tt class="docutils literal"><span class="pre">.params</span></tt>. To actually use the
+downloaded backup, you need to move these files into the <tt class="docutils literal"><span class="pre">~/.s3ql/</span></tt>
+directory and run <tt class="docutils literal"><span class="pre">fsck.s3ql</span></tt>.</p>
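+<p>For example (the file names below are only placeholders; the actual
+names depend on the backup that you download):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mv &lt;name&gt;.db &lt;name&gt;.params ~/.s3ql/</span>
+<span class="l">fsck.s3ql </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>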
+<div class="admonition warning">
+<p class="first admonition-title">Warning</p>
+<p class="last">You should probably not use this functionality without having asked
+for help on the mailing list first (see <a class="reference internal" href="resources.html#resources"><em>Further Resources / Getting Help</em></a>).</p>
+</div>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mount.html" title="Mounting"
+ >next</a></li>
+ <li class="right" >
+ <a href="mkfs.html" title="File System Creation"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/backends.html b/doc/html/backends.html
new file mode 100644
index 0000000..e906681
--- /dev/null
+++ b/doc/html/backends.html
@@ -0,0 +1,360 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Storage Backends &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="File System Creation" href="mkfs.html" />
+ <link rel="prev" title="Installation" href="installation.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mkfs.html" title="File System Creation"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="installation.html" title="Installation"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Storage Backends</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#on-backend-reliability">On Backend Reliability</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#the-authinfo-file">The <tt class=" docutils literal"><span class="pre">authinfo</span></tt> file</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#consistency-guarantees">Consistency Guarantees</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#the-amazon-s3-backend">The Amazon S3 Backend</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#the-local-backend">The Local Backend</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#the-sftp-backend">The SFTP Backend</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="storage-backends">
+<h1>Storage Backends<a class="headerlink" href="#storage-backends" title="Permalink to this headline">¶</a></h1>
+<p>S3QL can use different protocols to store the file system data.
+Independent of the backend that you use, the place where your file
+system data is being stored is called a <em>bucket</em>. (This is mostly for
+historical reasons, since initially S3QL supported only the Amazon S3
+backend).</p>
+<div class="section" id="on-backend-reliability">
+<h2>On Backend Reliability<a class="headerlink" href="#on-backend-reliability" title="Permalink to this headline">¶</a></h2>
+<p>S3QL has been designed for use with a storage backend where data loss
+is so infrequent that it can be completely neglected (e.g. the Amazon
+S3 backend). If you decide to use a less reliable backend, you should
+keep the following warning in mind and read this section carefully.</p>
+<div class="admonition warning">
+<p class="first admonition-title">Warning</p>
+<p class="last">S3QL is not able to compensate for any failures of the backend. In
+particular, it is not able to reconstruct any data that has been lost
+or corrupted by the backend. The persistence and durability of data
+stored in an S3QL file system is limited and determined by the
+backend alone.</p>
+</div>
+<p>On the plus side, if a backend loses or corrupts some of the stored
+data, S3QL <em>will</em> detect the problem. Missing data will be detected
+when running <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt> or when attempting to access the data in the
+mounted file system. In the latter case you will get an I/O error, and
+on unmounting S3QL will warn you that the file system is damaged and
+you need to run <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt>.</p>
+<p><tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt> will report all the affected files and move them into the
+<tt class=" docutils literal"><span class="pre">/lost+found</span></tt> directory of the file system.</p>
+<p>You should be aware that, because of S3QL&#8217;s data de-duplication
+feature, the consequences of a data loss in the backend can be
+significantly more severe than you may expect. More concretely, a data
+loss in the backend at time <em>x</em> may cause data that is written <em>after</em>
+time <em>x</em> to be lost as well. What may happen is this:</p>
+<ol class="arabic simple">
+<li>You store an important file in the S3QL file system.</li>
+<li>The backend loses the data blocks of this file. As long as you
+do not access the file or run <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt>, S3QL
+is not aware that the data has been lost by the backend.</li>
+<li>You save an additional copy of the important file in a different
+location on the same S3QL file system.</li>
+<li>S3QL detects that the contents of the new file are identical to the
+data blocks that have been stored earlier. Since at this point S3QL
+is not aware that these blocks have been lost by the backend, it
+does not save another copy of the file contents in the backend but
+relies on the (presumably) existing blocks instead.</li>
+<li>Therefore, even though you saved another copy, you still do not
+have a backup of the important file (since both copies refer to the
+same data blocks that have been lost by the backend).</li>
+</ol>
+<p>As one can see, this effect becomes less important the more often
+one runs <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt>, since <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt> will make S3QL aware of any
+blocks that the backend may have lost. Figuratively, this establishes
+a &#8220;checkpoint&#8221;: data loss in the backend that occurred before running
+<tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt> can not affect any file system operations performed after
+running <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt>.</p>
+<p>Nevertheless, (as said at the beginning) the recommended way to use
+S3QL is in combination with a sufficiently reliable storage backend.
+In that case none of the above will ever be a concern.</p>
+</div>
+<div class="section" id="the-authinfo-file">
+<h2>The <tt class=" docutils literal"><span class="pre">authinfo</span></tt> file<a class="headerlink" href="#the-authinfo-file" title="Permalink to this headline">¶</a></h2>
+<p>Most backends first try to read the file <tt class=" docutils literal"><span class="pre">~/.s3ql/authinfo</span></tt> to determine
+the username and password for connecting to the remote host. If this
+fails, both username and password are read from the terminal.</p>
+<p>The <tt class=" docutils literal"><span class="pre">authinfo</span></tt> file has to contain entries of the form</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">backend </span><span class="nv">&lt;backend&gt;</span><span class="l"> machine </span><span class="nv">&lt;host&gt;</span><span class="l"> login </span><span class="nv">&lt;user&gt;</span><span class="l"> password </span><span class="nv">&lt;password&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>So to use the login <tt class=" docutils literal"><span class="pre">joe</span></tt> with password <tt class=" docutils literal"><span class="pre">jibbadup</span></tt> when using the SFTP
+backend to connect to the host <tt class=" docutils literal"><span class="pre">backups.joesdomain.com</span></tt>, you would
+specify</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">backend ftp machine backups.joesdomain.com login joe password jibbadup</span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="consistency-guarantees">
+<h2>Consistency Guarantees<a class="headerlink" href="#consistency-guarantees" title="Permalink to this headline">¶</a></h2>
+<p>The different backends provide different types of <em>consistency
+guarantees</em>. Informally, a consistency guarantee tells you how fast
+the backend will apply changes to the stored data.</p>
+<p>S3QL defines the following three levels:</p>
+<ul>
+<li><p class="first"><strong>Read-after-Write Consistency.</strong> This is the strongest consistency
+guarantee. If a backend offers read-after-write consistency, it
+guarantees that as soon as you have committed any changes to the
+backend, subsequent requests will take into account these changes.</p>
+</li>
+<li><p class="first"><strong>Read-after-Create Consistency.</strong> If a backend provides only
+read-after-create consistency, only the creation of a new object is
+guaranteed to be taken into account for subsequent requests. This
+means that, for example, if you overwrite data in an existing
+object, subsequent requests may still return the old data for a
+certain period of time.</p>
+</li>
+<li><p class="first"><strong>Eventual consistency.</strong> This is the lowest consistency level.
+Basically, any changes that you make to the backend may not be
+visible for a certain amount of time after the change has been made.
+However, you are guaranteed that no change will be lost. All changes
+will <em>eventually</em> become visible.</p>
+</li>
+</ul>
+<p>As long as your backend provides read-after-write or read-after-create
+consistency, you do not have to worry about consistency guarantees at
+all. However, if you plan to use a backend with only eventual
+consistency, you have to be a bit careful in some situations.</p>
+<div class="section" id="dealing-with-eventual-consistency">
+<span id="eventual-consistency"></span><h3>Dealing with Eventual Consistency<a class="headerlink" href="#dealing-with-eventual-consistency" title="Permalink to this headline">¶</a></h3>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">The following applies only to storage backends that do not provide
+read-after-create or read-after-write consistency. Currently,
+this is only the Amazon S3 backend <em>if used with the US-Standard
+storage region</em>. If you use a different storage backend, or the S3
+backend with a different storage region, this section does not apply
+to you.</p>
+</div>
+<p>While the file system is mounted, S3QL is able to automatically handle
+all issues related to the weak eventual consistency guarantee.
+However, some issues may arise during the mount process and when the
+file system is checked.</p>
+<p>Suppose that you mount the file system, store some new data, delete
+some old data and unmount it again. Now remember that eventual
+consistency means that there is no guarantee that these changes will
+be visible immediately. At least in theory it is therefore possible
+that if you mount the file system again, S3QL does not see any of the
+changes that you have made and presents you with an &#8220;old version&#8221; of the
+file system without them. Even worse, if you notice the problem and
+unmount the file system, S3QL will upload the old status (which S3QL
+necessarily has to consider as current) and thereby permanently
+overwrite the newer version (even though this change may not become
+immediately visible either).</p>
+<p>The same problem applies when checking the file system. If the backend
+provides S3QL with only partially updated data, S3QL has no way to
+find out if this is a real consistency problem that needs to be fixed or
+if it is only a temporary problem that will resolve itself
+automatically (because there are still changes that have not become
+visible yet).</p>
+<p>While this may seem to be a rather big problem, the likelihood of it
+occurring is rather low. In practice, most storage providers rarely
+need more than a few seconds to apply incoming changes, so to trigger
+this problem one would have to unmount and remount the file system in
+a very short time window. Many people therefore make sure that they
+wait a few minutes between successive mounts (or file system checks)
+and decide that the remaining risk is negligible.</p>
+<p>Nevertheless, the eventual consistency guarantee does not impose an
+upper limit on the time that it may take for a change to become visible.
+Therefore there is no &#8220;totally safe&#8221; waiting time that would completely
+eliminate this problem; a theoretical possibility always remains.</p>
+</div>
+</div>
+<div class="section" id="the-amazon-s3-backend">
+<h2>The Amazon S3 Backend<a class="headerlink" href="#the-amazon-s3-backend" title="Permalink to this headline">¶</a></h2>
+<p>To store your file system in an Amazon S3 bucket, use a storage URL of
+the form <tt class=" docutils literal"><span class="pre">s3://&lt;bucketname&gt;</span></tt>. Bucket names must conform to the <a class="reference external" href="http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html">S3
+Bucket Name Restrictions</a>.</p>
+<p>The S3 backend offers exceptionally strong reliability guarantees. As
+of August 2010, Amazon guarantees a durability of 99.999999999% per
+year. In other words, if you store a thousand million objects then on
+average you would lose less than one object in a hundred years.</p>
+<p>The Amazon S3 backend provides read-after-create consistency for the
+EU, Asia-Pacific and US-West storage regions. <em>For the US-Standard
+storage region, Amazon S3 provides only eventual consistency</em> (please
+refer to <a class="reference internal" href="#eventual-consistency"><em>Dealing with Eventual Consistency</em></a> for information about
+what this entails).</p>
+<p>When connecting to Amazon S3, S3QL uses an unencrypted HTTP
+connection, so if you want your data to stay confidential, you have
+to create the S3QL file system with encryption (this is also the default).</p>
+<p>When reading the authentication information for the S3 backend from
+the <tt class=" docutils literal"><span class="pre">authinfo</span></tt> file, the <tt class=" docutils literal"><span class="pre">host</span></tt> field is ignored, i.e. the first entry
+with <tt class=" docutils literal"><span class="pre">s3</span></tt> as a backend will be used. For example</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">backend s3 machine any login myAWSaccessKeyId password myAwsSecretAccessKey</span>
+</pre></div>
+</div>
+<p>Note that the bucket names come from a global pool, so chances are
+that your favorite name has already been taken by another S3 user.
+Usually a longer bucket name containing some random numbers, like
+<tt class=" docutils literal"><span class="pre">19283712_yourname_s3ql</span></tt>, will work better.</p>
+<p>If you do not already have one, you need to obtain an Amazon S3
+account from <a class="reference external" href="http://aws.amazon.com/">Amazon AWS</a>. The account is
+free; you will pay only for the amount of storage that you actually
+use.</p>
+<p>Note that the login and password for accessing S3 are not the user id
+and password that you use to log into the Amazon Webpage, but the &#8220;AWS
+access key id&#8221; and &#8220;AWS secret access key&#8221; shown under <a class="reference external" href="https://aws-portal.amazon.com/gp/aws/developer/account/index.html?ie=UTF8&amp;action=access-key">My
+Account/Access Identifiers</a>.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p>S3QL also allows you to use <a class="reference external" href="http://aws.amazon.com/s3/#protecting">reduced redundancy storage</a> by using <tt class="docutils literal"><span class="pre">s3rr://</span></tt>
+instead of <tt class="docutils literal"><span class="pre">s3://</span></tt> in the storage url. However, this is not
+recommended. The reason is a combination of three factors:</p>
+<ul class="simple">
+<li>RRS has a relatively low reliability; on average you lose one
+out of every ten-thousand objects a year. So you can expect to
+occasionally lose some data.</li>
+<li>When <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt> asks Amazon S3 for a list of the stored objects,
+this list includes even those objects that have been lost.
+Therefore <tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt> <em>can not detect lost objects</em> and lost data
+will only become apparent when you try to actually read from a
+file whose data has been lost. This is a (very unfortunate)
+peculiarity of Amazon S3.</li>
+<li>Due to the data de-duplication feature of S3QL, unnoticed lost
+objects may cause subsequent data loss later in time (see <a class="reference internal" href="#on-backend-reliability">On
+Backend Reliability</a> for details).</li>
+</ul>
+<p class="last">In other words, you should really only store an S3QL file system
+using RRS if you know exactly what you are getting into.</p>
+</div>
+</div>
+<div class="section" id="the-local-backend">
+<h2>The Local Backend<a class="headerlink" href="#the-local-backend" title="Permalink to this headline">¶</a></h2>
+<p>The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+<tt class=" docutils literal"><span class="pre">local://&lt;path&gt;</span></tt>. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. <tt class=" docutils literal"><span class="pre">local:///var/archive</span></tt>.</p>
+<p>The local backend provides read-after-write consistency.</p>
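+<p>For example, a file system stored in <tt class=" docutils literal"><span class="pre">/var/archive</span></tt> could be mounted on
+the (hypothetical) mountpoint <tt class=" docutils literal"><span class="pre">/mnt/archive</span></tt> with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mount.s3ql local:///var/archive /mnt/archive</span>
+</pre></div>
+</div>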
+</div>
+<div class="section" id="the-sftp-backend">
+<h2>The SFTP Backend<a class="headerlink" href="#the-sftp-backend" title="Permalink to this headline">¶</a></h2>
+<p>The SFTP backend uses the SFTP protocol, which is a file transfer
+protocol similar to FTP, but which runs over an encrypted SSH
+connection. It provides read-after-write consistency.</p>
+<p>Note that the SFTP backend is rather slow and has not been tested
+as extensively as the S3 and Local backends.</p>
+<p>The storage URL for SFTP connections has the form</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">sftp://</span><span class="nv">&lt;host&gt;</span><span class="ge">[:port]</span><span class="l">/</span><span class="nv">&lt;path&gt;</span><span class="l"></span>
+</pre></div>
+</div>
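+<p>For example (host and path are only placeholders):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">sftp://backups.joesdomain.com/home/joe/s3ql_data</span>
+</pre></div>
+</div>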
+<p>The SFTP backend will always ask you for a password if you haven&#8217;t
+defined one in <tt class=" docutils literal"><span class="pre">~/.s3ql/authinfo</span></tt>. However, public key authentication
+is tried first and the password will only be used if the public key
+authentication fails.</p>
+<p>The public and private keys will be read from the standard files in
+<tt class=" docutils literal"><span class="pre">~/.ssh/</span></tt>. Note that S3QL will refuse to connect to a computer with
+unknown host key; to add the key to your local keyring you have to
+establish a connection to that computer with the standard SSH command
+line programs first.</p>
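+<p>For example, with the (hypothetical) host from above, a single
+interactive login is enough to add the host key:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">ssh joe@backups.joesdomain.com</span>
+</pre></div>
+</div>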
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mkfs.html" title="File System Creation"
+ >next</a></li>
+ <li class="right" >
+ <a href="installation.html" title="Installation"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/contrib.html b/doc/html/contrib.html
new file mode 100644
index 0000000..cd77857
--- /dev/null
+++ b/doc/html/contrib.html
@@ -0,0 +1,237 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Contributed Programs &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Tips &amp; Tricks" href="tips.html" />
+ <link rel="prev" title="Checking for Errors" href="fsck.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="tips.html" title="Tips &amp; Tricks"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="fsck.html" title="Checking for Errors"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Contributed Programs</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#benchmark-py">benchmark.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#s3-copy-py">s3_copy.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#pcp-py">pcp.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#s3-backup-sh">s3_backup.sh</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#expire-backups-py">expire_backups.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#s3ql-conf">s3ql.conf</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="contributed-programs">
+<h1>Contributed Programs<a class="headerlink" href="#contributed-programs" title="Permalink to this headline">¶</a></h1>
+<p>S3QL comes with a few contributed programs that are not part of the
+core distribution (and are therefore not installed automatically by
+default), but which may nevertheless be useful. These programs are in
+the <tt class=" docutils literal"><span class="pre">contrib</span></tt> directory of the source distribution or in
+<tt class=" docutils literal"><span class="pre">/usr/share/doc/s3ql/contrib</span></tt> if you installed S3QL from a package.</p>
+<div class="section" id="benchmark-py">
+<h2>benchmark.py<a class="headerlink" href="#benchmark-py" title="Permalink to this headline">¶</a></h2>
+<p>This program measures your uplink bandwidth and compression speed and
+recommends a compression algorithm for optimal throughput.</p>
+</div>
+<div class="section" id="s3-copy-py">
+<h2>s3_copy.py<a class="headerlink" href="#s3-copy-py" title="Permalink to this headline">¶</a></h2>
+<p>This program physically duplicates an Amazon S3 bucket. It can be used to
+migrate buckets to a different storage region or storage class
+(standard or reduced redundancy).</p>
+</div>
+<div class="section" id="pcp-py">
+<span id="pcp"></span><h2>pcp.py<a class="headerlink" href="#pcp-py" title="Permalink to this headline">¶</a></h2>
+<p><tt class="docutils literal"><span class="pre">pcp.py</span></tt> is a wrapper program that starts several rsync processes to
+copy directory trees in parallel. This is important because
+transferring files in parallel significantly enhances performance when
+copying data from an S3QL file system (see <a class="reference internal" href="tips.html#copy-performance"><em>Permanently mounted backup file system</em></a> for
+details).</p>
+<p>To recursively copy the directory <tt class="docutils literal"><span class="pre">/mnt/home-backup</span></tt> into
+<tt class="docutils literal"><span class="pre">/home/joe</span></tt> using 8 parallel processes and preserving permissions,
+you would execute</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">pcp.py -a --processes=8 /mnt/home-backup/ /home/joe</span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="s3-backup-sh">
+<h2>s3_backup.sh<a class="headerlink" href="#s3-backup-sh" title="Permalink to this headline">¶</a></h2>
+<p>This is an example script that demonstrates how to set up a simple but
+powerful backup solution using S3QL and <a class="reference external" href="http://samba.org/rsync">rsync</a>.</p>
+<p>The <tt class=" docutils literal"><span class="pre">s3_backup.sh</span></tt> script automates the following steps:</p>
+<ol class="arabic simple">
+<li>Mount the file system</li>
+<li>Replicate the previous backup with <a class="reference internal" href="special.html#s3qlcp"><em>s3qlcp</em></a></li>
+<li>Update the new copy with the data from the backup source using rsync</li>
+<li>Make the new backup immutable with <a class="reference internal" href="special.html#s3qllock"><em>s3qllock</em></a></li>
+<li>Delete old backups that are no longer needed</li>
+<li>Unmount the file system</li>
+</ol>
+<p>The backups are stored in directories of the form
+<tt class=" docutils literal"><span class="pre">YYYY-MM-DD_HH:mm:SS</span></tt> and the <a class="reference internal" href="#expire-backups-py">expire_backups.py</a> command is used to
+delete old backups.</p>
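+<p>The core of such a script might look like this (a minimal sketch;
+bucket name, mountpoint and backup directory names are only
+placeholders, and the shipped script additionally generates the new
+directory name from the current date and expires old backups):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mount.s3ql s3://my_backup_bucket /mnt/backup</span>
+<span class="l">s3qlcp /mnt/backup/2011-05-01_12:00:00 /mnt/backup/2011-05-13_09:30:00</span>
+<span class="l">rsync -a --delete /home/ /mnt/backup/2011-05-13_09:30:00/</span>
+<span class="l">s3qllock /mnt/backup/2011-05-13_09:30:00</span>
+<span class="l">umount.s3ql /mnt/backup</span>
+</pre></div>
+</div>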
+</div>
+<div class="section" id="expire-backups-py">
+<h2>expire_backups.py<a class="headerlink" href="#expire-backups-py" title="Permalink to this headline">¶</a></h2>
+<p><strong class="program">expire_backups.py</strong> is a program to intelligently remove old
+backups that are no longer needed.</p>
+<p>To define what backups you want to keep for how long, you define a
+number of <em>age ranges</em>. <strong class="program">expire_backups</strong> ensures that you
+will have at least one backup in each age range at all times. It will
+keep exactly as many backups as are required for that and delete any
+backups that become redundant.</p>
+<p>Age ranges are specified by giving a list of range boundaries in terms
+of backup cycles. Every time you create a new backup, the existing
+backups age by one cycle.</p>
+<p>Example: when <strong class="program">expire_backups</strong> is called with the age range
+definition <tt class="docutils literal"><span class="pre">1</span> <span class="pre">3</span> <span class="pre">7</span> <span class="pre">14</span> <span class="pre">31</span></tt>, it will guarantee that you always have the
+following backups available:</p>
+<ol class="arabic simple">
+<li>A backup that is 0 to 1 cycles old (i.e., the most recent backup)</li>
+<li>A backup that is 1 to 3 cycles old</li>
+<li>A backup that is 3 to 7 cycles old</li>
+<li>A backup that is 7 to 14 cycles old</li>
+<li>A backup that is 14 to 31 cycles old</li>
+</ol>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">If you do backups in fixed intervals, then one cycle will be
+equivalent to the backup interval. The advantage of specifying the
+age ranges in terms of backup cycles rather than days or weeks is
+that it allows you to gracefully handle irregular backup intervals.
+Imagine that for some reason you do not turn on your computer for
+one month. Now all your backups are at least a month old, and if you
+had specified the above backup strategy in terms of absolute ages,
+they would all be deleted! Specifying age ranges in terms of backup
+cycles avoids this sort of problem.</p>
+</div>
+<p><strong class="program">expire_backups</strong> usage is simple. It requires backups to have
+names of the form <tt class="docutils literal"><span class="pre">year-month-day_hour:minute:seconds</span></tt>
+(<tt class="docutils literal"><span class="pre">YYYY-MM-DD_HH:mm:ss</span></tt>) and works on all backups in the current
+directory. So for the above backup strategy, the correct invocation
+would be:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">expire_backups.py 1 3 7 14 31</span>
+</pre></div>
+</div>
+<p>When storing your backups on an S3QL file system, you probably want to
+specify the <tt class="docutils literal"><span class="pre">--use-s3qlrm</span></tt> option as well. This tells
+<strong class="program">expire_backups</strong> to use the <a class="reference internal" href="special.html#s3qlrm"><em>s3qlrm</em></a> command to
+delete directories.</p>
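+<p>For the above backup strategy, the invocation would then become:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">expire_backups.py --use-s3qlrm 1 3 7 14 31</span>
+</pre></div>
+</div>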
+<p><strong class="program">expire_backups</strong> uses a &#8220;state file&#8221; to keep track of which
+backups are how many cycles old (since this cannot be inferred from
+the dates contained in the directory names). The standard name for
+this state file is <tt class="file docutils literal"><span class="pre">.expire_backups.dat</span></tt>. If this file gets
+damaged or deleted, <strong class="program">expire_backups</strong> no longer knows the ages
+of the backups and refuses to work. In this case you can use the
+<tt class="cmdopt docutils literal"><span class="pre">--reconstruct-state</span></tt> option to try to reconstruct the state
+from the backup dates. However, the accuracy of this reconstruction
+depends strongly on how rigorous you have been with making backups (it
+is only completely correct if the time between subsequent backups has
+always been exactly the same), so it&#8217;s generally a good idea not to
+tamper with the state file.</p>
+<p>For a full list of available options, run <strong class="program">expire_backups.py
+--help</strong>.</p>
+</div>
+<div class="section" id="s3ql-conf">
+<h2>s3ql.conf<a class="headerlink" href="#s3ql-conf" title="Permalink to this headline">¶</a></h2>
+<p><tt class="docutils literal"><span class="pre">s3ql.conf</span></tt> is an example upstart job definition file. It defines a
+job that automatically mounts an S3QL file system on system start, and
+properly unmounts it when the system is shut down.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="tips.html" title="Tips &amp; Tricks"
+ >next</a></li>
+ <li class="right" >
+ <a href="fsck.html" title="Checking for Errors"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/fsck.html b/doc/html/fsck.html
new file mode 100644
index 0000000..df56342
--- /dev/null
+++ b/doc/html/fsck.html
@@ -0,0 +1,160 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Checking for Errors &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Contributed Programs" href="contrib.html" />
+ <link rel="prev" title="Unmounting" href="umount.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="contrib.html" title="Contributed Programs"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="umount.html" title="Unmounting"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="checking-for-errors">
+<h1>Checking for Errors<a class="headerlink" href="#checking-for-errors" title="Permalink to this headline">¶</a></h1>
+<p>If, for some reason, the file system has not been correctly unmounted,
+or if you suspect that there might be errors, you should run the
+<tt class=" docutils literal"><span class="pre">fsck.s3ql</span></tt> utility. It has the following syntax:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">fsck.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>This command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication info.
+(default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to get
+debug messages from all modules. This option can be
+specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext even
+for unencrypted file systems.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--batch</span></kbd></td>
+<td>If user input is required, exit without prompting.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--force</span></kbd></td>
+<td>Force checking even if file system is marked clean.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
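+<p>For example, to force a check of a file system stored in a
+hypothetical Amazon S3 bucket named <tt class=" docutils literal"><span class="pre">mybucket</span></tt>, even if the
+file system is marked clean, you could run:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">fsck.s3ql --force s3://mybucket</span>
+</pre></div>
+</div>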
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="contrib.html" title="Contributed Programs"
+ >next</a></li>
+ <li class="right" >
+ <a href="umount.html" title="Unmounting"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/index.html b/doc/html/index.html
new file mode 100644
index 0000000..f2e42d1
--- /dev/null
+++ b/doc/html/index.html
@@ -0,0 +1,188 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>S3QL User’s Guide &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="#" />
+ <link rel="next" title="About S3QL" href="about.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="about.html" title="About S3QL"
+ accesskey="N">next</a></li>
+ <li><a href="#">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="#">Table Of Contents</a></h3>
+ <ul>
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="s3ql-user-s-guide">
+<h1>S3QL User&#8217;s Guide<a class="headerlink" href="#s3ql-user-s-guide" title="Permalink to this headline">¶</a></h1>
+<div class="toctree-wrapper compound">
+<ul>
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="about.html#features">Features</a></li>
+<li class="toctree-l2"><a class="reference internal" href="about.html#development-status">Development Status</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="installation.html#dependencies">Dependencies</a></li>
+<li class="toctree-l2"><a class="reference internal" href="installation.html#installing-s3ql">Installing S3QL</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="backends.html#on-backend-reliability">On Backend Reliability</a></li>
+<li class="toctree-l2"><a class="reference internal" href="backends.html#the-authinfo-file">The <tt class=" docutils literal"><span class="pre">authinfo</span></tt> file</a></li>
+<li class="toctree-l2"><a class="reference internal" href="backends.html#consistency-guarantees">Consistency Guarantees</a></li>
+<li class="toctree-l2"><a class="reference internal" href="backends.html#the-amazon-s3-backend">The Amazon S3 Backend</a></li>
+<li class="toctree-l2"><a class="reference internal" href="backends.html#the-local-backend">The Local Backend</a></li>
+<li class="toctree-l2"><a class="reference internal" href="backends.html#the-sftp-backend">The SFTP Backend</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="adm.html#changing-the-passphrase">Changing the Passphrase</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html#upgrading-the-file-system">Upgrading the file system</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html#deleting-a-file-system">Deleting a file system</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html#restoring-metadata-backups">Restoring Metadata Backups</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="mount.html#storing-encryption-passwords">Storing Encryption Passwords</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html#compression-algorithms">Compression Algorithms</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html#parallel-compression">Parallel Compression</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html#notes-about-caching">Notes about Caching</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html#automatic-mounting">Automatic Mounting</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="special.html#snapshotting-and-copy-on-write">Snapshotting and Copy-on-Write</a></li>
+<li class="toctree-l2"><a class="reference internal" href="special.html#getting-statistics">Getting Statistics</a></li>
+<li class="toctree-l2"><a class="reference internal" href="special.html#immutable-trees">Immutable Trees</a></li>
+<li class="toctree-l2"><a class="reference internal" href="special.html#fast-recursive-removal">Fast Recursive Removal</a></li>
+<li class="toctree-l2"><a class="reference internal" href="special.html#runtime-configuration">Runtime Configuration</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="contrib.html#benchmark-py">benchmark.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="contrib.html#s3-copy-py">s3_copy.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="contrib.html#pcp-py">pcp.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="contrib.html#s3-backup-sh">s3_backup.sh</a></li>
+<li class="toctree-l2"><a class="reference internal" href="contrib.html#expire-backups-py">expire_backups.py</a></li>
+<li class="toctree-l2"><a class="reference internal" href="contrib.html#s3ql-conf">s3ql.conf</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="tips.html#permanently-mounted-backup-file-system">Permanently mounted backup file system</a></li>
+<li class="toctree-l2"><a class="reference internal" href="tips.html#improving-copy-performance">Improving copy performance</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="man/mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="man/expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="about.html" title="About S3QL"
+ >next</a></li>
+ <li><a href="#">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/installation.html b/doc/html/installation.html
new file mode 100644
index 0000000..cb7912b
--- /dev/null
+++ b/doc/html/installation.html
@@ -0,0 +1,200 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Installation &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Storage Backends" href="backends.html" />
+ <link rel="prev" title="About S3QL" href="about.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="backends.html" title="Storage Backends"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="about.html" title="About S3QL"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Installation</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#dependencies">Dependencies</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#installing-s3ql">Installing S3QL</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="installation">
+<h1>Installation<a class="headerlink" href="#installation" title="Permalink to this headline">¶</a></h1>
+<p>S3QL depends on several other programs and libraries that have to be
+installed first. The best method to satisfy these dependencies depends
+on your distribution. In some cases S3QL and all its dependencies can
+be installed with as few as three commands, while in other cases more work
+may be required.</p>
+<p>The <a class="reference external" href="http://code.google.com/p/s3ql/w/list">S3QL Wiki</a> contains
+installation instructions for quite a few different Linux
+distributions. You should only use the generic instructions in this
+manual if your distribution is not included in the <a class="reference external" href="http://code.google.com/p/s3ql/w/list?q=label:Installation">distribution-specific
+installation instructions</a> on the wiki.</p>
+<div class="section" id="dependencies">
+<h2>Dependencies<a class="headerlink" href="#dependencies" title="Permalink to this headline">¶</a></h2>
+<p>The following is a list of the programs and libraries required for
+running S3QL. Generally, you should first check if your distribution
+already provides a suitable package and only install from source if
+that is not the case.</p>
+<ul>
+<li><p class="first">Kernel version 2.6.9 or newer. Starting with kernel 2.6.26
+you will get significantly better write performance, so you should
+actually use <em>2.6.26 or newer whenever possible</em>.</p>
+</li>
+<li><p class="first">The <a class="reference external" href="http://fuse.sourceforge.net/">FUSE Library</a> should already be
+installed on your system. However, you have to make sure that you
+have at least version 2.8.0.</p>
+</li>
+<li><p class="first">The <a class="reference external" href="http://pypi.python.org/pypi/pycryptopp">PyCrypto++ Python Module</a>. To check if this module
+is installed, try to execute <tt class=" docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span> <span class="pre">pycryptopp'</span></tt>.</p>
+</li>
+<li><p class="first">The <a class="reference external" href="http://pypi.python.org/pypi/argparse">argparse Python Module</a>. To check if this module is
+installed, try to execute <tt class=" docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span> <span class="pre">argparse;</span> <span class="pre">print</span>
+<span class="pre">argparse.__version__'</span></tt>. If argparse is installed, this will print
+the version number. You need version 1.1 or later.</p>
+</li>
+<li><p class="first">The <a class="reference external" href="http://code.google.com/p/apsw/">APSW Python Module</a>. To check
+which (if any) version of APSW is installed, run the command</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">python -c &#39;import apsw; print apsw.apswversion(), apsw.sqlitelibversion()&#39;</span>
+</pre></div>
+</div>
+<p>If APSW is installed, this should print two version numbers which
+both have to be at least 3.7.0.</p>
+</li>
+<li><p class="first">The <a class="reference external" href="http://pypi.python.org/pypi/pyliblzma">PyLibLZMA Python module</a>. To check if this module
+is installed, execute <tt class=" docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span> <span class="pre">lzma;</span> <span class="pre">print</span>
+<span class="pre">lzma.__version__'</span></tt>. This should print a version number. You need at
+least version 0.5.3.</p>
+</li>
+<li><p class="first">The <a class="reference external" href="http://code.google.com/p/python-llfuse/">Python LLFUSE module</a>. To check if this module
+is installed, execute <tt class=" docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span> <span class="pre">llfuse;</span> <span class="pre">print</span>
+<span class="pre">llfuse.__version__'</span></tt>. This should print a version number. You need at
+least version 0.29.</p>
+<p>Note that earlier S3QL versions shipped with a built-in version of
+this module. If you are upgrading from such a version, make sure to
+completely remove the old S3QL version first.</p>
+</li>
+<li><p class="first">If you want to use the SFTP backend, then you also need the
+<a class="reference external" href="http://www.lag.net/paramiko/">Paramiko Python Module</a>. To check
+if this module is installed, try to execute <tt class=" docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span>
+<span class="pre">paramiko'</span></tt>.</p>
+</li>
+</ul>
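+<p>As a convenience, the individual checks above can be combined into a
+single shell loop, for example as follows (a sketch only; it checks
+that the modules can be imported, but not their version numbers):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">for mod in pycryptopp argparse apsw lzma llfuse paramiko; do</span>
+<span class="l">    python -c "import $mod" 2&gt;/dev/null &amp;&amp; echo "$mod: installed" || echo "$mod: MISSING"</span>
+<span class="l">done</span>
+</pre></div>
+</div>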
+</div>
+<div class="section" id="installing-s3ql">
+<span id="inst-s3ql"></span><h2>Installing S3QL<a class="headerlink" href="#installing-s3ql" title="Permalink to this headline">¶</a></h2>
+<p>To install S3QL itself, proceed as follows:</p>
+<ol class="arabic simple">
+<li>Download S3QL from <a class="reference external" href="http://code.google.com/p/s3ql/downloads/list">http://code.google.com/p/s3ql/downloads/list</a></li>
+<li>Unpack it into a folder of your choice</li>
+<li>Run <tt class=" docutils literal"><span class="pre">python</span> <span class="pre">setup.py</span> <span class="pre">test</span></tt> to run a self-test. If this fails, ask
+for help on the <a class="reference external" href="http://groups.google.com/group/s3ql">mailing list</a> or report a bug in the
+<a class="reference external" href="http://code.google.com/p/s3ql/issues/list">issue tracker</a>.</li>
+</ol>
+<p>Now you have three options:</p>
+<ul class="simple">
+<li>You can run the S3QL commands from the <tt class=" docutils literal"><span class="pre">bin/</span></tt> directory.</li>
+<li>You can install S3QL system-wide for all users. To do that, you
+have to run <tt class=" docutils literal"><span class="pre">sudo</span> <span class="pre">python</span> <span class="pre">setup.py</span> <span class="pre">install</span></tt>.</li>
+<li>You can install S3QL into <tt class=" docutils literal"><span class="pre">~/.local</span></tt> by executing <tt class=" docutils literal"><span class="pre">python</span>
+<span class="pre">setup.py</span> <span class="pre">install</span> <span class="pre">--user</span></tt>. In this case you should make sure that
+<tt class=" docutils literal"><span class="pre">~/.local/bin</span></tt> is in your <tt class=" docutils literal"><span class="pre">$PATH</span></tt> environment variable.</li>
+</ul>
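+<p>If you choose the last option and use a Bourne-style shell, you can
+add <tt class=" docutils literal"><span class="pre">~/.local/bin</span></tt> to your search path with, for example:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">export PATH="$HOME/.local/bin:$PATH"</span>
+</pre></div>
+</div>
+<p>To make this change permanent, add the same line to e.g. <tt class=" docutils literal"><span class="pre">~/.profile</span></tt>.</p>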
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="backends.html" title="Storage Backends"
+ >next</a></li>
+ <li class="right" >
+ <a href="about.html" title="About S3QL"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/issues.html b/doc/html/issues.html
new file mode 100644
index 0000000..3acafe7
--- /dev/null
+++ b/doc/html/issues.html
@@ -0,0 +1,191 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Known Issues &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Manpages" href="man/index.html" />
+ <link rel="prev" title="Tips &amp; Tricks" href="tips.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="man/index.html" title="Manpages"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="tips.html" title="Tips &amp; Tricks"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="known-issues">
+<h1>Known Issues<a class="headerlink" href="#known-issues" title="Permalink to this headline">¶</a></h1>
+<ul>
+<li><p class="first">S3QL is rather slow when an application tries to write data in
+unreasonably small chunks. If a 1 MB file is copied in chunks of 1
+KB, this will take more than 10 times as long as when it&#8217;s copied
+with the (recommended) chunk size of 128 KB.</p>
+<p>This is a limitation of the FUSE library (which does not yet support
+write caching); it will hopefully be addressed in a future FUSE
+version.</p>
+<p>Most applications, including e.g. GNU <tt class=" docutils literal"><span class="pre">cp</span></tt> and <tt class=" docutils literal"><span class="pre">rsync</span></tt>, use
+reasonably large buffers and are therefore not affected by this
+problem and perform very efficiently on S3QL file systems.</p>
+<p>However, if you encounter unexpectedly slow performance with a
+specific program, this might be due to the program using very small
+write buffers. Although this is not really a bug in the program,
+it might be worth asking the program&#8217;s authors for help. A simple
+way to observe the effect yourself is shown in the first example
+after this list.</p>
+</li>
+<li><p class="first">S3QL always updates file and directory access times as if the <tt class="docutils literal"><span class="pre">relatime</span></tt>
+mount option had been specified: the access time (&#8220;atime&#8221;) is only updated
+if it is currently earlier than either the status change time
+(&#8220;ctime&#8221;) or modification time (&#8220;mtime&#8221;).</p>
+</li>
+<li><p class="first">S3QL directories always have an <tt class=" docutils literal"><span class="pre">st_nlink</span></tt> value of 1. This may confuse
+programs that rely on directories having <tt class=" docutils literal"><span class="pre">st_nlink</span></tt> values of <em>(2 +
+number of sub directories)</em>.</p>
+<p>Note that this is not a bug in S3QL. Including sub directories in
+the <tt class=" docutils literal"><span class="pre">st_nlink</span></tt> value is a Unix convention, but by no means a
+requirement. If an application blindly relies on this convention
+being followed, then this is a bug in the application.</p>
+<p>Prominent examples are early versions of GNU find, which required
+the <tt class=" docutils literal"><span class="pre">--noleaf</span></tt> option to work correctly on S3QL file systems. This
+bug has already been fixed in recent find versions.</p>
+</li>
+<li><p class="first">In theory, S3QL is not fully compatible with NFS. Since S3QL does
+not support <em>inode generation numbers</em>, NFS clients may (once again,
+in theory) accidentally read or write the wrong file in the
+following situation:</p>
+<ol class="arabic simple">
+<li>An S3QL file system is exported over NFS</li>
+<li>NFS client 1 opens a file A</li>
+<li>Another NFS client 2 (or the server itself) deletes file A (without
+client 1 knowing about this)</li>
+<li>A new file B is created by either of the clients or the server</li>
+<li>NFS client 1 tries to read or write file A (which has actually already been deleted).</li>
+</ol>
+<p>In this situation it is possible that NFS client 1 actually writes
+or reads the newly created file B instead. The chances of this are 1
+in (2^32 - <em>n</em>), where <em>n</em> is the total number of directory entries
+in the S3QL file system (as displayed by <tt class=" docutils literal"><span class="pre">s3qlstat</span></tt>).</p>
+<p>Luckily, as long as you have fewer than about two billion
+directory entries (2^31), the chances of this happening are
+negligible and you don&#8217;t have to worry about it.</p>
+</li>
+<li><p class="first">The <tt class=" docutils literal"><span class="pre">umount</span></tt> and <tt class=" docutils literal"><span class="pre">fusermount</span> <span class="pre">-u</span></tt> commands will <em>not</em> block until all
+data has been uploaded to the backend (this is a FUSE limitation
+that will hopefully be removed in the future, see <a class="reference external" href="http://code.google.com/p/s3ql/issues/detail?id=159">issue 159</a>). If you use
+either command to unmount an S3QL file system, you have to take care
+to explicitly wait for the <tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> process to terminate before
+you shut down or restart the system (see the sketch after this list). Therefore it is generally not a
+good idea to mount an S3QL file system in <tt class=" docutils literal"><span class="pre">/etc/fstab</span></tt> (you should
+use a dedicated init script instead).</p>
+</li>
+<li><p class="first">S3QL relies on the backends not to run out of space. This is a given
+for big storage providers like Amazon S3, but you may stumble upon
+this if you store buckets e.g. on a small sftp server.</p>
+<p>If there is no space left in the backend, attempts to write more
+data into the S3QL file system will fail, and the file system will be
+left in an inconsistent state that requires a file system check (make
+sure to free up space in the backend before running the check).</p>
+<p>Unfortunately, there is no way to handle insufficient space in the
+backend without leaving the file system inconsistent. Since
+S3QL first writes data into the cache, it can no longer return an
+error when it later turns out that the cache can not be committed to
+the backend.</p>
+</li>
+</ul>
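+<p>To observe the effect of the write chunk size yourself, you can
+compare invocations like the following two on a mounted file system
+(the mount point <tt class=" docutils literal"><span class="pre">/mnt/s3ql</span></tt> is, of course, just an example):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">dd if=/dev/zero of=/mnt/s3ql/testfile bs=128k count=8</span>
+<span class="l">dd if=/dev/zero of=/mnt/s3ql/testfile bs=1k count=1024</span>
+</pre></div>
+</div>
+<p>Both commands write 1 MB in total, but the first writes it in 128 KB
+chunks and the second in 1 KB chunks; the second can be expected to
+take many times longer.</p>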
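+<p>If you do use plain <tt class=" docutils literal"><span class="pre">umount</span></tt> in a shutdown script, waiting for
+the <tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> process might look like the following sketch (which
+assumes that <tt class=" docutils literal"><span class="pre">pgrep</span></tt> is available and that only one S3QL file
+system is mounted):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">umount /mnt/s3ql</span>
+<span class="l">while pgrep -f mount.s3ql &gt; /dev/null; do sleep 1; done</span>
+</pre></div>
+</div>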
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="man/index.html" title="Manpages"
+ >next</a></li>
+ <li class="right" >
+ <a href="tips.html" title="Tips &amp; Tricks"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/adm.html b/doc/html/man/adm.html
new file mode 100644
index 0000000..7345230
--- /dev/null
+++ b/doc/html/man/adm.html
@@ -0,0 +1,239 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The s3qladm command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The mount.s3ql command" href="mount.html" />
+ <link rel="prev" title="The mkfs.s3ql command" href="mkfs.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mount.html" title="The mount.s3ql command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="mkfs.html" title="The mkfs.s3ql command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-s3qladm-command">
+<h1>The <strong class="program">s3qladm</strong> command<a class="headerlink" href="#the-s3qladm-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;action&gt;</span><span class="l"> </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>where <tt class="var docutils literal"><span class="pre">action</span></tt> may be one of <strong class="program">passphrase</strong>,
+<strong class="program">upgrade</strong>, <strong class="program">delete</strong> or <strong class="program">download-metadata</strong>.</p>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="program">s3qladm</strong> command performs various operations on S3QL buckets.
+The file system contained in the bucket <em>must not be mounted</em> when
+using <strong class="program">s3qladm</strong> or things will go badly wrong.</p>
+<p>The form of the storage url depends on the backend that is used. The
+following backends are supported:</p>
+<div class="section" id="amazon-s3">
+<h3>Amazon S3<a class="headerlink" href="#amazon-s3" title="Permalink to this headline">¶</a></h3>
+<p>To store your file system in an Amazon S3 bucket, use a storage URL of
+the form <tt class=" docutils literal"><span class="pre">s3://&lt;bucketname&gt;</span></tt>. Bucket names must conform to the S3 Bucket
+Name Restrictions.</p>
+</div>
+<div class="section" id="local">
+<h3>Local<a class="headerlink" href="#local" title="Permalink to this headline">¶</a></h3>
+<p>The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+<tt class=" docutils literal"><span class="pre">local://&lt;path&gt;</span></tt>. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. <tt class=" docutils literal"><span class="pre">local:///var/archive</span></tt>.</p>
+</div>
+<div class="section" id="sftp">
+<h3>SFTP<a class="headerlink" href="#sftp" title="Permalink to this headline">¶</a></h3>
+<p>The storage URL for SFTP connections has the form</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">sftp://</span><span class="nv">&lt;host&gt;</span><span class="ge">[:port]</span><span class="l">/</span><span class="nv">&lt;path&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
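+<p>For example, the following are all syntactically valid storage URLs
+(bucket name, paths and host name are placeholders):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3://mybucket</span>
+<span class="l">local:///var/archive</span>
+<span class="l">sftp://backup.example.com:2022/home/user/s3ql_bucket</span>
+</pre></div>
+</div>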
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="program">s3qladm</strong> command accepts the following options.</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to get
+debug messages from all modules. This option can be
+specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication info.
+(default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+<p>Hint: run <tt class=" docutils literal"><span class="pre">s3qladm</span> <span class="pre">&lt;action&gt;</span> <span class="pre">--help</span></tt> to get help on the additional
+arguments that the different actions take.</p>
+</div>
+<div class="section" id="actions">
+<h2>Actions<a class="headerlink" href="#actions" title="Permalink to this headline">¶</a></h2>
+<p>The following actions may be specified:</p>
+<dl class="docutils">
+<dt>passphrase</dt>
+<dd>Changes the encryption passphrase of the bucket.</dd>
+<dt>upgrade</dt>
+<dd>Upgrade the file system contained in the bucket to the newest revision.</dd>
+<dt>delete</dt>
+<dd>Delete the bucket and all its contents.</dd>
+<dt>download-metadata</dt>
+<dd>Interactively download backups of the file system metadata.</dd>
+</dl>
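+<p>For example, to change the encryption passphrase of a file system
+stored in a hypothetical Amazon S3 bucket named <tt class=" docutils literal"><span class="pre">mybucket</span></tt>:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qladm passphrase s3://mybucket</span>
+</pre></div>
+</div>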
+</div>
+<div class="section" id="files">
+<h2>Files<a class="headerlink" href="#files" title="Permalink to this headline">¶</a></h2>
+<p>Authentication data for backends and bucket encryption passphrases are
+read from <tt class="file docutils literal"><span class="pre">authinfo</span></tt> in <tt class="file docutils literal"><span class="pre">~/.s3ql</span></tt> or the directory
+specified with <tt class="cmdopt docutils literal"><span class="pre">--homedir</span></tt>. Log files are placed in the same
+directory.</p>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="program">s3qladm</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mount.html" title="The mount.s3ql command"
+ >next</a></li>
+ <li class="right" >
+ <a href="mkfs.html" title="The mkfs.s3ql command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/cp.html b/doc/html/man/cp.html
new file mode 100644
index 0000000..bfc9248
--- /dev/null
+++ b/doc/html/man/cp.html
@@ -0,0 +1,225 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The s3qlcp command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The s3qlrm command" href="rm.html" />
+ <link rel="prev" title="The s3qlctrl command" href="ctrl.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="rm.html" title="The s3qlrm command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="ctrl.html" title="The s3qlctrl command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-s3qlcp-command">
+<h1>The <strong class="program">s3qlcp</strong> command<a class="headerlink" href="#the-s3qlcp-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlcp </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;source-dir&gt;</span><span class="l"> </span><span class="nv">&lt;dest-dir&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlcp</strong> command duplicates the directory tree <tt class="var docutils literal"><span class="pre">source-dir</span></tt>
+into <tt class="var docutils literal"><span class="pre">dest-dir</span></tt> without physically copying the file contents.
+Both source and destination must lie inside the same S3QL file system.</p>
+<p>The replication will not take any additional space. Only if one of
+the directories is modified later on will the modified data take up
+additional storage space.</p>
+<p><tt class=" docutils literal"><span class="pre">s3qlcp</span></tt> can only be called by the user that mounted the file system
+and (if the file system was mounted with <tt class=" docutils literal"><span class="pre">--allow-other</span></tt> or <tt class=" docutils literal"><span class="pre">--allow-root</span></tt>)
+the root user. This limitation might be removed in the future (see <a class="reference external" href="http://code.google.com/p/s3ql/issues/detail?id=155">issue 155</a>).</p>
+<p>Note that:</p>
+<ul class="simple">
+<li>After the replication, both source and target directory will still
+be completely ordinary directories. You can regard <tt class=" docutils literal"><span class="pre">&lt;src&gt;</span></tt> as a
+snapshot of <tt class=" docutils literal"><span class="pre">&lt;target&gt;</span></tt> or vice versa. However, the most common
+usage of <tt class=" docutils literal"><span class="pre">s3qlcp</span></tt> is to regularly duplicate the same source
+directory, say <tt class=" docutils literal"><span class="pre">documents</span></tt>, to different target directories. For
+example, with monthly replication the target directories would typically be
+named something like <tt class=" docutils literal"><span class="pre">documents_January</span></tt> for the replication in
+January, <tt class=" docutils literal"><span class="pre">documents_February</span></tt> for the replication in February, etc.
+In this case it is clear that the target directories should be
+regarded as snapshots of the source directory (see the example after
+this list).</li>
+<li>Exactly the same effect could be achieved by an ordinary copy
+program like <tt class=" docutils literal"><span class="pre">cp</span> <span class="pre">-a</span></tt>. However, this procedure would be orders of
+magnitude slower, because <tt class=" docutils literal"><span class="pre">cp</span></tt> would have to read every file
+completely (so that S3QL would have to fetch all the data over the network
+from the backend) before writing it into the destination folder.</li>
+<li>Before starting with the replication, S3QL has to flush the local
+cache. So if you just copied lots of new data into the file system
+that has not yet been uploaded, replication will take longer than
+usual.</li>
+</ul>
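+<p>For example, assuming the file system is mounted at the hypothetical
+mount point <tt class=" docutils literal"><span class="pre">/mnt/s3ql</span></tt>, the January snapshot mentioned above
+could be created with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlcp /mnt/s3ql/documents /mnt/s3ql/documents_January</span>
+</pre></div>
+</div>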
+<div class="section" id="snapshotting-vs-hardlinking">
+<h3>Snapshotting vs Hardlinking<a class="headerlink" href="#snapshotting-vs-hardlinking" title="Permalink to this headline">¶</a></h3>
+<p>Snapshot support in S3QL is inspired by the hardlinking feature that
+is offered by programs like <a class="reference external" href="http://www.samba.org/rsync">rsync</a> or
+<a class="reference external" href="http://savannah.nongnu.org/projects/storebackup">storeBackup</a>.
+These programs can create a hardlink instead of copying a file if an
+identical file already exists in the backup. However, using hardlinks
+has two large disadvantages:</p>
+<ul class="simple">
+<li>backups and restores always have to be made with a special program
+that takes care of the hardlinking. The backup must not be touched
+by any other programs (they may make changes that inadvertently
+affect other hardlinked files)</li>
+<li>special care needs to be taken to handle files which are already
+hardlinked (the restore program needs to know that the hardlink was
+not just introduced by the backup program to save space)</li>
+</ul>
+<p>S3QL snapshots do not have these problems, and they can be used with
+any backup program.</p>
+</div>
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlcp</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">s3qlcp</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="rm.html" title="The s3qlrm command"
+ >next</a></li>
+ <li class="right" >
+ <a href="ctrl.html" title="The s3qlctrl command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/ctrl.html b/doc/html/man/ctrl.html
new file mode 100644
index 0000000..b6acd2e
--- /dev/null
+++ b/doc/html/man/ctrl.html
@@ -0,0 +1,208 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The s3qlctrl command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The s3qlcp command" href="cp.html" />
+ <link rel="prev" title="The s3qlstat command" href="stat.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="cp.html" title="The s3qlcp command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="stat.html" title="The s3qlstat command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-s3qlctrl-command">
+<h1>The <strong class="program">s3qlctrl</strong> command<a class="headerlink" href="#the-s3qlctrl-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlctrl </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;action&gt;</span><span class="l"> </span><span class="nv">&lt;mountpoint&gt;</span><span class="l"> ...</span>
+</pre></div>
+</div>
+<p>where <tt class="var docutils literal"><span class="pre">action</span></tt> may be any of <strong class="program">flushcache</strong>,
+<strong class="program">upload-meta</strong>, <strong class="program">cachesize</strong> or
+<strong class="program">log</strong>.</p>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlctrl</strong> command performs various actions on the S3QL file system mounted
+in <tt class="var docutils literal"><span class="pre">mountpoint</span></tt>.</p>
+<p>The following actions may be specified:</p>
+<dl class="docutils">
+<dt>flushcache</dt>
+<dd>Uploads all changed file data to the backend.</dd>
+<dt>upload-meta</dt>
+<dd>Upload metadata to the backend. All file system operations will
+block while a snapshot of the metadata is prepared for upload.</dd>
+<dt>cachesize</dt>
+<dd><p class="first">Changes the cache size of the file system. This action requires an
+additional argument that specifies the new cache size in kB, so the
+complete command line is:</p>
+<div class="last highlight-commandline"><div class="highlight"><pre><span class="l">s3qlctrl </span><span class="ge">[options]</span><span class="l"> cachesize </span><span class="nv">&lt;mountpoint&gt;</span><span class="l"> </span><span class="nv">&lt;new-cache-size&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</dd>
+<dt>log</dt>
+<dd><p class="first">Change the amount of information that is logged into
+<tt class="file docutils literal"><span class="pre">~/.s3ql/mount.log</span></tt> file. The complete syntax is:</p>
+<div class="highlight-commandline"><pre>s3qlctrl [options] log &lt;mountpoint&gt; &lt;level&gt; [&lt;module&gt; [&lt;module&gt; ...]]</pre>
+</div>
+<p class="last">here <tt class="var docutils literal"><span class="pre">level</span></tt> is the desired new log level and may be either of
+<em>debug</em>, <em>info</em> or <em>warn</em>. One or more <tt class="var docutils literal"><span class="pre">module</span></tt> may only be
+specified with the <em>debug</em> level and allow to restrict the debug
+output to just the listed modules.</p>
+</dd>
+</dl>
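+<p>For example, assuming a file system mounted at the hypothetical mount
+point <tt class=" docutils literal"><span class="pre">/mnt/s3ql</span></tt>, the cache size could be set to about 200 MB
+(204800 kB) and the log level raised to <em>debug</em> with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlctrl cachesize /mnt/s3ql 204800</span>
+<span class="l">s3qlctrl log /mnt/s3ql debug</span>
+</pre></div>
+</div>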
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlctrl</strong> command also accepts the following options, no matter
+what specific action is being invoked:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+<p>Hint: run <tt class=" docutils literal"><span class="pre">s3qlctrl</span> <span class="pre">&lt;action&gt;</span> <span class="pre">--help</span></tt> to get help on the additional
+arguments that the different actions take.</p>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">s3qlctrl</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="cp.html" title="The s3qlcp command"
+ >next</a></li>
+ <li class="right" >
+ <a href="stat.html" title="The s3qlstat command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/expire_backups.html b/doc/html/man/expire_backups.html
new file mode 100644
index 0000000..e50daa6
--- /dev/null
+++ b/doc/html/man/expire_backups.html
@@ -0,0 +1,242 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The expire_backups command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="Further Resources / Getting Help" href="../resources.html" />
+ <link rel="prev" title="The pcp command" href="pcp.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="../resources.html" title="Further Resources / Getting Help"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="pcp.html" title="The pcp command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-expire-backups-command">
+<h1>The <strong class="program">expire_backups</strong> command<a class="headerlink" href="#the-expire-backups-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">expire_backups </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;age&gt;</span><span class="l"> </span><span class="ge">[&lt;age&gt; ...]</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">expire_backups</strong> command intelligently removes old backups that are no
+longer needed.</p>
+<p>To define what backups you want to keep for how long, you define a
+number of <em>age ranges</em>. <strong class="program">expire_backups</strong> ensures that you
+will have at least one backup in each age range at all times. It will
+keep exactly as many backups as are required for that and delete any
+backups that become redundant.</p>
+<p>Age ranges are specified by giving a list of range boundaries in terms
+of backup cycles. Every time you create a new backup, the existing
+backups age by one cycle.</p>
+<p>Example: when <strong class="program">expire_backups</strong> is called with the age range
+definition <tt class="docutils literal"><span class="pre">1</span> <span class="pre">3</span> <span class="pre">7</span> <span class="pre">14</span> <span class="pre">31</span></tt>, it will guarantee that you always have the
+following backups available:</p>
+<ol class="arabic simple">
+<li>A backup that is 0 to 1 cycles old (i.e., the most recent backup)</li>
+<li>A backup that is 1 to 3 cycles old</li>
+<li>A backup that is 3 to 7 cycles old</li>
+<li>A backup that is 7 to 14 cycles old</li>
+<li>A backup that is 14 to 31 cycles old</li>
+</ol>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">If you do backups in fixed intervals, then one cycle will be
+equivalent to the backup interval. The advantage of specifying the
+age ranges in terms of backup cycles rather than days or weeks is
+that it allows you to gracefully handle irregular backup intervals.
+Imagine that for some reason you do not turn on your computer for
+one month. Now all your backups are at least a month old, and if you
+had specified the above backup strategy in terms of absolute ages,
+they would all be deleted! Specifying age ranges in terms of backup
+cycles avoids this sort of problem.</p>
+</div>
+<p><strong class="program">expire_backups</strong> usage is simple. It requires backups to have
+names of the form <tt class="docutils literal"><span class="pre">year-month-day_hour:minute:seconds</span></tt>
+(<tt class="docutils literal"><span class="pre">YYYY-MM-DD_HH:mm:ss</span></tt>) and works on all backups in the current
+directory. So for the above backup strategy, the correct invocation
+would be:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">expire_backups.py 1 3 7 14 31</span>
+</pre></div>
+</div>
+<p>When storing your backups on an S3QL file system, you probably want to
+specify the <tt class="docutils literal"><span class="pre">--use-s3qlrm</span></tt> option as well. This tells
+<strong class="program">expire_backups</strong> to use the <a class="reference internal" href="../special.html#s3qlrm"><em>s3qlrm</em></a> command to
+delete directories.</p>
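+<p>For the above backup strategy on an S3QL file system, the invocation
+would thus be:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">expire_backups.py --use-s3qlrm 1 3 7 14 31</span>
+</pre></div>
+</div>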
+<p><strong class="program">expire_backups</strong> uses a &#8220;state file&#8221; to keep track which
+backups are how many cycles old (since this cannot be inferred from
+the dates contained in the directory names). The standard name for
+this state file is <tt class="file docutils literal"><span class="pre">.expire_backups.dat</span></tt>. If this file gets
+damaged or deleted, <strong class="program">expire_backups</strong> no longer knows the ages
+of the backups and refuses to work. In this case you can use the
+<tt class="cmdopt docutils literal"><span class="pre">--reconstruct-state</span></tt> option to try to reconstruct the state
+from the backup dates. However, the accuracy of this reconstruction
+depends strongly on how rigorous you have been with making backups (it
+is only completely correct if the time between subsequent backups has
+always been exactly the same), so it&#8217;s generally a good idea not to
+tamper with the state file.</p>
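+<p>With the caveats just described in mind, a damaged state file for the
+above backup strategy could be rebuilt with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">expire_backups.py --reconstruct-state 1 3 7 14 31</span>
+</pre></div>
+</div>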
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">expire_backups</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--state <var>&lt;file&gt;</var></span></kbd></td>
+<td>File to save state information in (default:
+&#8220;.expire_backups.dat&#8221;)</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">-n</span></kbd></td>
+<td>Dry run. Just show which backups would be deleted.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--reconstruct-state</span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Try to reconstruct a missing state file from backup
+dates.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--use-s3qlrm</span></kbd></td>
+<td>Use <tt class=" docutils literal"><span class="pre">s3qlrm</span></tt> command to delete backups.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">expire_backups</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">expire_backups</strong> is shipped as part of S3QL, <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="../resources.html" title="Further Resources / Getting Help"
+ >next</a></li>
+ <li class="right" >
+ <a href="pcp.html" title="The pcp command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/fsck.html b/doc/html/man/fsck.html
new file mode 100644
index 0000000..363ea51
--- /dev/null
+++ b/doc/html/man/fsck.html
@@ -0,0 +1,227 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The fsck.s3ql command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The pcp command" href="pcp.html" />
+ <link rel="prev" title="The umount.s3ql command" href="umount.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="pcp.html" title="The pcp command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="umount.html" title="The umount.s3ql command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-fsck-s3ql-command">
+<h1>The <strong class="program">fsck.s3ql</strong> command<a class="headerlink" href="#the-fsck-s3ql-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">fsck.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">fsck.s3ql</strong> command checks the file system in the location
+specified by <em>storage url</em> for errors and attempts to repair any
+problems.</p>
+<p>The form of the storage url depends on the backend that is used. The
+following backends are supported:</p>
+<div class="section" id="amazon-s3">
+<h3>Amazon S3<a class="headerlink" href="#amazon-s3" title="Permalink to this headline">¶</a></h3>
+<p>To store your file system in an Amazon S3 bucket, use a storage URL of
+the form <tt class=" docutils literal"><span class="pre">s3://&lt;bucketname&gt;</span></tt>. Bucket names must conform to the S3 Bucket
+Name Restrictions.</p>
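+<p>For example, to check a file system stored in a hypothetical bucket
+named <tt class=" docutils literal"><span class="pre">mybucket</span></tt>:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">fsck.s3ql s3://mybucket</span>
+</pre></div>
+</div>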
+</div>
+<div class="section" id="local">
+<h3>Local<a class="headerlink" href="#local" title="Permalink to this headline">¶</a></h3>
+<p>The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+<tt class=" docutils literal"><span class="pre">local://&lt;path&gt;</span></tt>. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. <tt class=" docutils literal"><span class="pre">local:///var/archive</span></tt>.</p>
+</div>
+<div class="section" id="sftp">
+<h3>SFTP<a class="headerlink" href="#sftp" title="Permalink to this headline">¶</a></h3>
+<p>The storage URL for SFTP connections has the form</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">sftp://</span><span class="nv">&lt;host&gt;</span><span class="ge">[:port]</span><span class="l">/</span><span class="nv">&lt;path&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">fsck.s3ql</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication info.
+(default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to get
+debug messages from all modules. This option can be
+specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext even
+for unencrypted file systems.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--batch</span></kbd></td>
+<td>If user input is required, exit without prompting.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--force</span></kbd></td>
+<td>Force checking even if file system is marked clean.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="files">
+<h2>Files<a class="headerlink" href="#files" title="Permalink to this headline">¶</a></h2>
+<p>Authentication data for backends and bucket encryption passphrases are
+read from <tt class="file docutils literal"><span class="pre">authinfo</span></tt> in <tt class="file docutils literal"><span class="pre">~/.s3ql</span></tt> or the directory
+specified with <tt class="cmdopt docutils literal"><span class="pre">--homedir</span></tt>. Log files are placed in the same
+directory.</p>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">fsck.s3ql</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="pcp.html" title="The pcp command"
+ >next</a></li>
+ <li class="right" >
+ <a href="umount.html" title="The umount.s3ql command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/index.html b/doc/html/man/index.html
new file mode 100644
index 0000000..a3141b9
--- /dev/null
+++ b/doc/html/man/index.html
@@ -0,0 +1,148 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Manpages &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="next" title="The mkfs.s3ql command" href="mkfs.html" />
+ <link rel="prev" title="Known Issues" href="../issues.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mkfs.html" title="The mkfs.s3ql command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="../issues.html" title="Known Issues"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Manpages</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="manpages">
+<h1>Manpages<a class="headerlink" href="#manpages" title="Permalink to this headline">¶</a></h1>
+<p>The man pages are installed with S3QL on your system and can be viewed
+with the <strong class="command">man</strong> command. For reference, they are also included
+here in the User&#8217;s Guide.</p>
+<div class="toctree-wrapper compound">
+<ul>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l1"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="mkfs.html" title="The mkfs.s3ql command"
+ >next</a></li>
+ <li class="right" >
+ <a href="../issues.html" title="Known Issues"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/lock.html b/doc/html/man/lock.html
new file mode 100644
index 0000000..acb65da
--- /dev/null
+++ b/doc/html/man/lock.html
@@ -0,0 +1,208 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The s3qllock command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The umount.s3ql command" href="umount.html" />
+ <link rel="prev" title="The s3qlrm command" href="rm.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="umount.html" title="The umount.s3ql command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="rm.html" title="The s3qlrm command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-s3qllock-command">
+<h1>The <strong class="program">s3qllock</strong> command<a class="headerlink" href="#the-s3qllock-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qllock </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;directory&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="program">s3qllock</strong> command makes a directory tree in an S3QL file
+system immutable. Immutable trees can no longer be changed in any way
+whatsoever. You cannot add new files or directories and you cannot
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the <strong class="program">s3qlrm</strong> command.</p>
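+<p>For example, a (hypothetical) backup directory
+<tt class=" docutils literal"><span class="pre">documents_January</span></tt> could be made immutable with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qllock documents_January</span>
+</pre></div>
+</div>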
+</div>
+<div class="section" id="rationale">
+<h2>Rationale<a class="headerlink" href="#rationale" title="Permalink to this headline">¶</a></h2>
+<p>Immutability is a feature designed for backups. Traditionally, backups
+have been made on external tape drives. Once a backup was made, the
+tape was removed and locked away on a shelf. This has the
+great advantage that the contents of the backup are now permanently
+fixed. Nothing (short of physical destruction) can change or delete
+files in the backup.</p>
+<p>In contrast, when backing up into an online storage system like S3QL,
+all backups are available every time the file system is mounted.
+Nothing prevents a file in an old backup from being changed again
+later on. In the worst case, this may make your entire backup system
+worthless. Imagine that your system gets infected by a nasty virus
+that simply deletes all files it can find &#8211; if the virus is active
+while the backup file system is mounted, the virus will destroy all
+your old backups as well!</p>
+<p>Even if the possibility of a malicious virus or trojan horse is
+excluded, being able to change a backup after it has been made is
+generally not a good idea. A common S3QL use case is to keep the file
+system mounted at all times and periodically create backups with
+<strong class="program">rsync -a</strong>. This allows every user to recover her files from a
+backup without having to call the system administrator. However, this
+also allows every user to accidentally change or delete files <em>in</em> one
+of the old backups.</p>
+<p>Making a backup immutable protects you against all these problems.
+Unless you happen to run into a virus that was specifically programmed
+to attack S3QL file systems, backups can be neither deleted nor
+changed after they have been made immutable.</p>
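+<p>A typical backup cycle based on this approach might look like the
+following sketch (all paths are hypothetical):</p>
+<div class="highlight-commandline"><div class="highlight"><pre># copy the data into a fresh backup directory on the mounted file system
+rsync -a /home/ /mnt/s3ql/backup-2011-05-01/
+# then freeze the finished backup so it can no longer be modified
+s3qllock /mnt/s3ql/backup-2011-05-01
+</pre></div>
+</div>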
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qllock</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">s3qllock</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="umount.html" title="The umount.s3ql command"
+ >next</a></li>
+ <li class="right" >
+ <a href="rm.html" title="The s3qlrm command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/mkfs.html b/doc/html/man/mkfs.html
new file mode 100644
index 0000000..8117601
--- /dev/null
+++ b/doc/html/man/mkfs.html
@@ -0,0 +1,239 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The mkfs.s3ql command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The s3qladm command" href="adm.html" />
+ <link rel="prev" title="Manpages" href="index.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="adm.html" title="The s3qladm command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="index.html" title="Manpages"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-mkfs-s3ql-command">
+<h1>The <strong class="program">mkfs.s3ql</strong> command<a class="headerlink" href="#the-mkfs-s3ql-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mkfs.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">mkfs.s3ql</strong> command creates a new file system in the location
+specified by <em>storage url</em>.</p>
+<p>The form of the storage url depends on the backend that is used. The
+following backends are supported:</p>
+<div class="section" id="amazon-s3">
+<h3>Amazon S3<a class="headerlink" href="#amazon-s3" title="Permalink to this headline">¶</a></h3>
+<p>To store your file system in an Amazon S3 bucket, use a storage URL of
+the form <tt class=" docutils literal"><span class="pre">s3://&lt;bucketname&gt;</span></tt>. Bucket names must conform to the S3 Bucket
+Name Restrictions.</p>
+</div>
+<div class="section" id="local">
+<h3>Local<a class="headerlink" href="#local" title="Permalink to this headline">¶</a></h3>
+<p>The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+<tt class=" docutils literal"><span class="pre">local://&lt;path&gt;</span></tt>. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. <tt class=" docutils literal"><span class="pre">local:///var/archive</span></tt>.</p>
+</div>
+<div class="section" id="sftp">
+<h3>SFTP<a class="headerlink" href="#sftp" title="Permalink to this headline">¶</a></h3>
+<p>The storage URL for SFTP connections has the form</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">sftp://</span><span class="nv">&lt;host&gt;</span><span class="ge">[:port]</span><span class="l">/</span><span class="nv">&lt;path&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
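+<p>Putting these URL forms together, a new file system could be created
+with commands like the following (bucket, path and host names are
+hypothetical):</p>
+<div class="highlight-commandline"><div class="highlight"><pre>mkfs.s3ql s3://mybucket             # Amazon S3 bucket
+mkfs.s3ql local:///var/archive      # local directory (note the three slashes)
+mkfs.s3ql sftp://example.com/s3ql   # directory on an SFTP server
+</pre></div>
+</div>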
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">mkfs.s3ql</strong> command accepts the following options.</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication
+info. (default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to
+get debug messages from all modules. This option can
+be specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--s3-location <var>&lt;name&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Storage location for new S3 buckets. Allowed values:
+<tt class=" docutils literal"><span class="pre">EU</span></tt>, <tt class=" docutils literal"><span class="pre">us-west-1</span></tt>, <tt class=" docutils literal"><span class="pre">ap-southeast-1</span></tt>, or <tt class=" docutils literal"><span class="pre">us-standard</span></tt>.
+(default: EU)</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">-L <var>&lt;name&gt;</var></span></kbd></td>
+<td>Filesystem label</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--blocksize <var>&lt;size&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Maximum block size in KB (default: 10240)</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--plain</span></kbd></td>
+<td>Create unencrypted file system.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--force</span></kbd></td>
+<td>Overwrite any existing data.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
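+<p>As an illustration, the following hypothetical invocation creates an
+unencrypted file system with a 4 MB maximum block size:</p>
+<div class="highlight-commandline"><div class="highlight"><pre># --blocksize takes a value in KB, so 4096 corresponds to 4 MB
+mkfs.s3ql --plain --blocksize 4096 s3://mybucket
+</pre></div>
+</div>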
+</div>
+<div class="section" id="files">
+<h2>Files<a class="headerlink" href="#files" title="Permalink to this headline">¶</a></h2>
+<p>Authentication data for backends and bucket encryption passphrases are
+read from <tt class="file docutils literal"><span class="pre">authinfo</span></tt> in <tt class="file docutils literal"><span class="pre">~/.s3ql</span></tt> or the directory
+specified with <tt class="cmdopt docutils literal"><span class="pre">--homedir</span></tt>. Log files are placed in the same
+directory.</p>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">mkfs.s3ql</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="adm.html" title="The s3qladm command"
+ >next</a></li>
+ <li class="right" >
+ <a href="index.html" title="Manpages"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/mount.html b/doc/html/man/mount.html
new file mode 100644
index 0000000..5f91bc6
--- /dev/null
+++ b/doc/html/man/mount.html
@@ -0,0 +1,281 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The mount.s3ql command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The s3qlstat command" href="stat.html" />
+ <link rel="prev" title="The s3qladm command" href="adm.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="stat.html" title="The s3qlstat command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="adm.html" title="The s3qladm command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-mount-s3ql-command">
+<h1>The <strong class="program">mount.s3ql</strong> command<a class="headerlink" href="#the-mount-s3ql-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mount.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;storage url&gt;</span><span class="l"> </span><span class="nv">&lt;mount point&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">mount.s3ql</strong> command mounts the S3QL file system stored in <em>storage
+url</em> in the directory <em>mount point</em>.</p>
+<p>The form of the storage url depends on the backend that is used. The
+following backends are supported:</p>
+<div class="section" id="amazon-s3">
+<h3>Amazon S3<a class="headerlink" href="#amazon-s3" title="Permalink to this headline">¶</a></h3>
+<p>To store your file system in an Amazon S3 bucket, use a storage URL of
+the form <tt class=" docutils literal"><span class="pre">s3://&lt;bucketname&gt;</span></tt>. Bucket names must conform to the S3 Bucket
+Name Restrictions.</p>
+</div>
+<div class="section" id="local">
+<h3>Local<a class="headerlink" href="#local" title="Permalink to this headline">¶</a></h3>
+<p>The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+<tt class=" docutils literal"><span class="pre">local://&lt;path&gt;</span></tt>. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. <tt class=" docutils literal"><span class="pre">local:///var/archive</span></tt>.</p>
+</div>
+<div class="section" id="sftp">
+<h3>SFTP<a class="headerlink" href="#sftp" title="Permalink to this headline">¶</a></h3>
+<p>The storage URL for SFTP connections has the form</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">sftp://</span><span class="nv">&lt;host&gt;</span><span class="ge">[:port]</span><span class="l">/</span><span class="nv">&lt;path&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
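+<p>For example, the file system in a hypothetical S3 bucket could be
+mounted at <tt class="file docutils literal"><span class="pre">/mnt/s3ql</span></tt> with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre>mount.s3ql s3://mybucket /mnt/s3ql
+</pre></div>
+</div>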
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">mount.s3ql</strong> command accepts the following options.</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication
+info. (default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to
+get debug messages from all modules. This option can
+be specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--cachesize <var>&lt;size&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Cache size in KB (default: 102400, i.e. 100 MB). Should be
+at least 10 times the blocksize of the filesystem,
+otherwise an object may be retrieved and written
+several times during a single write() or read()
+operation.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--max-cache-entries <var>&lt;num&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Maximum number of entries in cache (default: 768).
+Each cache entry requires one file descriptor, so if
+you increase this number you have to make sure that
+your process file descriptor limit (as set with
+<tt class=" docutils literal"><span class="pre">ulimit</span> <span class="pre">-n</span></tt>) is high enough (at least the number of
+cache entries + 100).</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--allow-other</span></kbd></td>
+<td>Normally, only the user who called <tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> can
+access the mount point. This user then also has full
+access to it, independent of individual file
+permissions. If the <tt class=" docutils literal"><span class="pre">--allow-other</span></tt> option is
+specified, other users can access the mount point as
+well and individual file permissions are taken into
+account for all users.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--allow-root</span></kbd></td>
+<td>Like <tt class=" docutils literal"><span class="pre">--allow-other</span></tt>, but restrict access to the
+mounting user and the root user.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--fg</span></kbd></td>
+<td>Do not daemonize, stay in foreground</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--single</span></kbd></td>
+<td>Run in single threaded mode. If you don&#8217;t understand
+this, then you don&#8217;t need it.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--upstart</span></kbd></td>
+<td>Stay in foreground and raise SIGSTOP once mountpoint
+is up.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--profile</span></kbd></td>
+<td>Create profiling information. If you don&#8217;t understand
+this, then you don&#8217;t need it.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--compress <var>&lt;name&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Compression algorithm to use when storing new data.
+Allowed values: <tt class=" docutils literal"><span class="pre">lzma</span></tt>, <tt class=" docutils literal"><span class="pre">bzip2</span></tt>, <tt class=" docutils literal"><span class="pre">zlib</span></tt>, <tt class=" docutils literal"><span class="pre">none</span></tt>.
+(default: <tt class=" docutils literal"><span class="pre">lzma</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--metadata-upload-interval <var>&lt;seconds&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Interval in seconds between complete metadata uploads.
+Set to 0 to disable. Default: 24h.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--compression-threads <var>&lt;no&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Number of parallel compression and encryption threads
+to use (default: 1).</td></tr>
+</tbody>
+</table>
+</div></blockquote>
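+<p>Combining some of these options, a mount that is accessible to other
+users and trades the default <tt class=" docutils literal"><span class="pre">lzma</span></tt> compression for the faster
+<tt class=" docutils literal"><span class="pre">zlib</span></tt> might be started like this (storage URL and mount point are
+hypothetical):</p>
+<div class="highlight-commandline"><div class="highlight"><pre>mount.s3ql --allow-other --compress zlib --compression-threads 2 s3://mybucket /mnt/s3ql
+</pre></div>
+</div>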
+</div>
+<div class="section" id="files">
+<h2>Files<a class="headerlink" href="#files" title="Permalink to this headline">¶</a></h2>
+<p>Authentication data for backends and bucket encryption passphrases are
+read from <tt class="file docutils literal"><span class="pre">authinfo</span></tt> in <tt class="file docutils literal"><span class="pre">~/.s3ql</span></tt> or the directory
+specified with <tt class="cmdopt docutils literal"><span class="pre">--homedir</span></tt>. Log files are placed in the same
+directory.</p>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">mount.s3ql</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="stat.html" title="The s3qlstat command"
+ >next</a></li>
+ <li class="right" >
+ <a href="adm.html" title="The s3qladm command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/pcp.html b/doc/html/man/pcp.html
new file mode 100644
index 0000000..4dff9aa
--- /dev/null
+++ b/doc/html/man/pcp.html
@@ -0,0 +1,182 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The pcp command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The expire_backups command" href="expire_backups.html" />
+ <link rel="prev" title="The fsck.s3ql command" href="fsck.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="expire_backups.html" title="The expire_backups command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="fsck.html" title="The fsck.s3ql command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-pcp-command">
+<h1>The <strong class="program">pcp</strong> command<a class="headerlink" href="#the-pcp-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">pcp </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;source&gt;</span><span class="l"> </span><span class="ge">[&lt;source&gt; ...]</span><span class="l"> </span><span class="nv">&lt;destination&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">pcp</strong> command is a is a wrapper that starts several
+<strong class="program">sync</strong> processes to copy directory trees in parallel. This is
+allows much better copying performance on file system that have
+relatively high latency when retrieving individual files like S3QL.</p>
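+<p>For instance, two hypothetical source trees could be copied into an
+S3QL mount point with four parallel rsync processes like this:</p>
+<div class="highlight-commandline"><div class="highlight"><pre># -a passes -aHAX to each rsync; --processes caps the number of workers
+pcp -a --processes 4 /home/ /srv/data/ /mnt/s3ql/backup/
+</pre></div>
+</div>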
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">pcp</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">-a</span></kbd></td>
+<td>Pass -aHAX option to rsync.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--processes <var>&lt;no&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Number of rsync processes to use (default: 10).</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">pcp</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">pcp</strong> is shipped as part of S3QL, <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="expire_backups.html" title="The expire_backups command"
+ >next</a></li>
+ <li class="right" >
+ <a href="fsck.html" title="The fsck.s3ql command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/rm.html b/doc/html/man/rm.html
new file mode 100644
index 0000000..2ab13b1
--- /dev/null
+++ b/doc/html/man/rm.html
@@ -0,0 +1,181 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The s3qlrm command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The s3qllock command" href="lock.html" />
+ <link rel="prev" title="The s3qlcp command" href="cp.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="lock.html" title="The s3qllock command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="cp.html" title="The s3qlcp command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-s3qlrm-command">
+<h1>The <strong class="program">s3qlrm</strong> command<a class="headerlink" href="#the-s3qlrm-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlrm </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;directory&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlrm</strong> command recursively deletes files and directories on an
+S3QL file system. Although <strong class="command">s3qlrm</strong> is faster than using e.g.
+<strong class="command">rm -r`</strong>, the main reason for its existence is that it allows
+you to delete immutable trees (which can be created with
+<strong class="program">s3qllock</strong>) as well.</p>
+<p>Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.</p>
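+<p>For example, an old backup directory (the path is hypothetical) could
+be deleted with:</p>
+<div class="highlight-commandline"><div class="highlight"><pre># removes the tree immediately, without further confirmation
+s3qlrm /mnt/s3ql/backup-2010-01-01
+</pre></div>
+</div>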
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlrm</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">s3qlrm</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occurred.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="lock.html" title="The s3qllock command"
+ >next</a></li>
+ <li class="right" >
+ <a href="cp.html" title="The s3qlcp command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/stat.html b/doc/html/man/stat.html
new file mode 100644
index 0000000..9182687
--- /dev/null
+++ b/doc/html/man/stat.html
@@ -0,0 +1,180 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The s3qlstat command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The s3qlctrl command" href="ctrl.html" />
+ <link rel="prev" title="The mount.s3ql command" href="mount.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="ctrl.html" title="The s3qlctrl command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="mount.html" title="The mount.s3ql command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="umount.html">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-s3qlstat-command">
+<h1>The <strong class="program">s3qlstat</strong> command<a class="headerlink" href="#the-s3qlstat-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlstat </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;mountpoint&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlstat</strong> command prints statistics about the S3QL file system mounted
+at <tt class="var docutils literal"><span class="pre">mountpoint</span></tt>.</p>
+<p><strong class="command">s3qlstat</strong> can only be called by the user that mounted the file system
+and (if the file system was mounted with <tt class="cmdopt docutils literal"><span class="pre">--allow-other</span></tt> or
+<tt class="cmdopt docutils literal"><span class="pre">--allow-root</span></tt>) the root user. This limitation might be
+removed in the future (see <a class="reference external" href="http://code.google.com/p/s3ql/issues/detail?id=155">issue 155</a>).</p>
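+<p>For example, to print statistics for a file system mounted at
+<tt class=" docutils literal"><span class="pre">/mnt/s3ql</span></tt> (a placeholder path), one would run:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlstat /mnt/s3ql</span>
+</pre></div>
+</div>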
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">s3qlstat</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">s3qlstat</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occured.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="ctrl.html" title="The s3qlctrl command"
+ >next</a></li>
+ <li class="right" >
+ <a href="mount.html" title="The mount.s3ql command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/man/umount.html b/doc/html/man/umount.html
new file mode 100644
index 0000000..30039fc
--- /dev/null
+++ b/doc/html/man/umount.html
@@ -0,0 +1,190 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>The umount.s3ql command &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '../',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="../_static/jquery.js"></script>
+ <script type="text/javascript" src="../_static/underscore.js"></script>
+ <script type="text/javascript" src="../_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="../about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="../index.html" />
+ <link rel="up" title="Manpages" href="index.html" />
+ <link rel="next" title="The fsck.s3ql command" href="fsck.html" />
+ <link rel="prev" title="The s3qllock command" href="lock.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="fsck.html" title="The fsck.s3ql command"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="lock.html" title="The s3qllock command"
+ accesskey="P">previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="../index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="../about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="../issues.html">Known Issues</a></li>
+<li class="toctree-l1 current"><a class="reference internal" href="index.html">Manpages</a><ul class="current">
+<li class="toctree-l2"><a class="reference internal" href="mkfs.html">The <strong class="program">mkfs.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="adm.html">The <strong class="program">s3qladm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="mount.html">The <strong class="program">mount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="stat.html">The <strong class="program">s3qlstat</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="ctrl.html">The <strong class="program">s3qlctrl</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="cp.html">The <strong class="program">s3qlcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="rm.html">The <strong class="program">s3qlrm</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="lock.html">The <strong class="program">s3qllock</strong> command</a></li>
+<li class="toctree-l2 current"><a class="current reference internal" href="">The <strong class="program">umount.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="fsck.html">The <strong class="program">fsck.s3ql</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="pcp.html">The <strong class="program">pcp</strong> command</a></li>
+<li class="toctree-l2"><a class="reference internal" href="expire_backups.html">The <strong class="program">expire_backups</strong> command</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="../resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="../search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="the-umount-s3ql-command">
+<h1>The <strong class="program">umount.s3ql</strong> command<a class="headerlink" href="#the-umount-s3ql-command" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="synopsis">
+<h2>Synopsis<a class="headerlink" href="#synopsis" title="Permalink to this headline">¶</a></h2>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">umount.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;mount point&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+</div>
+<div class="section" id="description">
+<h2>Description<a class="headerlink" href="#description" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">umount.s3ql</strong> command unmounts the S3QL file system mounted in the
+directory <em>mount point</em> and blocks until all data has been uploaded to
+the storage backend.</p>
+<p>Only the user who mounted the file system with <strong class="program">mount.s3ql</strong>
+is able to unmount it with <strong class="command">umount.s3ql</strong>. If you are root and want to
+unmount an S3QL file system mounted by an ordinary user, you have to
+use the <strong class="program">fusermount -u</strong> or <strong class="command">umount</strong> command instead.
+Note that these commands do not block until all data has been
+uploaded, so if you use them instead of <strong class="program">umount.s3ql</strong> then
+you should manually wait for the <strong class="program">mount.s3ql</strong> process to
+terminate before shutting down the system.</p>
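+<p>As a sketch, a root user could unmount another user&#8217;s file system and
+then wait for the upload to finish like this (the mount point is a
+placeholder):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l"># Detach the file system without waiting for the upload</span>
+<span class="l">fusermount -u /mnt/s3ql</span>
+<span class="l"># mount.s3ql keeps running until all data is uploaded; wait for it</span>
+<span class="l">while pgrep -f mount.s3ql &gt; /dev/null; do sleep 1; done</span>
+</pre></div>
+</div>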
+</div>
+<div class="section" id="options">
+<h2>Options<a class="headerlink" href="#options" title="Permalink to this headline">¶</a></h2>
+<p>The <strong class="command">umount.s3ql</strong> command accepts the following options.</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--lazy</span>, <span class="option">-z</span></kbd></td>
+<td>Lazy umount. Detaches the file system immediately, even if
+there are still open files. The data will be uploaded in the
+background once all open files have been closed.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+</div>
+<div class="section" id="exit-status">
+<h2>Exit Status<a class="headerlink" href="#exit-status" title="Permalink to this headline">¶</a></h2>
+<p><strong class="command">umount.s3ql</strong> returns exit code 0 if the operation succeeded and 1 if some
+error occured.</p>
+</div>
+<div class="section" id="see-also">
+<h2>See Also<a class="headerlink" href="#see-also" title="Permalink to this headline">¶</a></h2>
+<p>The S3QL homepage is at <a class="reference external" href="http://code.google.com/p/s3ql/">http://code.google.com/p/s3ql/</a>.</p>
+<p>The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are <tt class="file docutils literal"><span class="pre">/usr/share/doc/s3ql</span></tt> or
+<tt class="file docutils literal"><span class="pre">/usr/local/doc/s3ql</span></tt>.</p>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="fsck.html" title="The fsck.s3ql command"
+ >next</a></li>
+ <li class="right" >
+ <a href="lock.html" title="The s3qllock command"
+ >previous</a> |</li>
+ <li><a href="../index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ <li><a href="index.html" >Manpages</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/mkfs.html b/doc/html/mkfs.html
new file mode 100644
index 0000000..4acbe34
--- /dev/null
+++ b/doc/html/mkfs.html
@@ -0,0 +1,176 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>File System Creation &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Managing Buckets" href="adm.html" />
+ <link rel="prev" title="Storage Backends" href="backends.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="adm.html" title="Managing Buckets"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="backends.html" title="Storage Backends"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="file-system-creation">
+<h1>File System Creation<a class="headerlink" href="#file-system-creation" title="Permalink to this headline">¶</a></h1>
+<p>An S3QL file system is created with the <tt class=" docutils literal"><span class="pre">mkfs.s3ql</span></tt> command. It has the
+following syntax:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mkfs.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;storage url&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>This command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication
+info. (default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to
+get debug messages from all modules. This option can
+be specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--s3-location <var>&lt;name&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Storage location for new S3 buckets. Allowed values:
+<tt class=" docutils literal"><span class="pre">EU</span></tt>, <tt class=" docutils literal"><span class="pre">us-west-1</span></tt>, <tt class=" docutils literal"><span class="pre">ap-southeast-1</span></tt>, or <tt class=" docutils literal"><span class="pre">us-standard</span></tt>.
+(default: EU)</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">-L <var>&lt;name&gt;</var></span></kbd></td>
+<td>Filesystem label</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--blocksize <var>&lt;size&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Maximum block size in KB (default: 10240)</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--plain</span></kbd></td>
+<td>Create unencrypted file system.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--force</span></kbd></td>
+<td>Overwrite any existing data.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+<p>Unless you have specified the <tt class=" docutils literal"><span class="pre">--plain</span></tt> option, <tt class=" docutils literal"><span class="pre">mkfs.s3ql</span></tt> will ask you
+to enter an encryption password. If you do not want to enter this
+password every time that you mount the file system, you can store it
+in the <tt class=" docutils literal"><span class="pre">~/.s3ql/authinfo</span></tt> file, see <a class="reference internal" href="mount.html#bucket-pw"><em>Storing Encryption Passwords</em></a>.</p>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="adm.html" title="Managing Buckets"
+ >next</a></li>
+ <li class="right" >
+ <a href="backends.html" title="Storage Backends"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/mount.html b/doc/html/mount.html
new file mode 100644
index 0000000..f8de8a4
--- /dev/null
+++ b/doc/html/mount.html
@@ -0,0 +1,407 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Mounting &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Advanced S3QL Features" href="special.html" />
+ <link rel="prev" title="Managing Buckets" href="adm.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="special.html" title="Advanced S3QL Features"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="adm.html" title="Managing Buckets"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Mounting</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#storing-encryption-passwords">Storing Encryption Passwords</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#compression-algorithms">Compression Algorithms</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#parallel-compression">Parallel Compression</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#notes-about-caching">Notes about Caching</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#automatic-mounting">Automatic Mounting</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="mounting">
+<h1>Mounting<a class="headerlink" href="#mounting" title="Permalink to this headline">¶</a></h1>
+<p>An S3QL file system is mounted with the <tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> command. It has
+the following syntax:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mount.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;storage url&gt;</span><span class="l"> </span><span class="nv">&lt;mountpoint&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">S3QL is not a network file system like <a class="reference external" href="http://en.wikipedia.org/wiki/Network_File_System_%28protocol%29">NFS</a>
+or <a class="reference external" href="http://en.wikipedia.org/wiki/CIFS">CIFS</a>. It can only be
+mounted on one computer at a time.</p>
+</div>
+<p>This command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--homedir <var>&lt;path&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Directory for log files, cache and authentication
+info. (default: <tt class=" docutils literal"><span class="pre">~/.s3ql</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--debug <var>&lt;module&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>activate debugging output from &lt;module&gt;. Use <tt class=" docutils literal"><span class="pre">all</span></tt> to
+get debug messages from all modules. This option can
+be specified multiple times.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--ssl</span></kbd></td>
+<td>Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--cachesize <var>&lt;size&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Cache size in KB (default: 102400, i.e. 100 MB). Should be
+at least 10 times the blocksize of the file system,
+otherwise an object may be retrieved and written
+several times during a single write() or read()
+operation.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--max-cache-entries <var>&lt;num&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Maximum number of entries in cache (default: 768).
+Each cache entry requires one file descriptor, so if
+you increase this number you have to make sure that
+your process file descriptor limit (as set with
+<tt class=" docutils literal"><span class="pre">ulimit</span> <span class="pre">-n</span></tt>) is high enough (at least the number of
+cache entries + 100).</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--allow-other</span></kbd></td>
+<td>Normally, only the user who called <tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> can
+access the mount point. This user then also has full
+access to it, independent of individual file
+permissions. If the <tt class=" docutils literal"><span class="pre">--allow-other</span></tt> option is
+specified, other users can access the mount point as
+well and individual file permissions are taken into
+account for all users.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--allow-root</span></kbd></td>
+<td>Like <tt class=" docutils literal"><span class="pre">--allow-other</span></tt>, but restrict access to the
+mounting user and the root user.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--fg</span></kbd></td>
+<td>Do not daemonize, stay in foreground</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--single</span></kbd></td>
+<td>Run in single threaded mode. If you don&#8217;t understand
+this, then you don&#8217;t need it.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--upstart</span></kbd></td>
+<td>Stay in foreground and raise SIGSTOP once mountpoint
+is up.</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--profile</span></kbd></td>
+<td>Create profiling information. If you don&#8217;t understand
+this, then you don&#8217;t need it.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--compress <var>&lt;name&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Compression algorithm to use when storing new data.
+Allowed values: <tt class=" docutils literal"><span class="pre">lzma</span></tt>, <tt class=" docutils literal"><span class="pre">bzip2</span></tt>, <tt class=" docutils literal"><span class="pre">zlib</span></tt>, none.
+(default: <tt class=" docutils literal"><span class="pre">lzma</span></tt>)</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--metadata-upload-interval <var>&lt;seconds&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Interval in seconds between complete metadata uploads.
+Set to 0 to disable. Default: 24h.</td></tr>
+<tr><td class="option-group" colspan="2">
+<kbd><span class="option">--compression-threads <var>&lt;no&gt;</var></span></kbd></td>
+</tr>
+<tr><td>&nbsp;</td><td>Number of parallel compression and encryption threads
+to use (default: 1).</td></tr>
+</tbody>
+</table>
+</div></blockquote>
+<div class="section" id="storing-encryption-passwords">
+<span id="bucket-pw"></span><h2>Storing Encryption Passwords<a class="headerlink" href="#storing-encryption-passwords" title="Permalink to this headline">¶</a></h2>
+<p>If you are trying to mount an encrypted bucket, <tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> will first
+try to read the password from the <tt class=" docutils literal"><span class="pre">.s3ql/authinfo</span></tt> file (the same file
+that is used to read the backend authentication data) and prompt the
+user to enter the password only if this fails.</p>
+<p>The <tt class=" docutils literal"><span class="pre">authinfo</span></tt> entries to specify bucket passwords are of the form</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">storage-url </span><span class="nv">&lt;storage-url&gt;</span><span class="l"> password </span><span class="nv">&lt;password&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>So to always use the password <tt class=" docutils literal"><span class="pre">topsecret</span></tt> when mounting <tt class=" docutils literal"><span class="pre">s3://joes_bucket</span></tt>,
+the entry would be</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">storage-url s3://joes_bucket password topsecret</span>
+</pre></div>
+</div>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">If you are using the local backend, the storage url will
+always be converted to an absolute path. So if you are in the
+<tt class=" docutils literal"><span class="pre">/home/john</span></tt> directory and try to mount <tt class=" docutils literal"><span class="pre">local://bucket</span></tt>, the matching
+<tt class=" docutils literal"><span class="pre">authinfo</span></tt> entry has to have a storage url of
+<tt class=" docutils literal"><span class="pre">local:///home/john/bucket</span></tt>.</p>
+</div>
+</div>
+<div class="section" id="compression-algorithms">
+<h2>Compression Algorithms<a class="headerlink" href="#compression-algorithms" title="Permalink to this headline">¶</a></h2>
+<p>S3QL supports three compression algorithms, LZMA, Bzip2 and zlib (with
+LZMA being the default). The compression algorithm can be specified
+freely whenever the file system is mounted, since it affects only the
+compression of new data blocks.</p>
+<p>Roughly speaking, LZMA is slower but achieves better compression
+ratios than Bzip2, while Bzip2 in turn is slower but achieves better
+compression ratios than zlib.</p>
+<p>For maximum file system performance, the best algorithm therefore
+depends on your network connection speed: the compression algorithm
+should be fast enough to saturate your network connection.</p>
+<p>To find the optimal algorithm for your system, S3QL ships with a
+program called <tt class=" docutils literal"><span class="pre">benchmark.py</span></tt> in the <tt class=" docutils literal"><span class="pre">contrib</span></tt> directory. Run
+this program on a file whose size is roughly equal to the block size
+of your file system and whose contents are representative of your
+data. It will then determine the compression speeds for the different
+algorithms and the upload speed for the specified backend, and
+recommend the best algorithm that is fast enough to saturate your
+network connection.</p>
+<p>Obviously you should make sure that there is little other system load
+when you run <tt class=" docutils literal"><span class="pre">benchmark.py</span></tt> (e.g., don&#8217;t compile software or encode
+videos at the same time).</p>
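+<p>For illustration only, an invocation might look like the following
+(the storage URL and test file are placeholders, and the exact syntax
+may differ; consult the program&#8217;s built-in help):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">python contrib/benchmark.py s3://mybucket testfile.dat</span>
+</pre></div>
+</div>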
+</div>
+<div class="section" id="parallel-compression">
+<h2>Parallel Compression<a class="headerlink" href="#parallel-compression" title="Permalink to this headline">¶</a></h2>
+<p>If you are running S3QL on a system with multiple cores, you might
+want to set <tt class="docutils literal"><span class="pre">--compression-threads</span></tt> to a value bigger than one. This
+will instruct S3QL to compress and encrypt several blocks at the same
+time.</p>
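+<p>For example, on a machine with four cores one might mount with the
+following command (bucket and mount point are placeholders):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">mount.s3ql --compression-threads 4 s3://mybucket /mnt/s3ql</span>
+</pre></div>
+</div>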
+<p>If you want to do this in combination with the LZMA compression
+algorithm, you should keep an eye on memory usage though: every
+LZMA compression thread requires about 200 MB of RAM.</p>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p class="last">To determine the optimal compression algorithm for your network
+connection when using multiple threads, you can pass the
+<tt class="docutils literal"><span class="pre">--compression-threads</span></tt> option to <tt class=" docutils literal"><span class="pre">contrib/benchmark.py</span></tt>.</p>
+</div>
+</div>
+<div class="section" id="notes-about-caching">
+<h2>Notes about Caching<a class="headerlink" href="#notes-about-caching" title="Permalink to this headline">¶</a></h2>
+<p>S3QL maintains a local cache of the file system data to speed up
+access. The cache is block based, so it is possible that only parts of
+a file are in the cache.</p>
+<div class="section" id="maximum-number-of-cache-entries">
+<h3>Maximum Number of Cache Entries<a class="headerlink" href="#maximum-number-of-cache-entries" title="Permalink to this headline">¶</a></h3>
+<p>The maximum size of the cache can be configured with the <tt class=" docutils literal"><span class="pre">--cachesize</span></tt>
+option. In addition to that, the maximum number of objects in the
+cache is limited by the <tt class=" docutils literal"><span class="pre">--max-cache-entries</span></tt> option, so it is
+possible that the cache does not grow up to the maximum cache size
+because the maximum number of cache elements has been reached. The
+reason for this limit is that each cache entry requires one open
+file descriptor, and Linux distributions usually limit the total
+number of file descriptors per process to about a thousand.</p>
+<p>If you specify a larger value for <tt class=" docutils literal"><span class="pre">--max-cache-entries</span></tt>, you should therefore
+make sure to also configure your system to increase the maximum number
+of open file handles. This can be done temporarily with the <tt class=" docutils literal"><span class="pre">ulimit</span> <span class="pre">-n</span></tt>
+command. The method to permanently change this limit system-wide
+depends on your distribution.</p>
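+<p>A minimal sketch (values and paths are illustrative only):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l"># Temporarily raise the open file limit for this shell</span>
+<span class="l">ulimit -n 8192</span>
+<span class="l">mount.s3ql --max-cache-entries 8000 s3://mybucket /mnt/s3ql</span>
+</pre></div>
+</div>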
+</div>
+<div class="section" id="cache-flushing-and-expiration">
+<h3>Cache Flushing and Expiration<a class="headerlink" href="#cache-flushing-and-expiration" title="Permalink to this headline">¶</a></h3>
+<p>S3QL flushes changed blocks in the cache to the backend whenever a block
+has not been accessed for at least 10 seconds. Note that when a block is
+flushed, it still remains in the cache.</p>
+<p>Cache expiration (i.e., removal of blocks from the cache) is only done
+when the maximum cache size is reached. S3QL always expires the least
+recently used blocks first.</p>
+</div>
+</div>
+<div class="section" id="automatic-mounting">
+<h2>Automatic Mounting<a class="headerlink" href="#automatic-mounting" title="Permalink to this headline">¶</a></h2>
+<p>If you want to mount and unmount an S3QL file system automatically at
+system startup and shutdown, you should do so with one dedicated S3QL
+init script for each S3QL file system.</p>
+<p>If your system is using upstart, an appropriate job can be defined
+as follows (and should be placed in <tt class=" docutils literal"><span class="pre">/etc/init/</span></tt>):</p>
+<div class="highlight-commandline"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre> 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29</pre></div></td><td class="code"><div class="highlight"><pre><span class="l">description &quot;S3QL Backup File System&quot;</span>
+<span class="l">author &quot;Nikolaus Rath </span><span class="nv">&lt;Nikolaus@rath.org&gt;</span><span class="l">&quot;</span>
+
+<span class="l"># This assumes that eth0 provides your internet connection</span>
+<span class="l">start on (filesystem and net-device-up IFACE=eth0)</span>
+<span class="l">stop on runlevel </span><span class="ge">[016]</span><span class="l"></span>
+
+<span class="l">env BUCKET=&quot;s3://my-backup-bla&quot;</span>
+<span class="l">env MOUNTPOINT=&quot;/mnt/backup&quot;</span>
+
+<span class="l">expect stop</span>
+
+<span class="l">script</span>
+<span class="l"> # Redirect stdout and stderr into the system log</span>
+<span class="l"> DIR=$(mktemp -d)</span>
+<span class="l"> mkfifo &quot;$DIR/LOG_FIFO&quot;</span>
+<span class="l"> logger -t s3ql -p local0.info </span><span class="nv">&lt; &quot;$DIR/LOG_FIFO&quot; &amp;</span>
+<span class="nv"> exec &gt;</span><span class="l"> &quot;$DIR/LOG_FIFO&quot;</span>
+<span class="l"> exec 2&gt;&amp;1</span>
+<span class="l"> rm -rf &quot;$DIR&quot;</span>
+
+<span class="l"> # Check and mount file system</span>
+<span class="l"> fsck.s3ql --batch &quot;$BUCKET&quot;</span>
+<span class="l"> exec mount.s3ql --upstart &quot;$BUCKET&quot; &quot;$MOUNTPOINT&quot;</span>
+<span class="l">end script</span>
+
+<span class="l">pre-stop script</span>
+<span class="l"> umount.s3ql &quot;$MOUNTPOINT&quot;</span>
+<span class="l">end script</span>
+</pre></div>
+</td></tr></table></div>
+<div class="admonition note">
+<p class="first admonition-title">Note</p>
+<p>In principle, it is also possible to automatically mount an S3QL
+file system with an appropriate entry in <tt class=" docutils literal"><span class="pre">/etc/fstab</span></tt>. However,
+this is not recommended for several reasons:</p>
+<ul class="last simple">
+<li>file systems mounted in <tt class=" docutils literal"><span class="pre">/etc/fstab</span></tt> will be unmounted with the
+<tt class=" docutils literal"><span class="pre">umount</span></tt> command, so your system will not wait until all data has
+been uploaded but will shut down (or restart) immediately (this is a
+FUSE limitation, see <a class="reference external" href="http://code.google.com/p/s3ql/issues/detail?id=159">issue 159</a>).</li>
+<li>There is no way to tell the system that mounting S3QL requires a
+Python interpreter to be available, so it may attempt to run
+<tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> before it has mounted the volume containing the
+Python interpreter.</li>
+<li>There is no standard way to tell the system that the internet
+connection has to be up before the S3QL file system can be
+mounted.</li>
+</ul>
+</div>
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="special.html" title="Advanced S3QL Features"
+ >next</a></li>
+ <li class="right" >
+ <a href="adm.html" title="Managing Buckets"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/objects.inv b/doc/html/objects.inv
new file mode 100644
index 0000000..be510e2
--- /dev/null
+++ b/doc/html/objects.inv
Binary files differ
diff --git a/doc/html/resources.html b/doc/html/resources.html
new file mode 100644
index 0000000..37e568d
--- /dev/null
+++ b/doc/html/resources.html
@@ -0,0 +1,118 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Further Resources / Getting Help &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="prev" title="The expire_backups command" href="man/expire_backups.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="man/expire_backups.html" title="The expire_backups command"
+ accesskey="P">previous</a></li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="further-resources-getting-help">
+<span id="resources"></span><h1>Further Resources / Getting Help<a class="headerlink" href="#further-resources-getting-help" title="Permalink to this headline">¶</a></h1>
+<p>If you have questions or problems with S3QL that you weren&#8217;t able to
+resolve with this manual, you might want to consult the following resources:</p>
+<ul class="simple">
+<li>The <a class="reference external" href="http://code.google.com/p/s3ql/w/list">S3QL Wiki</a></li>
+<li>The <a class="reference external" href="http://code.google.com/p/s3ql/wiki/FAQ">S3QL FAQ</a></li>
+<li>The <a class="reference external" href="http://groups.google.com/group/s3ql">S3QL Mailing List</a>. You
+can subscribe by sending a mail to
+<a class="reference external" href="mailto:s3ql+subscribe&#37;&#52;&#48;googlegroups&#46;com">s3ql+subscribe<span>&#64;</span>googlegroups<span>&#46;</span>com</a>.</li>
+</ul>
+<p>Please report any bugs you may encounter in the <a class="reference external" href="http://code.google.com/p/s3ql/issues/list">Issue Tracker</a>.</p>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="man/expire_backups.html" title="The expire_backups command"
+ >previous</a></li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/search.html b/doc/html/search.html
new file mode 100644
index 0000000..30b0686
--- /dev/null
+++ b/doc/html/search.html
@@ -0,0 +1,113 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Search &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <script type="text/javascript" src="_static/searchtools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <script type="text/javascript">
+ jQuery(function() { Search.loadIndex("searchindex.js"); });
+ </script>
+
+
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul>
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <h1 id="search-documentation">Search</h1>
+ <div id="fallback" class="admonition warning">
+ <script type="text/javascript">$('#fallback').hide();</script>
+ <p>
+ Please activate JavaScript to enable the search
+ functionality.
+ </p>
+ </div>
+ <p>
+ From here you can search these documents. Enter your search
+ words into the box below and click "search". Note that the search
+ function will automatically search for all of the words. Pages
+ containing fewer words won't appear in the result list.
+ </p>
+ <form action="" method="get">
+ <input type="text" name="q" value="" />
+ <input type="submit" value="search" />
+ <span id="search-progress" style="padding-left: 10px"></span>
+ </form>
+
+ <div id="search-results">
+
+ </div>
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/searchindex.js b/doc/html/searchindex.js
new file mode 100644
index 0000000..d4a910d
--- /dev/null
+++ b/doc/html/searchindex.js
@@ -0,0 +1 @@
+Search.setIndex({...}) [generated Sphinx search index (searchindex.js); the minified term, title, and filename maps are omitted here] \ No newline at end of file
diff --git a/doc/html/special.html b/doc/html/special.html
new file mode 100644
index 0000000..768f100
--- /dev/null
+++ b/doc/html/special.html
@@ -0,0 +1,280 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Advanced S3QL Features &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Unmounting" href="umount.html" />
+ <link rel="prev" title="Mounting" href="mount.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="umount.html" title="Unmounting"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="mount.html" title="Mounting"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Advanced S3QL Features</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#snapshotting-and-copy-on-write">Snapshotting and Copy-on-Write</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#getting-statistics">Getting Statistics</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#immutable-trees">Immutable Trees</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#fast-recursive-removal">Fast Recursive Removal</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#runtime-configuration">Runtime Configuration</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="advanced-s3ql-features">
+<h1>Advanced S3QL Features<a class="headerlink" href="#advanced-s3ql-features" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="snapshotting-and-copy-on-write">
+<span id="s3qlcp"></span><h2>Snapshotting and Copy-on-Write<a class="headerlink" href="#snapshotting-and-copy-on-write" title="Permalink to this headline">¶</a></h2>
+<p>The command <tt class=" docutils literal"><span class="pre">s3qlcp</span></tt> can be used to duplicate a directory tree without
+physically copying the file contents. This is possible due to the data
+de-duplication feature of S3QL.</p>
+<p>The syntax of <tt class=" docutils literal"><span class="pre">s3qlcp</span></tt> is:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlcp </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;src&gt;</span><span class="l"> </span><span class="nv">&lt;target&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>This will replicate the contents of the directory <tt class=" docutils literal"><span class="pre">&lt;src&gt;</span></tt> in the
+directory <tt class=" docutils literal"><span class="pre">&lt;target&gt;</span></tt>. <tt class=" docutils literal"><span class="pre">&lt;src&gt;</span></tt> has to be an existing directory and
+<tt class=" docutils literal"><span class="pre">&lt;target&gt;</span></tt> must not exist. Moreover, both directories have to be
+within the same S3QL file system.</p>
+<p>The replication will not take up any additional space. Only if one of
+the directories is modified later on will the modified data consume
+additional storage space.</p>
+<p><tt class=" docutils literal"><span class="pre">s3qlcp</span></tt> can only be called by the user that mounted the file system
+and (if the file system was mounted with <tt class=" docutils literal"><span class="pre">--allow-other</span></tt> or <tt class=" docutils literal"><span class="pre">--allow-root</span></tt>)
+the root user. This limitation might be removed in the future (see <a class="reference external" href="http://code.google.com/p/s3ql/issues/detail?id=155">issue 155</a>).</p>
+<p>Note that:</p>
+<ul class="simple">
+<li>After the replication, both the source and the target directory will still
+be completely ordinary directories. You can regard <tt class=" docutils literal"><span class="pre">&lt;src&gt;</span></tt> as a
+snapshot of <tt class=" docutils literal"><span class="pre">&lt;target&gt;</span></tt> or vice versa. However, the most common
+usage of <tt class=" docutils literal"><span class="pre">s3qlcp</span></tt> is to regularly duplicate the same source
+directory, say <tt class=" docutils literal"><span class="pre">documents</span></tt>, to different target directories. For a
+monthly replication, for example, the target directories would typically be
+named something like <tt class=" docutils literal"><span class="pre">documents_January</span></tt> for the replication in
+January, <tt class=" docutils literal"><span class="pre">documents_February</span></tt> for the replication in February, etc.
+In this case it is clear that the target directories should be
+regarded as snapshots of the source directory (see the example below).</li>
+<li>Exactly the same effect could be achieved with an ordinary copy
+program like <tt class=" docutils literal"><span class="pre">cp</span> <span class="pre">-a</span></tt>. However, this procedure would be orders of
+magnitude slower, because <tt class=" docutils literal"><span class="pre">cp</span></tt> would have to read every file
+completely (so S3QL would have to fetch all the data over the network
+from the backend) before writing it into the destination folder.</li>
+<li>Before starting with the replication, S3QL has to flush the local
+cache. So if you just copied lots of new data into the file system
+that has not yet been uploaded, replication will take longer than
+usual.</li>
+</ul>
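+<p>For example, to create a hypothetical February snapshot of a
+<tt class=" docutils literal"><span class="pre">documents</span></tt> directory (all paths here are illustrative):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlcp /mnt/s3ql/documents /mnt/s3ql/documents_February</span>
+</pre></div>
+</div>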
+<div class="section" id="snapshotting-vs-hardlinking">
+<h3>Snapshotting vs Hardlinking<a class="headerlink" href="#snapshotting-vs-hardlinking" title="Permalink to this headline">¶</a></h3>
+<p>Snapshot support in S3QL is inspired by the hardlinking feature that
+is offered by programs like <a class="reference external" href="http://www.samba.org/rsync">rsync</a> or
+<a class="reference external" href="http://savannah.nongnu.org/projects/storebackup">storeBackup</a>.
+These programs can create a hardlink instead of copying a file if an
+identical file already exists in the backup. However, using hardlinks
+has two large disadvantages:</p>
+<ul class="simple">
+<li>backups and restores always have to be made with a special program
+that takes care of the hardlinking. The backup must not be touched
+by any other programs (they may make changes that inadvertently
+affect other hardlinked files)</li>
+<li>special care needs to be taken to handle files which are already
+hardlinked (the restore program needs to know that the hardlink was
+not just introduced by the backup program to save space)</li>
+</ul>
+<p>S3QL snapshots do not have these problems, and they can be used with
+any backup program.</p>
+</div>
+</div>
+<div class="section" id="getting-statistics">
+<span id="s3qlstat"></span><h2>Getting Statistics<a class="headerlink" href="#getting-statistics" title="Permalink to this headline">¶</a></h2>
+<p>You can get more information about a mounted S3QL file system with the
+<tt class=" docutils literal"><span class="pre">s3qlstat</span></tt> command. It has the following syntax:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlstat </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;mountpoint&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>Probably the most interesting numbers are the total size of your data,
+the total size after de-duplication, and the final size after both
+de-duplication and compression.</p>
+<p><tt class=" docutils literal"><span class="pre">s3qlstat</span></tt> can only be called by the user that mounted the file system
+and (if the file system was mounted with <tt class=" docutils literal"><span class="pre">--allow-other</span></tt> or <tt class=" docutils literal"><span class="pre">--allow-root</span></tt>)
+the root user. This limitation might be removed in the future (see <a class="reference external" href="http://code.google.com/p/s3ql/issues/detail?id=155">issue 155</a>).</p>
+<p>For a full list of available options, run <tt class=" docutils literal"><span class="pre">s3qlstat</span> <span class="pre">--help</span></tt>.</p>
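+<p>A typical invocation only needs the mount point, e.g. (the path is
+illustrative):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlstat /mnt/s3ql</span>
+</pre></div>
+</div>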
+</div>
+<div class="section" id="immutable-trees">
+<span id="s3qllock"></span><h2>Immutable Trees<a class="headerlink" href="#immutable-trees" title="Permalink to this headline">¶</a></h2>
+<p>The command <strong class="program">s3qllock</strong> can be used to make a directory tree
+immutable. Immutable trees can no longer be changed in any way
+whatsoever. You cannot add new files or directories and you cannot
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the <strong class="program">s3qlrm</strong> command (see
+below).</p>
+<p>For example, to make the directory tree beneath the directory
+<tt class="docutils literal"><span class="pre">2010-04-21</span></tt> immutable, execute</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qllock 2010-04-21</span>
+</pre></div>
+</div>
+<p>Immutability is a feature designed for backups. Traditionally, backups
+have been made on external tape drives. Once a backup was made, the
+tape drive was removed and locked somewhere in a shelf. This has the
+great advantage that the contents of the backup are now permanently
+fixed. Nothing (short of physical destruction) can change or delete
+files in the backup.</p>
+<p>In contrast, when backing up into an online storage system like S3QL,
+all backups are available every time the file system is mounted.
+Nothing prevents a file in an old backup from being changed again
+later on. In the worst case, this may make your entire backup system
+worthless. Imagine that your system gets infected by a nasty virus
+that simply deletes all files it can find &#8211; if the virus is active
+while the backup file system is mounted, the virus will destroy all
+your old backups as well!</p>
+<p>Even if the possibility of a malicious virus or trojan horse is
+excluded, being able to change a backup after it has been made is
+generally not a good idea. A common S3QL use case is to keep the file
+system mounted at all times and periodically create backups with
+<strong class="program">rsync -a</strong>. This allows every user to recover her files from a
+backup without having to call the system administrator. However, this
+also allows every user to accidentally change or delete files <em>in</em> one
+of the old backups.</p>
+<p>Making a backup immutable protects you against all these problems.
+Unless you happen to run into a virus that was specifically programmed
+to attack S3QL file systems, backups can be neither deleted nor
+changed after they have been made immutable.</p>
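+<p>In a backup script, locking the finished backup is then a single extra
+step, e.g. (paths illustrative):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">rsync -a ~/documents /mnt/s3ql/2010-04-21/</span>
+<span class="l">s3qllock /mnt/s3ql/2010-04-21</span>
+</pre></div>
+</div>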
+</div>
+<div class="section" id="fast-recursive-removal">
+<span id="s3qlrm"></span><h2>Fast Recursive Removal<a class="headerlink" href="#fast-recursive-removal" title="Permalink to this headline">¶</a></h2>
+<p>The <tt class="docutils literal"><span class="pre">s3qlrm</span></tt> command can be used to recursively delete files and
+directories on an S3QL file system. Although <tt class="docutils literal"><span class="pre">s3qlrm</span></tt> is faster than
+using e.g. <tt class="docutils literal"><span class="pre">rm</span> <span class="pre">-r</span></tt>, the main reason for its existence is that it
+allows you to delete immutable trees as well. The syntax is rather
+simple:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlrm </span><span class="nv">&lt;directory&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.</p>
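+<p>For example, to delete an old (possibly immutable) backup directory
+(path illustrative):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlrm /mnt/s3ql/2010-04-21</span>
+</pre></div>
+</div>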
+</div>
+<div class="section" id="runtime-configuration">
+<span id="s3qlctrl"></span><h2>Runtime Configuration<a class="headerlink" href="#runtime-configuration" title="Permalink to this headline">¶</a></h2>
+<p>The <tt class=" docutils literal"><span class="pre">s3qlctrl</span></tt> can be used to control a mounted S3QL file system. Its
+syntax is</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlctrl </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;action&gt;</span><span class="l"> </span><span class="nv">&lt;mountpoint&gt;</span><span class="l"> ...</span>
+</pre></div>
+</div>
+<p><tt class=" docutils literal"><span class="pre">&lt;mountpoint&gt;</span></tt> must be the location of a mounted S3QL file system.
+For a list of valid options, run <tt class=" docutils literal"><span class="pre">s3qlctrl</span> <span class="pre">--help</span></tt>. <tt class=" docutils literal"><span class="pre">&lt;action&gt;</span></tt>
+may be either of:</p>
+<blockquote>
+<div><table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">flushcache:</th><td class="field-body">Flush file system cache. The command blocks until the cache has
+been flushed.</td>
+</tr>
+<tr class="field-even field"><th class="field-name">log:</th><td class="field-body">Change log level.</td>
+</tr>
+<tr class="field-odd field"><th class="field-name">cachesize:</th><td class="field-body">Change file system cache size.</td>
+</tr>
+<tr class="field-even field"><th class="field-name">upload-meta:</th><td class="field-body">Trigger a metadata upload.</td>
+</tr>
+</tbody>
+</table>
+</div></blockquote>
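+<p>For instance, to flush the cache and then trigger a metadata upload for
+a file system mounted at an illustrative mount point:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3qlctrl flushcache /mnt/s3ql</span>
+<span class="l">s3qlctrl upload-meta /mnt/s3ql</span>
+</pre></div>
+</div>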
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="umount.html" title="Unmounting"
+ >next</a></li>
+ <li class="right" >
+ <a href="mount.html" title="Mounting"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/tips.html b/doc/html/tips.html
new file mode 100644
index 0000000..6560a99
--- /dev/null
+++ b/doc/html/tips.html
@@ -0,0 +1,181 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Tips &amp; Tricks &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Known Issues" href="issues.html" />
+ <link rel="prev" title="Contributed Programs" href="contrib.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="issues.html" title="Known Issues"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="contrib.html" title="Contributed Programs"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1"><a class="reference internal" href="umount.html">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Tips &amp; Tricks</a><ul>
+<li class="toctree-l2"><a class="reference internal" href="#permanently-mounted-backup-file-system">Permanently mounted backup file system</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#improving-copy-performance">Improving copy performance</a></li>
+</ul>
+</li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="tips-tricks">
+<h1>Tips &amp; Tricks<a class="headerlink" href="#tips-tricks" title="Permalink to this headline">¶</a></h1>
+<div class="section" id="permanently-mounted-backup-file-system">
+<span id="copy-performance"></span><h2>Permanently mounted backup file system<a class="headerlink" href="#permanently-mounted-backup-file-system" title="Permalink to this headline">¶</a></h2>
+<p>If you use S3QL as a backup file system, it can be useful to mount the
+file system permanently (rather than mounting it just for a backup and
+unmounting it afterwards). Especially if your file system becomes
+large, this saves you the long mount and unmount times when you only want
+to restore a single file.</p>
+<p>If you decide to do so, you should make sure to</p>
+<ul class="simple">
+<li>Use <a class="reference internal" href="special.html#s3qllock"><em>s3qllock</em></a> to ensure that backups are immutable
+after they have been made.</li>
+<li>Call <a class="reference internal" href="special.html#s3qlctrl"><em>s3qlctrl upload-meta</em></a> right after a every
+backup to make sure that the newest metadata is stored safely (if
+you do backups often enough, this may also allow you to set the
+<tt class="cmdopt docutils literal"><span class="pre">--metadata-upload-interval</span></tt> option of <strong class="program">mount.s3ql</strong>
+to zero).</li>
+</ul>
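+<p>A minimal sketch of how the end of such a backup script might look,
+assuming the file system stays mounted at the illustrative mount point
+<tt class="docutils literal"><span class="pre">/mnt/backup</span></tt>:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">rsync -a ~/documents /mnt/backup/2011-05-01/</span>
+<span class="l">s3qllock /mnt/backup/2011-05-01</span>
+<span class="l">s3qlctrl upload-meta /mnt/backup</span>
+</pre></div>
+</div>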
+</div>
+<div class="section" id="improving-copy-performance">
+<h2>Improving copy performance<a class="headerlink" href="#improving-copy-performance" title="Permalink to this headline">¶</a></h2>
+<p>If you want to copy a lot of smaller files <em>from</em> an S3QL file system
+(e.g. for a system restore) you will probably notice that the
+performance is rather bad.</p>
+<p>The reason for this is intrinsic to the way S3QL works. Whenever you
+read a file, S3QL first has to retrieve it over the network from the
+storage backend. This takes a minimum amount of time (the network
+latency), no matter how big or small the file is. So when you
+copy lots of small files, 99% of the time is actually spent waiting
+for network data.</p>
+<p>Theoretically, this problem is easy to solve: you just have to copy
+several files at the same time. In practice, however, almost all Unix
+utilities (<tt class="docutils literal"><span class="pre">cp</span></tt>, <tt class="docutils literal"><span class="pre">rsync</span></tt>, <tt class="docutils literal"><span class="pre">tar</span></tt> and friends) insist on copying
+data one file at a time. This makes a lot of sense when copying data
+on a local hard disk, but in the case of S3QL it is really
+unfortunate.</p>
+<p>The best workaround that has been found so far is to copy files by
+starting several rsync processes at once and use exclusion rules to
+make sure that they work on different sets of files.</p>
+<p>For example, the following script starts three rsync instances. The
+first instance handles all filenames starting with a-f, the second the
+filenames from g-l, and the third covers the rest. The <tt class="docutils literal"><span class="pre">+</span> <span class="pre">*/</span></tt> rule
+ensures that every instance looks into all directories.</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="c">#!/bin/bash</span>
+
+<span class="l">RSYNC_ARGS=&quot;-aHv /mnt/s3ql/ /home/restore/&quot;</span>
+
+<span class="l">rsync -f &quot;+ */&quot; -f &quot;-! </span><span class="ge">[a-f]</span><span class="l">*&quot; $RSYNC_ARGS &amp;</span>
+<span class="l">rsync -f &quot;+ */&quot; -f &quot;-! </span><span class="ge">[g-l]</span><span class="l">*&quot; $RSYNC_ARGS &amp;</span>
+<span class="l">rsync -f &quot;+ */&quot; -f &quot;- </span><span class="ge">[a-l]</span><span class="l">*&quot; $RSYNC_ARGS &amp;</span>
+
+<span class="l">wait</span>
+</pre></div>
+</div>
+<p>The optimum number of parallel processes depends on your network
+connection and the size of the files that you want to transfer.
+However, starting about 10 processes seems to be a good compromise
+that increases performance dramatically in almost all situations.</p>
+<p>S3QL comes with a script named <tt class="docutils literal"><span class="pre">pcp.py</span></tt> in the <tt class="docutils literal"><span class="pre">contrib</span></tt> directory
+that can be used to transfer files in parallel without having to write
+an explicit script first. See the description of <a class="reference internal" href="contrib.html#pcp"><em>pcp.py</em></a> for
+details.</p>
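+<p>Assuming the same source and destination as in the script above, the
+equivalent parallel copy might be invoked like this (see the
+<a class="reference internal" href="contrib.html#pcp"><em>pcp.py</em></a> description for the exact options):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">contrib/pcp.py /mnt/s3ql/ /home/restore/</span>
+</pre></div>
+</div>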
+</div>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="issues.html" title="Known Issues"
+ >next</a></li>
+ <li class="right" >
+ <a href="contrib.html" title="Contributed Programs"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/html/umount.html b/doc/html/umount.html
new file mode 100644
index 0000000..23e4c45
--- /dev/null
+++ b/doc/html/umount.html
@@ -0,0 +1,156 @@
+
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+ <title>Unmounting &mdash; S3QL 1.0.1 documentation</title>
+
+ <link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
+ <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: '',
+ VERSION: '1.0.1',
+ COLLAPSE_INDEX: false,
+ FILE_SUFFIX: '.html',
+ HAS_SOURCE: true
+ };
+ </script>
+ <script type="text/javascript" src="_static/jquery.js"></script>
+ <script type="text/javascript" src="_static/underscore.js"></script>
+ <script type="text/javascript" src="_static/doctools.js"></script>
+ <link rel="author" title="About these documents" href="about.html" />
+ <link rel="top" title="S3QL 1.0.1 documentation" href="index.html" />
+ <link rel="next" title="Checking for Errors" href="fsck.html" />
+ <link rel="prev" title="Advanced S3QL Features" href="special.html" />
+ </head>
+ <body>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="fsck.html" title="Checking for Errors"
+ accesskey="N">next</a></li>
+ <li class="right" >
+ <a href="special.html" title="Advanced S3QL Features"
+ accesskey="P">previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ <h3><a href="index.html">Table Of Contents</a></h3>
+ <ul class="current">
+<li class="toctree-l1"><a class="reference internal" href="about.html">About S3QL</a></li>
+<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="backends.html">Storage Backends</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mkfs.html">File System Creation</a></li>
+<li class="toctree-l1"><a class="reference internal" href="adm.html">Managing Buckets</a></li>
+<li class="toctree-l1"><a class="reference internal" href="mount.html">Mounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="special.html">Advanced S3QL Features</a></li>
+<li class="toctree-l1 current"><a class="current reference internal" href="">Unmounting</a></li>
+<li class="toctree-l1"><a class="reference internal" href="fsck.html">Checking for Errors</a></li>
+<li class="toctree-l1"><a class="reference internal" href="contrib.html">Contributed Programs</a></li>
+<li class="toctree-l1"><a class="reference internal" href="tips.html">Tips &amp; Tricks</a></li>
+<li class="toctree-l1"><a class="reference internal" href="issues.html">Known Issues</a></li>
+<li class="toctree-l1"><a class="reference internal" href="man/index.html">Manpages</a></li>
+<li class="toctree-l1"><a class="reference internal" href="resources.html">Further Resources / Getting Help</a></li>
+</ul>
+
+
+ <div id="searchbox" style="display: none">
+ <h3>Quick search</h3>
+ <form class="search" action="search.html" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="Go" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ Enter search terms.
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ </div>
+ </div>
+
+ <div class="document">
+ <div class="documentwrapper">
+ <div class="bodywrapper">
+ <div class="body">
+
+ <div class="section" id="unmounting">
+<h1>Unmounting<a class="headerlink" href="#unmounting" title="Permalink to this headline">¶</a></h1>
+<p>To unmount an S3QL file system, use the command:</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">umount.s3ql </span><span class="ge">[options]</span><span class="l"> </span><span class="nv">&lt;mountpoint&gt;</span><span class="l"></span>
+</pre></div>
+</div>
+<p>This will block until all data has been committed to the storage
+backend.</p>
+<p>Only the user who mounted the file system with <strong class="command">mount.s3ql</strong>
+is able to unmount it again. If you are root and want to unmount an
+S3QL file system mounted by an ordinary user, you have to use the
+<strong class="command">fusermount -u</strong> or <strong class="command">umount</strong> command instead. Note
+that these commands do not block until all data has been uploaded, so
+if you use them instead of <tt class=" docutils literal"><span class="pre">umount.s3ql</span></tt> then you should manually wait
+for the <tt class=" docutils literal"><span class="pre">mount.s3ql</span></tt> process to terminate before shutting down the
+system.</p>
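+<p>For example, as root (mount point illustrative):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">fusermount -u /mnt/s3ql</span>
+</pre></div>
+</div>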
+<p>The <strong class="command">umount.s3ql</strong> command accepts the following options:</p>
+<blockquote>
+<div><table class="docutils option-list" frame="void" rules="none">
+<col class="option" />
+<col class="description" />
+<tbody valign="top">
+<tr><td class="option-group">
+<kbd><span class="option">--debug</span></kbd></td>
+<td>activate debugging output</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--quiet</span></kbd></td>
+<td>be really quiet</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--version</span></kbd></td>
+<td>just print program version and exit</td></tr>
+<tr><td class="option-group">
+<kbd><span class="option">--lazy</span>, <span class="option">-z</span></kbd></td>
+<td>Lazy umount. Detaches the file system immediately, even if
+there are still open files. The data will be uploaded in the
+background once all open files have been closed.</td></tr>
+</tbody>
+</table>
+</div></blockquote>
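+<p>For example, to detach the file system immediately and let the
+remaining data be uploaded in the background (mount point illustrative):</p>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">umount.s3ql --lazy /mnt/s3ql</span>
+</pre></div>
+</div>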
+<p>If, for some reason, the <tt class=" docutils literal"><span class="pre">umount.s3ql</span></tt> command does not work, the file
+system can also be unmounted with <tt class=" docutils literal"><span class="pre">fusermount</span> <span class="pre">-u</span> <span class="pre">-z</span></tt>. Note that this
+command will return immediately and the file system may continue to
+upload data in the background for a while longer.</p>
+</div>
+
+
+ </div>
+ </div>
+ </div>
+ <div class="clearer"></div>
+ </div>
+ <div class="related">
+ <h3>Navigation</h3>
+ <ul>
+ <li class="right" style="margin-right: 10px">
+ <a href="fsck.html" title="Checking for Errors"
+ >next</a></li>
+ <li class="right" >
+ <a href="special.html" title="Advanced S3QL Features"
+ >previous</a> |</li>
+ <li><a href="index.html">S3QL 1.0.1 documentation</a> &raquo;</li>
+ </ul>
+ </div>
+ <div class="footer">
+ &copy; Copyright 2008-2011, Nikolaus Rath.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1pre.
+ </div>
+ </body>
+</html> \ No newline at end of file
diff --git a/doc/latex/Makefile b/doc/latex/Makefile
new file mode 100644
index 0000000..f219a2f
--- /dev/null
+++ b/doc/latex/Makefile
@@ -0,0 +1,64 @@
+# Makefile for Sphinx LaTeX output
+
+ALLDOCS = $(basename $(wildcard *.tex))
+ALLPDF = $(addsuffix .pdf,$(ALLDOCS))
+ALLDVI = $(addsuffix .dvi,$(ALLDOCS))
+
+# Prefix for archive names
+ARCHIVEPREFIX =
+# Additional LaTeX options
+LATEXOPTS =
+
+all: $(ALLPDF)
+all-pdf: $(ALLPDF)
+all-dvi: $(ALLDVI)
+all-ps: all-dvi
+ for f in *.dvi; do dvips $$f; done
+all-pdf-ja: $(wildcard *.tex)
+ ebb $(wildcard *.pdf *.png *.gif *.jpeg)
+ platex -kanji=utf8 $(LATEXOPTS) '$<'
+ platex -kanji=utf8 $(LATEXOPTS) '$<'
+ platex -kanji=utf8 $(LATEXOPTS) '$<'
+ -mendex -U -f -d '$(basename $<).dic' -s python.ist '$(basename $<).idx'
+ platex -kanji=utf8 $(LATEXOPTS) '$<'
+ platex -kanji=utf8 $(LATEXOPTS) '$<'
+ dvipdfmx '$(basename $<).dvi'
+
+zip: all-$(FMT)
+ mkdir $(ARCHIVEPREFIX)docs-$(FMT)
+ cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT)
+ zip -q -r -9 $(ARCHIVEPREFIX)docs-$(FMT).zip $(ARCHIVEPREFIX)docs-$(FMT)
+ rm -r $(ARCHIVEPREFIX)docs-$(FMT)
+
+tar: all-$(FMT)
+ mkdir $(ARCHIVEPREFIX)docs-$(FMT)
+ cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT)
+ tar cf $(ARCHIVEPREFIX)docs-$(FMT).tar $(ARCHIVEPREFIX)docs-$(FMT)
+ rm -r $(ARCHIVEPREFIX)docs-$(FMT)
+
+bz2: tar
+ bzip2 -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar
+
+# The number of LaTeX runs is quite conservative, but I don't expect it
+# to get run often, so the little extra time won't hurt.
+%.dvi: %.tex
+ latex $(LATEXOPTS) '$<'
+ latex $(LATEXOPTS) '$<'
+ latex $(LATEXOPTS) '$<'
+ -makeindex -s python.ist '$(basename $<).idx'
+ latex $(LATEXOPTS) '$<'
+ latex $(LATEXOPTS) '$<'
+
+%.pdf: %.tex
+ pdflatex $(LATEXOPTS) '$<'
+ pdflatex $(LATEXOPTS) '$<'
+ pdflatex $(LATEXOPTS) '$<'
+ -makeindex -s python.ist '$(basename $<).idx'
+ pdflatex $(LATEXOPTS) '$<'
+ pdflatex $(LATEXOPTS) '$<'
+
+clean:
+ rm -f *.dvi *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla
+
+.PHONY: all all-pdf all-dvi all-ps clean
+
diff --git a/doc/latex/fncychap.sty b/doc/latex/fncychap.sty
new file mode 100644
index 0000000..9a56c04
--- /dev/null
+++ b/doc/latex/fncychap.sty
@@ -0,0 +1,683 @@
+%%% Copyright Ulf A. Lindgren
+%%%
+%%% Note Permission is granted to modify this file under
+%%% the condition that it is saved using another
+%%% file and package name.
+%%%
+%%% Revision 1.1 (1997)
+%%%
+%%% Jan. 8th Modified package name base date option
+%%% Jan. 22th Modified FmN and FmTi for error in book.cls
+%%% \MakeUppercase{#}->{\MakeUppercase#}
+%%% Apr. 6th Modified Lenny option to prevent undesired
+%%% skip of line.
+%%% Nov. 8th Fixed \@chapapp for AMS
+%%%
+%%% Revision 1.2 (1998)
+%%%
+%%% Feb. 11th Fixed appendix problem related to Bjarne
+%%% Aug. 11th Fixed problem related to 11pt and 12pt
+%%% suggested by Tomas Lundberg. THANKS!
+%%%
+%%% Revision 1.3 (2004)
+%%% Sep. 20th problem with frontmatter, mainmatter and
+%%% backmatter, pointed out by Lapo Mori
+%%%
+%%% Revision 1.31 (2004)
+%%% Sep. 21th problem with the Rejne definition streched text
+%%% caused ugly gaps in the vrule aligned with the title
+%%% text. Kindly pointed out to me by Hendri Adriaens
+%%%
+%%% Revision 1.32 (2005)
+%%% Jun. 23th compatibility problem with the KOMA class 'scrbook.cls'
+%%% a remedy is a redefinition of '\@schapter' in
+%%% line with that used in KOMA. The problem was pointed
+%%% out to me by Mikkel Holm Olsen
+%%%
+%%% Revision 1.33 (2005)
+%%% Aug. 9th misspelled ``TWELV'' corrected, the error was pointed
+%%% out to me by George Pearson
+%%%
+%%% Revision 1.34 (2007)
+%%% Added an alternative to Lenny provided by Peter
+%%% Osborne (2005-11-28)
+%%% Corrected front, main and back matter, based on input
+%%% from Bas van Gils (2006-04-24)
+%%% Jul. 30th Added Bjornstrup option provided by Jean-Marc
+%%% Francois (2007-01-05).
+%%% Reverted to \MakeUppercase{#} see rev 1.1, solved
+%%% problem with MakeUppercase and MakeLowercase pointed
+%%% out by Marco Feuerstein (2007-06-06)
+
+
+%%% Last modified Jul. 2007
+
+\NeedsTeXFormat{LaTeX2e}[1995/12/01]
+\ProvidesPackage{fncychap}
+ [2007/07/30 v1.34
+ LaTeX package (Revised chapters)]
+
+%%%% For conditional inclusion of color
+\newif\ifusecolor
+\usecolorfalse
+
+
+
+%%%% DEFINITION OF Chapapp variables
+\newcommand{\CNV}{\huge\bfseries}
+\newcommand{\ChNameVar}[1]{\renewcommand{\CNV}{#1}}
+
+
+%%%% DEFINITION OF TheChapter variables
+\newcommand{\CNoV}{\huge\bfseries}
+\newcommand{\ChNumVar}[1]{\renewcommand{\CNoV}{#1}}
+
+\newif\ifUCN
+\UCNfalse
+\newif\ifLCN
+\LCNfalse
+\def\ChNameLowerCase{\LCNtrue\UCNfalse}
+\def\ChNameUpperCase{\UCNtrue\LCNfalse}
+\def\ChNameAsIs{\UCNfalse\LCNfalse}
+
+%%%%% Fix for AMSBook 971008
+
+\@ifundefined{@chapapp}{\let\@chapapp\chaptername}{}
+
+
+%%%%% Fix for Bjarne and appendix 980211
+
+\newif\ifinapp
+\inappfalse
+\renewcommand\appendix{\par
+ \setcounter{chapter}{0}%
+ \setcounter{section}{0}%
+ \inapptrue%
+ \renewcommand\@chapapp{\appendixname}%
+ \renewcommand\thechapter{\@Alph\c@chapter}}
+
+%%%%% Fix for frontmatter, mainmatter, and backmatter 040920
+
+\@ifundefined{@mainmatter}{\newif\if@mainmatter \@mainmattertrue}{}
+
+%%%%%
+
+
+
+\newcommand{\FmN}[1]{%
+\ifUCN
+ {\MakeUppercase{#1}}\LCNfalse
+\else
+ \ifLCN
+ {\MakeLowercase{#1}}\UCNfalse
+ \else #1
+ \fi
+\fi}
+
+
+%%%% DEFINITION OF Title variables
+\newcommand{\CTV}{\Huge\bfseries}
+\newcommand{\ChTitleVar}[1]{\renewcommand{\CTV}{#1}}
+
+%%%% DEFINITION OF the basic rule width
+\newlength{\RW}
+\setlength{\RW}{1pt}
+\newcommand{\ChRuleWidth}[1]{\setlength{\RW}{#1}}
+
+\newif\ifUCT
+\UCTfalse
+\newif\ifLCT
+\LCTfalse
+\def\ChTitleLowerCase{\LCTtrue\UCTfalse}
+\def\ChTitleUpperCase{\UCTtrue\LCTfalse}
+\def\ChTitleAsIs{\UCTfalse\LCTfalse}
+\newcommand{\FmTi}[1]{%
+\ifUCT
+ {\MakeUppercase{#1}}\LCTfalse
+\else
+ \ifLCT
+ {\MakeLowercase{#1}}\UCTfalse
+ \else {#1}
+ \fi
+\fi}
+
+
+
+\newlength{\mylen}
+\newlength{\myhi}
+\newlength{\px}
+\newlength{\py}
+\newlength{\pyy}
+\newlength{\pxx}
+
+
+\def\mghrulefill#1{\leavevmode\leaders\hrule\@height #1\hfill\kern\z@}
+
+\newcommand{\DOCH}{%
+ \CNV\FmN{\@chapapp}\space \CNoV\thechapter
+ \par\nobreak
+ \vskip 20\p@
+ }
+\newcommand{\DOTI}[1]{%
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@
+ }
+\newcommand{\DOTIS}[1]{%
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@
+ }
+
+%%%%%% SONNY DEF
+
+\DeclareOption{Sonny}{%
+ \ChNameVar{\Large\sf}
+ \ChNumVar{\Huge}
+ \ChTitleVar{\Large\sf}
+ \ChRuleWidth{0.5pt}
+ \ChNameUpperCase
+ \renewcommand{\DOCH}{%
+ \raggedleft
+ \CNV\FmN{\@chapapp}\space \CNoV\thechapter
+ \par\nobreak
+ \vskip 40\p@}
+ \renewcommand{\DOTI}[1]{%
+ \CTV\raggedleft\mghrulefill{\RW}\par\nobreak
+ \vskip 5\p@
+ \CTV\FmTi{#1}\par\nobreak
+ \mghrulefill{\RW}\par\nobreak
+ \vskip 40\p@}
+ \renewcommand{\DOTIS}[1]{%
+ \CTV\raggedleft\mghrulefill{\RW}\par\nobreak
+ \vskip 5\p@
+ \CTV\FmTi{#1}\par\nobreak
+ \mghrulefill{\RW}\par\nobreak
+ \vskip 40\p@}
+}
+
+%%%%%% LENNY DEF
+
+\DeclareOption{Lenny}{%
+
+ \ChNameVar{\fontsize{14}{16}\usefont{OT1}{phv}{m}{n}\selectfont}
+ \ChNumVar{\fontsize{60}{62}\usefont{OT1}{ptm}{m}{n}\selectfont}
+ \ChTitleVar{\Huge\bfseries\rm}
+ \ChRuleWidth{1pt}
+ \renewcommand{\DOCH}{%
+ \settowidth{\px}{\CNV\FmN{\@chapapp}}
+ \addtolength{\px}{2pt}
+ \settoheight{\py}{\CNV\FmN{\@chapapp}}
+ \addtolength{\py}{1pt}
+
+ \settowidth{\mylen}{\CNV\FmN{\@chapapp}\space\CNoV\thechapter}
+ \addtolength{\mylen}{1pt}
+ \settowidth{\pxx}{\CNoV\thechapter}
+ \addtolength{\pxx}{-1pt}
+
+ \settoheight{\pyy}{\CNoV\thechapter}
+ \addtolength{\pyy}{-2pt}
+ \setlength{\myhi}{\pyy}
+ \addtolength{\myhi}{-1\py}
+ \par
+ \parbox[b]{\textwidth}{%
+ \rule[\py]{\RW}{\myhi}%
+ \hskip -\RW%
+ \rule[\pyy]{\px}{\RW}%
+ \hskip -\px%
+ \raggedright%
+ \CNV\FmN{\@chapapp}\space\CNoV\thechapter%
+ \hskip1pt%
+ \mghrulefill{\RW}%
+ \rule{\RW}{\pyy}\par\nobreak%
+ \vskip -\baselineskip%
+ \vskip -\pyy%
+ \hskip \mylen%
+ \mghrulefill{\RW}\par\nobreak%
+ \vskip \pyy}%
+ \vskip 20\p@}
+
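+%%%% (Geometry sketch: \px/\py hold the width/height of the chapter
+%%%% name, \pyy the height of the number, and \mylen the width of name
+%%%% plus number; the \rule and \mghrulefill calls above draw the open
+%%%% frame around the heading.)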
+
+ \renewcommand{\DOTI}[1]{%
+ \raggedright
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@}
+
+ \renewcommand{\DOTIS}[1]{%
+ \raggedright
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@}
+ }
+
+%%%%%% Peter Osborne's version of LENNY DEF
+
+\DeclareOption{PetersLenny}{%
+
+% five new lengths
+\newlength{\bl} % bottom left : orig \space
+\setlength{\bl}{6pt}
+\newcommand{\BL}[1]{\setlength{\bl}{#1}}
+\newlength{\br} % bottom right : orig 1pt
+\setlength{\br}{1pt}
+\newcommand{\BR}[1]{\setlength{\br}{#1}}
+\newlength{\tl} % top left : orig 2pt
+\setlength{\tl}{2pt}
+\newcommand{\TL}[1]{\setlength{\tl}{#1}}
+\newlength{\trr} % top right :orig 1pt
+\setlength{\trr}{1pt}
+\newcommand{\TR}[1]{\setlength{\trr}{#1}}
+\newlength{\blrule} % bottom left rule
+\setlength{\blrule}{0pt}
+\newcommand{\BLrule}[1]{\setlength{\blrule}{#1}}
+
+
+ \ChNameVar{\fontsize{14}{16}\usefont{OT1}{phv}{m}{n}\selectfont}
+ \ChNumVar{\fontsize{60}{62}\usefont{OT1}{ptm}{m}{n}\selectfont}
+ \ChTitleVar{\Huge\bfseries\rm}
+ \ChRuleWidth{1pt}
+\renewcommand{\DOCH}{%
+
+
+%%%%%%% tweaks for 1--20 and A--Z
+\ifcase\c@chapter\relax%
+\or\BL{-3pt}\TL{-4pt}\BR{0pt}\TR{-6pt}%1
+\or\BL{0pt}\TL{-4pt}\BR{2pt}\TR{-4pt}%2
+\or\BL{0pt}\TL{-4pt}\BR{2pt}\TR{-4pt}%3
+\or\BL{0pt}\TL{5pt}\BR{2pt}\TR{-4pt}%4
+\or\BL{0pt}\TL{3pt}\BR{2pt}\TR{-4pt}%5
+\or\BL{-1pt}\TL{0pt}\BR{2pt}\TR{-2pt}%6
+\or\BL{0pt}\TL{-3pt}\BR{2pt}\TR{-2pt}%7
+\or\BL{0pt}\TL{-3pt}\BR{2pt}\TR{-2pt}%8
+\or\BL{0pt}\TL{-3pt}\BR{-4pt}\TR{-2pt}%9
+\or\BL{-3pt}\TL{-3pt}\BR{2pt}\TR{-7pt}%10
+\or\BL{-6pt}\TL{-6pt}\BR{0pt}\TR{-9pt}%11
+\or\BL{-6pt}\TL{-6pt}\BR{2pt}\TR{-7pt}%12
+\or\BL{-5pt}\TL{-5pt}\BR{0pt}\TR{-9pt}%13
+\or\BL{-6pt}\TL{-6pt}\BR{0pt}\TR{-9pt}%14
+\or\BL{-3pt}\TL{-3pt}\BR{3pt}\TR{-6pt}%15
+\or\BL{-3pt}\TL{-3pt}\BR{3pt}\TR{-6pt}%16
+\or\BL{-5pt}\TL{-3pt}\BR{-8pt}\TR{-6pt}%17
+\or\BL{-5pt}\TL{-5pt}\BR{0pt}\TR{-9pt}%18
+\or\BL{-3pt}\TL{-3pt}\BR{-6pt}\TR{-9pt}%19
+\or\BL{0pt}\TL{0pt}\BR{0pt}\TR{-5pt}%20
+\fi
+
+\ifinapp\ifcase\c@chapter\relax%
+\or\BL{0pt}\TL{14pt}\BR{5pt}\TR{-19pt}%A
+\or\BL{0pt}\TL{-5pt}\BR{-3pt}\TR{-8pt}%B
+\or\BL{-3pt}\TL{-2pt}\BR{1pt}\TR{-6pt}\BLrule{0pt}%C
+\or\BL{0pt}\TL{-5pt}\BR{-3pt}\TR{-8pt}\BLrule{0pt}%D
+\or\BL{0pt}\TL{-5pt}\BR{2pt}\TR{-3pt}%E
+\or\BL{0pt}\TL{-5pt}\BR{-10pt}\TR{-1pt}%F
+\or\BL{-3pt}\TL{0pt}\BR{0pt}\TR{-7pt}%G
+\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%H
+\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%I
+\or\BL{2pt}\TL{0pt}\BR{-3pt}\TR{1pt}%J
+\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%K
+\or\BL{0pt}\TL{-5pt}\BR{2pt}\TR{-19pt}%L
+\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}%M
+\or\BL{0pt}\TL{-5pt}\BR{-2pt}\TR{-1pt}%N
+\or\BL{-3pt}\TL{-2pt}\BR{-3pt}\TR{-11pt}%O
+\or\BL{0pt}\TL{-5pt}\BR{-9pt}\TR{-3pt}%P
+\or\BL{-3pt}\TL{-2pt}\BR{-3pt}\TR{-11pt}%Q
+\or\BL{0pt}\TL{-5pt}\BR{4pt}\TR{-8pt}%R
+\or\BL{-2pt}\TL{-2pt}\BR{-2pt}\TR{-7pt}%S
+\or\BL{-3pt}\TL{0pt}\BR{-5pt}\TR{4pt}\BLrule{8pt}%T
+\or\BL{-7pt}\TL{-11pt}\BR{-5pt}\TR{-7pt}\BLrule{0pt}%U
+\or\BL{-14pt}\TL{-5pt}\BR{-14pt}\TR{-1pt}\BLrule{14pt}%V
+\or\BL{-10pt}\TL{-9pt}\BR{-13pt}\TR{-3pt}\BLrule{7pt}%W
+\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}\BLrule{0pt}%X
+\or\BL{-6pt}\TL{-4pt}\BR{-7pt}\TR{1pt}\BLrule{7pt}%Y
+\or\BL{0pt}\TL{-5pt}\BR{3pt}\TR{-1pt}\BLrule{0pt}%Z
+\fi\fi
+%%%%%%%
+ \settowidth{\px}{\CNV\FmN{\@chapapp}}
+ \addtolength{\px}{\tl} %MOD change 2pt to \tl
+ \settoheight{\py}{\CNV\FmN{\@chapapp}}
+ \addtolength{\py}{1pt}
+
+ \settowidth{\mylen}{\CNV\FmN{\@chapapp}\space\CNoV\thechapter}
+  \addtolength{\mylen}{\trr}% MOD change 1pt to \trr
+ \settowidth{\pxx}{\CNoV\thechapter}
+ \addtolength{\pxx}{-1pt}
+
+ \settoheight{\pyy}{\CNoV\thechapter}
+ \addtolength{\pyy}{-2pt}
+ \setlength{\myhi}{\pyy}
+ \addtolength{\myhi}{-1\py}
+ \par
+ \parbox[b]{\textwidth}{%
+ \rule[\py]{\RW}{\myhi}%
+ \hskip -\RW%
+ \rule[\pyy]{\px}{\RW}%
+ \hskip -\px%
+ \raggedright%
+ \CNV\FmN{\@chapapp}\rule{\blrule}{\RW}\hskip\bl\CNoV\thechapter%MOD
+% \CNV\FmN{\@chapapp}\space\CNoV\thechapter %ORIGINAL
+ \hskip\br% %MOD 1pt to \br
+ \mghrulefill{\RW}%
+ \rule{\RW}{\pyy}\par\nobreak%
+ \vskip -\baselineskip%
+ \vskip -\pyy%
+ \hskip \mylen%
+ \mghrulefill{\RW}\par\nobreak%
+ \vskip \pyy}%
+ \vskip 20\p@}
+
+
+ \renewcommand{\DOTI}[1]{%
+ \raggedright
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@}
+
+ \renewcommand{\DOTIS}[1]{%
+ \raggedright
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@}
+ }
+
+
+%
+
+
+%%%%%% BJORNSTRUP DEF
+
+\DeclareOption{Bjornstrup}{%
+ \usecolortrue
+  % pzc (Zapf Chancery) is nice. ppl (Palatino) is cool too.
+ \ChNumVar{\fontsize{76}{80}\usefont{OT1}{pzc}{m}{n}\selectfont}
+ \ChTitleVar{\raggedleft\Large\sffamily\bfseries}
+
+ \setlength{\myhi}{10pt} % Space between grey box border and text
+ \setlength{\mylen}{\textwidth}
+ \addtolength{\mylen}{-2\myhi}
+ \renewcommand{\DOCH}{%
+ \settowidth{\py}{\CNoV\thechapter}
+    \addtolength{\py}{-10pt}      % Amount of space by which the
+                                  % number is shifted right
+ \fboxsep=0pt%
+ \colorbox[gray]{.85}{\rule{0pt}{40pt}\parbox[b]{\textwidth}{\hfill}}%
+ \kern-\py\raise20pt%
+ \hbox{\color[gray]{.5}\CNoV\thechapter}\\%
+ }
+
+ \renewcommand{\DOTI}[1]{%
+ \nointerlineskip\raggedright%
+ \fboxsep=\myhi%
+ \vskip-1ex%
+ \colorbox[gray]{.85}{\parbox[t]{\mylen}{\CTV\FmTi{#1}}}\par\nobreak%
+ \vskip 40\p@%
+ }
+
+ \renewcommand{\DOTIS}[1]{%
+ \fboxsep=0pt
+ \colorbox[gray]{.85}{\rule{0pt}{40pt}\parbox[b]{\textwidth}{\hfill}}\\%
+ \nointerlineskip\raggedright%
+ \fboxsep=\myhi%
+ \colorbox[gray]{.85}{\parbox[t]{\mylen}{\CTV\FmTi{#1}}}\par\nobreak%
+ \vskip 40\p@%
+ }
+}
+
+
+%%%%%%% GLENN DEF
+
+
+\DeclareOption{Glenn}{%
+ \ChNameVar{\bfseries\Large\sf}
+ \ChNumVar{\Huge}
+ \ChTitleVar{\bfseries\Large\rm}
+ \ChRuleWidth{1pt}
+ \ChNameUpperCase
+ \ChTitleUpperCase
+ \renewcommand{\DOCH}{%
+ \settoheight{\myhi}{\CTV\FmTi{Test}}
+ \setlength{\py}{\baselineskip}
+ \addtolength{\py}{\RW}
+ \addtolength{\py}{\myhi}
+ \setlength{\pyy}{\py}
+ \addtolength{\pyy}{-1\RW}
+
+ \raggedright
+ \CNV\FmN{\@chapapp}\space\CNoV\thechapter
+ \hskip 3pt\mghrulefill{\RW}\rule[-1\pyy]{2\RW}{\py}\par\nobreak}
+
+ \renewcommand{\DOTI}[1]{%
+ \addtolength{\pyy}{-4pt}
+ \settoheight{\myhi}{\CTV\FmTi{#1}}
+ \addtolength{\myhi}{\py}
+ \addtolength{\myhi}{-1\RW}
+ \vskip -1\pyy
+ \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 2pt
+ \raggedleft\CTV\FmTi{#1}\par\nobreak
+ \vskip 80\p@}
+
+\newlength{\backskip}
+ \renewcommand{\DOTIS}[1]{%
+% \setlength{\py}{10pt}
+% \setlength{\pyy}{\py}
+% \addtolength{\pyy}{\RW}
+% \setlength{\myhi}{\baselineskip}
+% \addtolength{\myhi}{\pyy}
+% \mghrulefill{\RW}\rule[-1\py]{2\RW}{\pyy}\par\nobreak
+% \addtolength{}{}
+%\vskip -1\baselineskip
+% \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 2pt
+% \raggedleft\CTV\FmTi{#1}\par\nobreak
+% \vskip 60\p@}
+%% Fix suggested by Tomas Lundberg
+    \setlength{\py}{25pt} % or whatever you like
+ \setlength{\pyy}{\py}
+ \setlength{\backskip}{\py}
+ \addtolength{\backskip}{2pt}
+ \addtolength{\pyy}{\RW}
+ \setlength{\myhi}{\baselineskip}
+ \addtolength{\myhi}{\pyy}
+ \mghrulefill{\RW}\rule[-1\py]{2\RW}{\pyy}\par\nobreak
+ \vskip -1\backskip
+ \rule{2\RW}{\myhi}\mghrulefill{\RW}\hskip 3pt %
+ \raggedleft\CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@}
+ }
+
+%%%%%%% CONNY DEF
+
+\DeclareOption{Conny}{%
+ \ChNameUpperCase
+ \ChTitleUpperCase
+ \ChNameVar{\centering\Huge\rm\bfseries}
+ \ChNumVar{\Huge}
+ \ChTitleVar{\centering\Huge\rm}
+ \ChRuleWidth{2pt}
+
+ \renewcommand{\DOCH}{%
+ \mghrulefill{3\RW}\par\nobreak
+ \vskip -0.5\baselineskip
+ \mghrulefill{\RW}\par\nobreak
+ \CNV\FmN{\@chapapp}\space \CNoV\thechapter
+ \par\nobreak
+ \vskip -0.5\baselineskip
+ }
+ \renewcommand{\DOTI}[1]{%
+ \mghrulefill{\RW}\par\nobreak
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 60\p@
+ }
+ \renewcommand{\DOTIS}[1]{%
+ \mghrulefill{\RW}\par\nobreak
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 60\p@
+ }
+ }
+
+%%%%%%% REJNE DEF
+
+\DeclareOption{Rejne}{%
+
+ \ChNameUpperCase
+ \ChTitleUpperCase
+ \ChNameVar{\centering\Large\rm}
+ \ChNumVar{\Huge}
+ \ChTitleVar{\centering\Huge\rm}
+ \ChRuleWidth{1pt}
+ \renewcommand{\DOCH}{%
+ \settoheight{\py}{\CNoV\thechapter}
+ \parskip=0pt plus 1pt % Set parskip to default, just in case v1.31
+ \addtolength{\py}{-1pt}
+ \CNV\FmN{\@chapapp}\par\nobreak
+ \vskip 20\p@
+ \setlength{\myhi}{2\baselineskip}
+ \setlength{\px}{\myhi}
+ \addtolength{\px}{-1\RW}
+ \rule[-1\px]{\RW}{\myhi}\mghrulefill{\RW}\hskip
+ 10pt\raisebox{-0.5\py}{\CNoV\thechapter}\hskip 10pt\mghrulefill{\RW}\rule[-1\px]{\RW}{\myhi}\par\nobreak
+   \vskip -3\p@% Added -2pt vskip to correct for stretched text v1.31
+ }
+ \renewcommand{\DOTI}[1]{%
+ \setlength{\mylen}{\textwidth}
+ \parskip=0pt plus 1pt % Set parskip to default, just in case v1.31
+ \addtolength{\mylen}{-2\RW}
+ {\vrule width\RW}\parbox{\mylen}{\CTV\FmTi{#1}}{\vrule width\RW}\par\nobreak%
+ \vskip -3pt\rule{\RW}{2\baselineskip}\mghrulefill{\RW}\rule{\RW}{2\baselineskip}%
+   \vskip 60\p@% Added -2pt in vskip to correct for stretched text v1.31
+ }
+ \renewcommand{\DOTIS}[1]{%
+ \setlength{\py}{\fboxrule}
+ \setlength{\fboxrule}{\RW}
+ \setlength{\mylen}{\textwidth}
+ \addtolength{\mylen}{-2\RW}
+ \fbox{\parbox{\mylen}{\vskip 2\baselineskip\CTV\FmTi{#1}\par\nobreak\vskip \baselineskip}}
+ \setlength{\fboxrule}{\py}
+ \vskip 60\p@
+ }
+ }
+
+
+%%%%%%% BJARNE DEF
+
+\DeclareOption{Bjarne}{%
+ \ChNameUpperCase
+ \ChTitleUpperCase
+ \ChNameVar{\raggedleft\normalsize\rm}
+ \ChNumVar{\raggedleft \bfseries\Large}
+ \ChTitleVar{\raggedleft \Large\rm}
+ \ChRuleWidth{1pt}
+
+
+%% Note: thechapter -> c@chapter fixes the appendix bug
+%% Fixed the misspelling of ``TWELVE''
+
+ \newcounter{AlphaCnt}
+ \newcounter{AlphaDecCnt}
+ \newcommand{\AlphaNo}{%
+ \ifcase\number\theAlphaCnt
+ \ifnum\c@chapter=0
+ ZERO\else{}\fi
+ \or ONE\or TWO\or THREE\or FOUR\or FIVE
+ \or SIX\or SEVEN\or EIGHT\or NINE\or TEN
+ \or ELEVEN\or TWELVE\or THIRTEEN\or FOURTEEN\or FIFTEEN
+ \or SIXTEEN\or SEVENTEEN\or EIGHTEEN\or NINETEEN\fi
+}
+
+ \newcommand{\AlphaDecNo}{%
+ \setcounter{AlphaDecCnt}{0}
+ \@whilenum\number\theAlphaCnt>0\do
+ {\addtocounter{AlphaCnt}{-10}
+ \addtocounter{AlphaDecCnt}{1}}
+ \ifnum\number\theAlphaCnt=0
+ \else
+ \addtocounter{AlphaDecCnt}{-1}
+ \addtocounter{AlphaCnt}{10}
+ \fi
+
+
+ \ifcase\number\theAlphaDecCnt\or TEN\or TWENTY\or THIRTY\or
+ FORTY\or FIFTY\or SIXTY\or SEVENTY\or EIGHTY\or NINETY\fi
+ }
+ \newcommand{\TheAlphaChapter}{%
+
+ \ifinapp
+ \thechapter
+ \else
+ \setcounter{AlphaCnt}{\c@chapter}
+ \ifnum\c@chapter<20
+ \AlphaNo
+ \else
+ \AlphaDecNo\AlphaNo
+ \fi
+ \fi
+ }
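+%%%% (Worked example: for chapter 25, \AlphaDecNo strips tens from the
+%%%% counter and prints TWENTY, then \AlphaNo prints the remainder,
+%%%% FIVE; chapters below 20 use \AlphaNo alone, and appendices keep
+%%%% the plain letter.)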
+ \renewcommand{\DOCH}{%
+ \mghrulefill{\RW}\par\nobreak
+ \CNV\FmN{\@chapapp}\par\nobreak
+ \CNoV\TheAlphaChapter\par\nobreak
+ \vskip -1\baselineskip\vskip 5pt\mghrulefill{\RW}\par\nobreak
+ \vskip 20\p@
+ }
+ \renewcommand{\DOTI}[1]{%
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@
+ }
+ \renewcommand{\DOTIS}[1]{%
+ \CTV\FmTi{#1}\par\nobreak
+ \vskip 40\p@
+ }
+}
+
+\DeclareOption*{%
+  \PackageWarning{fncychap}{unknown style option}
+ }
+
+\ProcessOptions* \relax
+
+\ifusecolor
+ \RequirePackage{color}
+\fi
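+%%%% Of the styles above only Bjornstrup sets \usecolortrue, so the
+%%%% color package is pulled in just for that option.
+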
+\def\@makechapterhead#1{%
+ \vspace*{50\p@}%
+ {\parindent \z@ \raggedright \normalfont
+ \ifnum \c@secnumdepth >\m@ne
+ \if@mainmatter%%%%% Fix for frontmatter, mainmatter, and backmatter 040920
+ \DOCH
+ \fi
+ \fi
+ \interlinepenalty\@M
+ \if@mainmatter%%%%% Fix for frontmatter, mainmatter, and backmatter 060424
+ \DOTI{#1}%
+ \else%
+ \DOTIS{#1}%
+ \fi
+ }}
+
+
+%%% Begin: To avoid problem with scrbook.cls (fncychap version 1.32)
+
+%%OUT:
+%\def\@schapter#1{\if@twocolumn
+% \@topnewpage[\@makeschapterhead{#1}]%
+% \else
+% \@makeschapterhead{#1}%
+% \@afterheading
+% \fi}
+
+%%IN:
+\def\@schapter#1{%
+\if@twocolumn%
+ \@makeschapterhead{#1}%
+\else%
+ \@makeschapterhead{#1}%
+ \@afterheading%
+\fi}
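+%%%% (The replacement calls \@makeschapterhead directly in both one-
+%%%% and two-column mode, avoiding the \@topnewpage path that clashed
+%%%% with scrbook; see the v1.32 note in the revision history.)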
+
+%%% End: To avoid problem with scrbook.cls (fncychap version 1.32)
+
+\def\@makeschapterhead#1{%
+ \vspace*{50\p@}%
+ {\parindent \z@ \raggedright
+ \normalfont
+ \interlinepenalty\@M
+ \DOTIS{#1}
+ \vskip 40\p@
+ }}
+
+\endinput
+
+
diff --git a/doc/latex/manual.aux b/doc/latex/manual.aux
new file mode 100644
index 0000000..4f41234
--- /dev/null
+++ b/doc/latex/manual.aux
@@ -0,0 +1,366 @@
+\relax
+\ifx\hyper@anchor\@undefined
+\global \let \oldcontentsline\contentsline
+\gdef \contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
+\global \let \oldnewlabel\newlabel
+\gdef \newlabel#1#2{\newlabelxx{#1}#2}
+\gdef \newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
+\AtEndDocument{\let \contentsline\oldcontentsline
+\let \newlabel\oldnewlabel}
+\else
+\global \let \hyper@last\relax
+\fi
+
+\select@language{english}
+\@writefile{toc}{\select@language{english}}
+\@writefile{lof}{\select@language{english}}
+\@writefile{lot}{\select@language{english}}
+\newlabel{index::doc}{{}{1}{\relax }{section*.2}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {1}About S3QL}{1}{chapter.1}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{about:about-s3ql}{{1}{1}{About S3QL\relax }{chapter.1}{}}
+\newlabel{about::doc}{{1}{1}{About S3QL\relax }{chapter.1}{}}
+\newlabel{about:s3ql-user-s-guide}{{1}{1}{About S3QL\relax }{chapter.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.1}Features}{1}{section.1.1}}
+\newlabel{about:features}{{1.1}{1}{Features\relax }{section.1.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {1.2}Development Status}{2}{section.1.2}}
+\newlabel{about:development-status}{{1.2}{2}{Development Status\relax }{section.1.2}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {2}Installation}{3}{chapter.2}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{installation:installation}{{2}{3}{Installation\relax }{chapter.2}{}}
+\newlabel{installation::doc}{{2}{3}{Installation\relax }{chapter.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {2.1}Dependencies}{3}{section.2.1}}
+\newlabel{installation:dependencies}{{2.1}{3}{Dependencies\relax }{section.2.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {2.2}Installing S3QL}{4}{section.2.2}}
+\newlabel{installation:inst-s3ql}{{2.2}{4}{Installing S3QL\relax }{section.2.2}{}}
+\newlabel{installation:installing-s3ql}{{2.2}{4}{Installing S3QL\relax }{section.2.2}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {3}Storage Backends}{5}{chapter.3}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{backends::doc}{{3}{5}{Storage Backends\relax }{chapter.3}{}}
+\newlabel{backends:storage-backends}{{3}{5}{Storage Backends\relax }{chapter.3}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.1}On Backend Reliability}{5}{section.3.1}}
+\newlabel{backends:on-backend-reliability}{{3.1}{5}{On Backend Reliability\relax }{section.3.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.2}The \texttt {authinfo} file}{6}{section.3.2}}
+\newlabel{backends:the-authinfo-file}{{3.2}{6}{The \texttt {authinfo} file\relax }{section.3.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.3}Consistency Guarantees}{6}{section.3.3}}
+\newlabel{backends:consistency-guarantees}{{3.3}{6}{Consistency Guarantees\relax }{section.3.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {3.3.1}Dealing with Eventual Consistency}{6}{subsection.3.3.1}}
+\newlabel{backends:dealing-with-eventual-consistency}{{3.3.1}{6}{Dealing with Eventual Consistency\relax }{subsection.3.3.1}{}}
+\newlabel{backends:eventual-consistency}{{3.3.1}{6}{Dealing with Eventual Consistency\relax }{subsection.3.3.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.4}The Amazon S3 Backend}{7}{section.3.4}}
+\newlabel{backends:the-amazon-s3-backend}{{3.4}{7}{The Amazon S3 Backend\relax }{section.3.4}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.5}The Local Backend}{8}{section.3.5}}
+\newlabel{backends:the-local-backend}{{3.5}{8}{The Local Backend\relax }{section.3.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3.6}The SFTP Backend}{8}{section.3.6}}
+\newlabel{backends:the-sftp-backend}{{3.6}{8}{The SFTP Backend\relax }{section.3.6}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {4}File System Creation}{9}{chapter.4}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{mkfs::doc}{{4}{9}{File System Creation\relax }{chapter.4}{}}
+\newlabel{mkfs:file-system-creation}{{4}{9}{File System Creation\relax }{chapter.4}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {5}Managing Buckets}{11}{chapter.5}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{adm::doc}{{5}{11}{Managing Buckets\relax }{chapter.5}{}}
+\newlabel{adm:managing-buckets}{{5}{11}{Managing Buckets\relax }{chapter.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {5.1}Changing the Passphrase}{11}{section.5.1}}
+\newlabel{adm:changing-the-passphrase}{{5.1}{11}{Changing the Passphrase\relax }{section.5.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {5.2}Upgrading the file system}{11}{section.5.2}}
+\newlabel{adm:upgrading-the-file-system}{{5.2}{11}{Upgrading the file system\relax }{section.5.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {5.3}Deleting a file system}{12}{section.5.3}}
+\newlabel{adm:deleting-a-file-system}{{5.3}{12}{Deleting a file system\relax }{section.5.3}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {5.4}Restoring Metadata Backups}{12}{section.5.4}}
+\newlabel{adm:restoring-metadata-backups}{{5.4}{12}{Restoring Metadata Backups\relax }{section.5.4}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {6}Mounting}{13}{chapter.6}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{mount:mounting}{{6}{13}{Mounting\relax }{chapter.6}{}}
+\newlabel{mount::doc}{{6}{13}{Mounting\relax }{chapter.6}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.1}Storing Encryption Passwords}{14}{section.6.1}}
+\newlabel{mount:bucket-pw}{{6.1}{14}{Storing Encryption Passwords\relax }{section.6.1}{}}
+\newlabel{mount:storing-encryption-passwords}{{6.1}{14}{Storing Encryption Passwords\relax }{section.6.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.2}Compression Algorithms}{14}{section.6.2}}
+\newlabel{mount:compression-algorithms}{{6.2}{14}{Compression Algorithms\relax }{section.6.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.3}Parallel Compression}{15}{section.6.3}}
+\newlabel{mount:parallel-compression}{{6.3}{15}{Parallel Compression\relax }{section.6.3}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.4}Notes about Caching}{15}{section.6.4}}
+\newlabel{mount:notes-about-caching}{{6.4}{15}{Notes about Caching\relax }{section.6.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.4.1}Maximum Number of Cache Entries}{15}{subsection.6.4.1}}
+\newlabel{mount:maximum-number-of-cache-entries}{{6.4.1}{15}{Maximum Number of Cache Entries\relax }{subsection.6.4.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.4.2}Cache Flushing and Expiration}{15}{subsection.6.4.2}}
+\newlabel{mount:cache-flushing-and-expiration}{{6.4.2}{15}{Cache Flushing and Expiration\relax }{subsection.6.4.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {6.5}Automatic Mounting}{15}{section.6.5}}
+\newlabel{mount:automatic-mounting}{{6.5}{15}{Automatic Mounting\relax }{section.6.5}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {7}Advanced S3QL Features}{17}{chapter.7}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{special:advanced-s3ql-features}{{7}{17}{Advanced S3QL Features\relax }{chapter.7}{}}
+\newlabel{special::doc}{{7}{17}{Advanced S3QL Features\relax }{chapter.7}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {7.1}Snapshotting and Copy-on-Write}{17}{section.7.1}}
+\newlabel{special:snapshotting-and-copy-on-write}{{7.1}{17}{Snapshotting and Copy-on-Write\relax }{section.7.1}{}}
+\newlabel{special:s3qlcp}{{7.1}{17}{Snapshotting and Copy-on-Write\relax }{section.7.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {7.1.1}Snapshotting vs Hardlinking}{17}{subsection.7.1.1}}
+\newlabel{special:snapshotting-vs-hardlinking}{{7.1.1}{17}{Snapshotting vs Hardlinking\relax }{subsection.7.1.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {7.2}Getting Statistics}{18}{section.7.2}}
+\newlabel{special:s3qlstat}{{7.2}{18}{Getting Statistics\relax }{section.7.2}{}}
+\newlabel{special:getting-statistics}{{7.2}{18}{Getting Statistics\relax }{section.7.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {7.3}Immutable Trees}{18}{section.7.3}}
+\newlabel{special:immutable-trees}{{7.3}{18}{Immutable Trees\relax }{section.7.3}{}}
+\newlabel{special:s3qllock}{{7.3}{18}{Immutable Trees\relax }{section.7.3}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {7.4}Fast Recursive Removal}{19}{section.7.4}}
+\newlabel{special:s3qlrm}{{7.4}{19}{Fast Recursive Removal\relax }{section.7.4}{}}
+\newlabel{special:fast-recursive-removal}{{7.4}{19}{Fast Recursive Removal\relax }{section.7.4}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {7.5}Runtime Configuration}{19}{section.7.5}}
+\newlabel{special:runtime-configuration}{{7.5}{19}{Runtime Configuration\relax }{section.7.5}{}}
+\newlabel{special:s3qlctrl}{{7.5}{19}{Runtime Configuration\relax }{section.7.5}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {8}Unmounting}{21}{chapter.8}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{umount::doc}{{8}{21}{Unmounting\relax }{chapter.8}{}}
+\newlabel{umount:unmounting}{{8}{21}{Unmounting\relax }{chapter.8}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {9}Checking for Errors}{23}{chapter.9}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{fsck:checking-for-errors}{{9}{23}{Checking for Errors\relax }{chapter.9}{}}
+\newlabel{fsck::doc}{{9}{23}{Checking for Errors\relax }{chapter.9}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {10}Contributed Programs}{25}{chapter.10}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{contrib:contributed-programs}{{10}{25}{Contributed Programs\relax }{chapter.10}{}}
+\newlabel{contrib::doc}{{10}{25}{Contributed Programs\relax }{chapter.10}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {10.1}benchmark.py}{25}{section.10.1}}
+\newlabel{contrib:benchmark-py}{{10.1}{25}{benchmark.py\relax }{section.10.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {10.2}s3\_copy.py}{25}{section.10.2}}
+\newlabel{contrib:s3-copy-py}{{10.2}{25}{s3\_copy.py\relax }{section.10.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {10.3}pcp.py}{25}{section.10.3}}
+\newlabel{contrib:pcp-py}{{10.3}{25}{pcp.py\relax }{section.10.3}{}}
+\newlabel{contrib:pcp}{{10.3}{25}{pcp.py\relax }{section.10.3}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {10.4}s3\_backup.sh}{25}{section.10.4}}
+\newlabel{contrib:s3-backup-sh}{{10.4}{25}{s3\_backup.sh\relax }{section.10.4}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {10.5}expire\_backups.py}{26}{section.10.5}}
+\newlabel{contrib:expire-backups-py}{{10.5}{26}{expire\_backups.py\relax }{section.10.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {10.6}s3ql.conf}{27}{section.10.6}}
+\newlabel{contrib:s3ql-conf}{{10.6}{27}{s3ql.conf\relax }{section.10.6}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {11}Tips \& Tricks}{29}{chapter.11}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{tips:tips-tricks}{{11}{29}{Tips \& Tricks\relax }{chapter.11}{}}
+\newlabel{tips::doc}{{11}{29}{Tips \& Tricks\relax }{chapter.11}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {11.1}Permanently mounted backup file system}{29}{section.11.1}}
+\newlabel{tips:copy-performance}{{11.1}{29}{Permanently mounted backup file system\relax }{section.11.1}{}}
+\newlabel{tips:permanently-mounted-backup-file-system}{{11.1}{29}{Permanently mounted backup file system\relax }{section.11.1}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {11.2}Improving copy performance}{29}{section.11.2}}
+\newlabel{tips:improving-copy-performance}{{11.2}{29}{Improving copy performance\relax }{section.11.2}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {12}Known Issues}{31}{chapter.12}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{issues:known-issues}{{12}{31}{Known Issues\relax }{chapter.12}{}}
+\newlabel{issues::doc}{{12}{31}{Known Issues\relax }{chapter.12}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {13}Manpages}{33}{chapter.13}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{man/index:manpages}{{13}{33}{Manpages\relax }{chapter.13}{}}
+\newlabel{man/index::doc}{{13}{33}{Manpages\relax }{chapter.13}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.1}The \textbf {mkfs.s3ql} command}{33}{section.13.1}}
+\newlabel{man/mkfs:the-mkfs-s3ql-command}{{13.1}{33}{The \textbf {mkfs.s3ql} command\relax }{section.13.1}{}}
+\newlabel{man/mkfs::doc}{{13.1}{33}{The \textbf {mkfs.s3ql} command\relax }{section.13.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.1.1}Synopsis}{33}{subsection.13.1.1}}
+\newlabel{man/mkfs:synopsis}{{13.1.1}{33}{Synopsis\relax }{subsection.13.1.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.1.2}Description}{33}{subsection.13.1.2}}
+\newlabel{man/mkfs:description}{{13.1.2}{33}{Description\relax }{subsection.13.1.2}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Amazon S3}{33}{subsubsection*.3}}
+\newlabel{man/mkfs:amazon-s3}{{13.1.2}{33}{Amazon S3\relax }{subsubsection*.3}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Local}{33}{subsubsection*.4}}
+\newlabel{man/mkfs:local}{{13.1.2}{33}{Local\relax }{subsubsection*.4}{}}
+\@writefile{toc}{\contentsline {subsubsection}{SFTP}{33}{subsubsection*.5}}
+\newlabel{man/mkfs:sftp}{{13.1.2}{33}{SFTP\relax }{subsubsection*.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.1.3}Options}{34}{subsection.13.1.3}}
+\newlabel{man/mkfs:options}{{13.1.3}{34}{Options\relax }{subsection.13.1.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.1.4}Files}{34}{subsection.13.1.4}}
+\newlabel{man/mkfs:files}{{13.1.4}{34}{Files\relax }{subsection.13.1.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.1.5}Exit Status}{34}{subsection.13.1.5}}
+\newlabel{man/mkfs:exit-status}{{13.1.5}{34}{Exit Status\relax }{subsection.13.1.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.1.6}See Also}{34}{subsection.13.1.6}}
+\newlabel{man/mkfs:see-also}{{13.1.6}{34}{See Also\relax }{subsection.13.1.6}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.2}The \textbf {s3qladm} command}{34}{section.13.2}}
+\newlabel{man/adm::doc}{{13.2}{34}{The \textbf {s3qladm} command\relax }{section.13.2}{}}
+\newlabel{man/adm:the-s3qladm-command}{{13.2}{34}{The \textbf {s3qladm} command\relax }{section.13.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.2.1}Synopsis}{34}{subsection.13.2.1}}
+\newlabel{man/adm:synopsis}{{13.2.1}{34}{Synopsis\relax }{subsection.13.2.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.2.2}Description}{35}{subsection.13.2.2}}
+\newlabel{man/adm:description}{{13.2.2}{35}{Description\relax }{subsection.13.2.2}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Amazon S3}{35}{subsubsection*.6}}
+\newlabel{man/adm:amazon-s3}{{13.2.2}{35}{Amazon S3\relax }{subsubsection*.6}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Local}{35}{subsubsection*.7}}
+\newlabel{man/adm:local}{{13.2.2}{35}{Local\relax }{subsubsection*.7}{}}
+\@writefile{toc}{\contentsline {subsubsection}{SFTP}{35}{subsubsection*.8}}
+\newlabel{man/adm:sftp}{{13.2.2}{35}{SFTP\relax }{subsubsection*.8}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.2.3}Options}{35}{subsection.13.2.3}}
+\newlabel{man/adm:options}{{13.2.3}{35}{Options\relax }{subsection.13.2.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.2.4}Actions}{35}{subsection.13.2.4}}
+\newlabel{man/adm:actions}{{13.2.4}{35}{Actions\relax }{subsection.13.2.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.2.5}Files}{36}{subsection.13.2.5}}
+\newlabel{man/adm:files}{{13.2.5}{36}{Files\relax }{subsection.13.2.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.2.6}Exit Status}{36}{subsection.13.2.6}}
+\newlabel{man/adm:exit-status}{{13.2.6}{36}{Exit Status\relax }{subsection.13.2.6}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.2.7}See Also}{36}{subsection.13.2.7}}
+\newlabel{man/adm:see-also}{{13.2.7}{36}{See Also\relax }{subsection.13.2.7}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.3}The \textbf {mount.s3ql} command}{36}{section.13.3}}
+\newlabel{man/mount::doc}{{13.3}{36}{The \textbf {mount.s3ql} command\relax }{section.13.3}{}}
+\newlabel{man/mount:the-mount-s3ql-command}{{13.3}{36}{The \textbf {mount.s3ql} command\relax }{section.13.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.3.1}Synopsis}{36}{subsection.13.3.1}}
+\newlabel{man/mount:synopsis}{{13.3.1}{36}{Synopsis\relax }{subsection.13.3.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.3.2}Description}{36}{subsection.13.3.2}}
+\newlabel{man/mount:description}{{13.3.2}{36}{Description\relax }{subsection.13.3.2}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Amazon S3}{36}{subsubsection*.9}}
+\newlabel{man/mount:amazon-s3}{{13.3.2}{36}{Amazon S3\relax }{subsubsection*.9}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Local}{36}{subsubsection*.10}}
+\newlabel{man/mount:local}{{13.3.2}{36}{Local\relax }{subsubsection*.10}{}}
+\@writefile{toc}{\contentsline {subsubsection}{SFTP}{36}{subsubsection*.11}}
+\newlabel{man/mount:sftp}{{13.3.2}{36}{SFTP\relax }{subsubsection*.11}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.3.3}Options}{37}{subsection.13.3.3}}
+\newlabel{man/mount:options}{{13.3.3}{37}{Options\relax }{subsection.13.3.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.3.4}Files}{38}{subsection.13.3.4}}
+\newlabel{man/mount:files}{{13.3.4}{38}{Files\relax }{subsection.13.3.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.3.5}Exit Status}{38}{subsection.13.3.5}}
+\newlabel{man/mount:exit-status}{{13.3.5}{38}{Exit Status\relax }{subsection.13.3.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.3.6}See Also}{38}{subsection.13.3.6}}
+\newlabel{man/mount:see-also}{{13.3.6}{38}{See Also\relax }{subsection.13.3.6}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.4}The \textbf {s3qlstat} command}{38}{section.13.4}}
+\newlabel{man/stat:the-s3qlstat-command}{{13.4}{38}{The \textbf {s3qlstat} command\relax }{section.13.4}{}}
+\newlabel{man/stat::doc}{{13.4}{38}{The \textbf {s3qlstat} command\relax }{section.13.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.4.1}Synopsis}{38}{subsection.13.4.1}}
+\newlabel{man/stat:synopsis}{{13.4.1}{38}{Synopsis\relax }{subsection.13.4.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.4.2}Description}{38}{subsection.13.4.2}}
+\newlabel{man/stat:description}{{13.4.2}{38}{Description\relax }{subsection.13.4.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.4.3}Options}{38}{subsection.13.4.3}}
+\newlabel{man/stat:options}{{13.4.3}{38}{Options\relax }{subsection.13.4.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.4.4}Exit Status}{38}{subsection.13.4.4}}
+\newlabel{man/stat:exit-status}{{13.4.4}{38}{Exit Status\relax }{subsection.13.4.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.4.5}See Also}{38}{subsection.13.4.5}}
+\newlabel{man/stat:see-also}{{13.4.5}{38}{See Also\relax }{subsection.13.4.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.5}The \textbf {s3qlctrl} command}{39}{section.13.5}}
+\newlabel{man/ctrl:the-s3qlctrl-command}{{13.5}{39}{The \textbf {s3qlctrl} command\relax }{section.13.5}{}}
+\newlabel{man/ctrl::doc}{{13.5}{39}{The \textbf {s3qlctrl} command\relax }{section.13.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.5.1}Synopsis}{39}{subsection.13.5.1}}
+\newlabel{man/ctrl:synopsis}{{13.5.1}{39}{Synopsis\relax }{subsection.13.5.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.5.2}Description}{39}{subsection.13.5.2}}
+\newlabel{man/ctrl:description}{{13.5.2}{39}{Description\relax }{subsection.13.5.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.5.3}Options}{39}{subsection.13.5.3}}
+\newlabel{man/ctrl:options}{{13.5.3}{39}{Options\relax }{subsection.13.5.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.5.4}Exit Status}{39}{subsection.13.5.4}}
+\newlabel{man/ctrl:exit-status}{{13.5.4}{39}{Exit Status\relax }{subsection.13.5.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.5.5}See Also}{39}{subsection.13.5.5}}
+\newlabel{man/ctrl:see-also}{{13.5.5}{39}{See Also\relax }{subsection.13.5.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.6}The \textbf {s3qlcp} command}{40}{section.13.6}}
+\newlabel{man/cp:the-s3qlcp-command}{{13.6}{40}{The \textbf {s3qlcp} command\relax }{section.13.6}{}}
+\newlabel{man/cp::doc}{{13.6}{40}{The \textbf {s3qlcp} command\relax }{section.13.6}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.6.1}Synopsis}{40}{subsection.13.6.1}}
+\newlabel{man/cp:synopsis}{{13.6.1}{40}{Synopsis\relax }{subsection.13.6.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.6.2}Description}{40}{subsection.13.6.2}}
+\newlabel{man/cp:description}{{13.6.2}{40}{Description\relax }{subsection.13.6.2}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Snapshotting vs Hardlinking}{40}{subsubsection*.12}}
+\newlabel{man/cp:snapshotting-vs-hardlinking}{{13.6.2}{40}{Snapshotting vs Hardlinking\relax }{subsubsection*.12}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.6.3}Options}{40}{subsection.13.6.3}}
+\newlabel{man/cp:options}{{13.6.3}{40}{Options\relax }{subsection.13.6.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.6.4}Exit Status}{41}{subsection.13.6.4}}
+\newlabel{man/cp:exit-status}{{13.6.4}{41}{Exit Status\relax }{subsection.13.6.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.6.5}See Also}{41}{subsection.13.6.5}}
+\newlabel{man/cp:see-also}{{13.6.5}{41}{See Also\relax }{subsection.13.6.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.7}The \textbf {s3qlrm} command}{41}{section.13.7}}
+\newlabel{man/rm::doc}{{13.7}{41}{The \textbf {s3qlrm} command\relax }{section.13.7}{}}
+\newlabel{man/rm:the-s3qlrm-command}{{13.7}{41}{The \textbf {s3qlrm} command\relax }{section.13.7}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.7.1}Synopsis}{41}{subsection.13.7.1}}
+\newlabel{man/rm:synopsis}{{13.7.1}{41}{Synopsis\relax }{subsection.13.7.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.7.2}Description}{41}{subsection.13.7.2}}
+\newlabel{man/rm:description}{{13.7.2}{41}{Description\relax }{subsection.13.7.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.7.3}Options}{41}{subsection.13.7.3}}
+\newlabel{man/rm:options}{{13.7.3}{41}{Options\relax }{subsection.13.7.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.7.4}Exit Status}{41}{subsection.13.7.4}}
+\newlabel{man/rm:exit-status}{{13.7.4}{41}{Exit Status\relax }{subsection.13.7.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.7.5}See Also}{41}{subsection.13.7.5}}
+\newlabel{man/rm:see-also}{{13.7.5}{41}{See Also\relax }{subsection.13.7.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.8}The \textbf {s3qllock} command}{42}{section.13.8}}
+\newlabel{man/lock:the-s3qllock-command}{{13.8}{42}{The \textbf {s3qllock} command\relax }{section.13.8}{}}
+\newlabel{man/lock::doc}{{13.8}{42}{The \textbf {s3qllock} command\relax }{section.13.8}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.8.1}Synopsis}{42}{subsection.13.8.1}}
+\newlabel{man/lock:synopsis}{{13.8.1}{42}{Synopsis\relax }{subsection.13.8.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.8.2}Description}{42}{subsection.13.8.2}}
+\newlabel{man/lock:description}{{13.8.2}{42}{Description\relax }{subsection.13.8.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.8.3}Rationale}{42}{subsection.13.8.3}}
+\newlabel{man/lock:rationale}{{13.8.3}{42}{Rationale\relax }{subsection.13.8.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.8.4}Options}{42}{subsection.13.8.4}}
+\newlabel{man/lock:options}{{13.8.4}{42}{Options\relax }{subsection.13.8.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.8.5}Exit Status}{42}{subsection.13.8.5}}
+\newlabel{man/lock:exit-status}{{13.8.5}{42}{Exit Status\relax }{subsection.13.8.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.8.6}See Also}{43}{subsection.13.8.6}}
+\newlabel{man/lock:see-also}{{13.8.6}{43}{See Also\relax }{subsection.13.8.6}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.9}The \textbf {umount.s3ql} command}{43}{section.13.9}}
+\newlabel{man/umount::doc}{{13.9}{43}{The \textbf {umount.s3ql} command\relax }{section.13.9}{}}
+\newlabel{man/umount:the-umount-s3ql-command}{{13.9}{43}{The \textbf {umount.s3ql} command\relax }{section.13.9}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.9.1}Synopsis}{43}{subsection.13.9.1}}
+\newlabel{man/umount:synopsis}{{13.9.1}{43}{Synopsis\relax }{subsection.13.9.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.9.2}Description}{43}{subsection.13.9.2}}
+\newlabel{man/umount:description}{{13.9.2}{43}{Description\relax }{subsection.13.9.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.9.3}Options}{43}{subsection.13.9.3}}
+\newlabel{man/umount:options}{{13.9.3}{43}{Options\relax }{subsection.13.9.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.9.4}Exit Status}{43}{subsection.13.9.4}}
+\newlabel{man/umount:exit-status}{{13.9.4}{43}{Exit Status\relax }{subsection.13.9.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.9.5}See Also}{43}{subsection.13.9.5}}
+\newlabel{man/umount:see-also}{{13.9.5}{43}{See Also\relax }{subsection.13.9.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.10}The \textbf {fsck.s3ql} command}{44}{section.13.10}}
+\newlabel{man/fsck::doc}{{13.10}{44}{The \textbf {fsck.s3ql} command\relax }{section.13.10}{}}
+\newlabel{man/fsck:the-fsck-s3ql-command}{{13.10}{44}{The \textbf {fsck.s3ql} command\relax }{section.13.10}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.10.1}Synopsis}{44}{subsection.13.10.1}}
+\newlabel{man/fsck:synopsis}{{13.10.1}{44}{Synopsis\relax }{subsection.13.10.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.10.2}Description}{44}{subsection.13.10.2}}
+\newlabel{man/fsck:description}{{13.10.2}{44}{Description\relax }{subsection.13.10.2}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Amazon S3}{44}{subsubsection*.13}}
+\newlabel{man/fsck:amazon-s3}{{13.10.2}{44}{Amazon S3\relax }{subsubsection*.13}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Local}{44}{subsubsection*.14}}
+\newlabel{man/fsck:local}{{13.10.2}{44}{Local\relax }{subsubsection*.14}{}}
+\@writefile{toc}{\contentsline {subsubsection}{SFTP}{44}{subsubsection*.15}}
+\newlabel{man/fsck:sftp}{{13.10.2}{44}{SFTP\relax }{subsubsection*.15}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.10.3}Options}{44}{subsection.13.10.3}}
+\newlabel{man/fsck:options}{{13.10.3}{44}{Options\relax }{subsection.13.10.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.10.4}Files}{45}{subsection.13.10.4}}
+\newlabel{man/fsck:files}{{13.10.4}{45}{Files\relax }{subsection.13.10.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.10.5}Exit Status}{45}{subsection.13.10.5}}
+\newlabel{man/fsck:exit-status}{{13.10.5}{45}{Exit Status\relax }{subsection.13.10.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.10.6}See Also}{45}{subsection.13.10.6}}
+\newlabel{man/fsck:see-also}{{13.10.6}{45}{See Also\relax }{subsection.13.10.6}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.11}The \textbf {pcp} command}{45}{section.13.11}}
+\newlabel{man/pcp:the-pcp-command}{{13.11}{45}{The \textbf {pcp} command\relax }{section.13.11}{}}
+\newlabel{man/pcp::doc}{{13.11}{45}{The \textbf {pcp} command\relax }{section.13.11}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.11.1}Synopsis}{45}{subsection.13.11.1}}
+\newlabel{man/pcp:synopsis}{{13.11.1}{45}{Synopsis\relax }{subsection.13.11.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.11.2}Description}{45}{subsection.13.11.2}}
+\newlabel{man/pcp:description}{{13.11.2}{45}{Description\relax }{subsection.13.11.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.11.3}Options}{45}{subsection.13.11.3}}
+\newlabel{man/pcp:options}{{13.11.3}{45}{Options\relax }{subsection.13.11.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.11.4}Exit Status}{45}{subsection.13.11.4}}
+\newlabel{man/pcp:exit-status}{{13.11.4}{45}{Exit Status\relax }{subsection.13.11.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.11.5}See Also}{45}{subsection.13.11.5}}
+\newlabel{man/pcp:see-also}{{13.11.5}{45}{See Also\relax }{subsection.13.11.5}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {13.12}The \textbf {expire\_backups} command}{46}{section.13.12}}
+\newlabel{man/expire_backups::doc}{{13.12}{46}{The \textbf {expire\_backups} command\relax }{section.13.12}{}}
+\newlabel{man/expire_backups:the-expire-backups-command}{{13.12}{46}{The \textbf {expire\_backups} command\relax }{section.13.12}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.12.1}Synopsis}{46}{subsection.13.12.1}}
+\newlabel{man/expire_backups:synopsis}{{13.12.1}{46}{Synopsis\relax }{subsection.13.12.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.12.2}Description}{46}{subsection.13.12.2}}
+\newlabel{man/expire_backups:description}{{13.12.2}{46}{Description\relax }{subsection.13.12.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.12.3}Options}{47}{subsection.13.12.3}}
+\newlabel{man/expire_backups:options}{{13.12.3}{47}{Options\relax }{subsection.13.12.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.12.4}Exit Status}{47}{subsection.13.12.4}}
+\newlabel{man/expire_backups:exit-status}{{13.12.4}{47}{Exit Status\relax }{subsection.13.12.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {13.12.5}See Also}{47}{subsection.13.12.5}}
+\newlabel{man/expire_backups:see-also}{{13.12.5}{47}{See Also\relax }{subsection.13.12.5}{}}
+\@writefile{toc}{\contentsline {chapter}{\numberline {14}Further Resources / Getting Help}{49}{chapter.14}}
+\@writefile{lof}{\addvspace {10\p@ }}
+\@writefile{lot}{\addvspace {10\p@ }}
+\newlabel{resources::doc}{{14}{49}{Further Resources / Getting Help\relax }{chapter.14}{}}
+\newlabel{resources:further-resources-getting-help}{{14}{49}{Further Resources / Getting Help\relax }{chapter.14}{}}
+\newlabel{resources:resources}{{14}{49}{Further Resources / Getting Help\relax }{chapter.14}{}}
diff --git a/doc/latex/manual.idx b/doc/latex/manual.idx
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/doc/latex/manual.idx
diff --git a/doc/latex/manual.log b/doc/latex/manual.log
new file mode 100644
index 0000000..20029d7
--- /dev/null
+++ b/doc/latex/manual.log
@@ -0,0 +1,934 @@
+This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=pdflatex 2011.4.17) 20 MAY 2011 12:20
+entering extended mode
+ %&-line parsing enabled.
+**manual.tex
+(./manual.tex
+LaTeX2e <2009/09/24>
+Babel <v3.8l> and hyphenation patterns for english, usenglishmax, dumylang, noh
+yphenation, ngerman, german, german-x-2009-06-19, ngerman-x-2009-06-19, loaded.
+
+(./sphinxmanual.cls
+Document Class: sphinxmanual 2009/06/02 Document class (Sphinx manual)
+(/usr/share/texmf-texlive/tex/latex/base/report.cls
+Document Class: report 2007/10/19 v1.4h Standard LaTeX document class
+(/usr/share/texmf-texlive/tex/latex/base/size10.clo
+File: size10.clo 2007/10/19 v1.4h Standard LaTeX file (size option)
+)
+\c@part=\count79
+\c@chapter=\count80
+\c@section=\count81
+\c@subsection=\count82
+\c@subsubsection=\count83
+\c@paragraph=\count84
+\c@subparagraph=\count85
+\c@figure=\count86
+\c@table=\count87
+\abovecaptionskip=\skip41
+\belowcaptionskip=\skip42
+\bibindent=\dimen102
+)) (/usr/share/texmf-texlive/tex/latex/base/inputenc.sty
+Package: inputenc 2008/03/30 v1.1d Input encoding file
+\inpenc@prehook=\toks14
+\inpenc@posthook=\toks15
+(/usr/share/texmf-texlive/tex/latex/base/utf8.def
+File: utf8.def 2008/04/05 v1.1m UTF-8 support for inputenc
+Now handling font encoding OML ...
+... no UTF-8 mapping file for font encoding OML
+Now handling font encoding T1 ...
+... processing UTF-8 mapping file for font encoding T1
+(/usr/share/texmf-texlive/tex/latex/base/t1enc.dfu
+File: t1enc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc
+ defining Unicode char U+00A1 (decimal 161)
+ defining Unicode char U+00A3 (decimal 163)
+ defining Unicode char U+00AB (decimal 171)
+ defining Unicode char U+00BB (decimal 187)
+ defining Unicode char U+00BF (decimal 191)
+ defining Unicode char U+00C0 (decimal 192)
+ defining Unicode char U+00C1 (decimal 193)
+ defining Unicode char U+00C2 (decimal 194)
+ defining Unicode char U+00C3 (decimal 195)
+ defining Unicode char U+00C4 (decimal 196)
+ defining Unicode char U+00C5 (decimal 197)
+ defining Unicode char U+00C6 (decimal 198)
+ defining Unicode char U+00C7 (decimal 199)
+ defining Unicode char U+00C8 (decimal 200)
+ defining Unicode char U+00C9 (decimal 201)
+ defining Unicode char U+00CA (decimal 202)
+ defining Unicode char U+00CB (decimal 203)
+ defining Unicode char U+00CC (decimal 204)
+ defining Unicode char U+00CD (decimal 205)
+ defining Unicode char U+00CE (decimal 206)
+ defining Unicode char U+00CF (decimal 207)
+ defining Unicode char U+00D0 (decimal 208)
+ defining Unicode char U+00D1 (decimal 209)
+ defining Unicode char U+00D2 (decimal 210)
+ defining Unicode char U+00D3 (decimal 211)
+ defining Unicode char U+00D4 (decimal 212)
+ defining Unicode char U+00D5 (decimal 213)
+ defining Unicode char U+00D6 (decimal 214)
+ defining Unicode char U+00D8 (decimal 216)
+ defining Unicode char U+00D9 (decimal 217)
+ defining Unicode char U+00DA (decimal 218)
+ defining Unicode char U+00DB (decimal 219)
+ defining Unicode char U+00DC (decimal 220)
+ defining Unicode char U+00DD (decimal 221)
+ defining Unicode char U+00DE (decimal 222)
+ defining Unicode char U+00DF (decimal 223)
+ defining Unicode char U+00E0 (decimal 224)
+ defining Unicode char U+00E1 (decimal 225)
+ defining Unicode char U+00E2 (decimal 226)
+ defining Unicode char U+00E3 (decimal 227)
+ defining Unicode char U+00E4 (decimal 228)
+ defining Unicode char U+00E5 (decimal 229)
+ defining Unicode char U+00E6 (decimal 230)
+ defining Unicode char U+00E7 (decimal 231)
+ defining Unicode char U+00E8 (decimal 232)
+ defining Unicode char U+00E9 (decimal 233)
+ defining Unicode char U+00EA (decimal 234)
+ defining Unicode char U+00EB (decimal 235)
+ defining Unicode char U+00EC (decimal 236)
+ defining Unicode char U+00ED (decimal 237)
+ defining Unicode char U+00EE (decimal 238)
+ defining Unicode char U+00EF (decimal 239)
+ defining Unicode char U+00F0 (decimal 240)
+ defining Unicode char U+00F1 (decimal 241)
+ defining Unicode char U+00F2 (decimal 242)
+ defining Unicode char U+00F3 (decimal 243)
+ defining Unicode char U+00F4 (decimal 244)
+ defining Unicode char U+00F5 (decimal 245)
+ defining Unicode char U+00F6 (decimal 246)
+ defining Unicode char U+00F8 (decimal 248)
+ defining Unicode char U+00F9 (decimal 249)
+ defining Unicode char U+00FA (decimal 250)
+ defining Unicode char U+00FB (decimal 251)
+ defining Unicode char U+00FC (decimal 252)
+ defining Unicode char U+00FD (decimal 253)
+ defining Unicode char U+00FE (decimal 254)
+ defining Unicode char U+00FF (decimal 255)
+ defining Unicode char U+0102 (decimal 258)
+ defining Unicode char U+0103 (decimal 259)
+ defining Unicode char U+0104 (decimal 260)
+ defining Unicode char U+0105 (decimal 261)
+ defining Unicode char U+0106 (decimal 262)
+ defining Unicode char U+0107 (decimal 263)
+ defining Unicode char U+010C (decimal 268)
+ defining Unicode char U+010D (decimal 269)
+ defining Unicode char U+010E (decimal 270)
+ defining Unicode char U+010F (decimal 271)
+ defining Unicode char U+0110 (decimal 272)
+ defining Unicode char U+0111 (decimal 273)
+ defining Unicode char U+0118 (decimal 280)
+ defining Unicode char U+0119 (decimal 281)
+ defining Unicode char U+011A (decimal 282)
+ defining Unicode char U+011B (decimal 283)
+ defining Unicode char U+011E (decimal 286)
+ defining Unicode char U+011F (decimal 287)
+ defining Unicode char U+0130 (decimal 304)
+ defining Unicode char U+0131 (decimal 305)
+ defining Unicode char U+0132 (decimal 306)
+ defining Unicode char U+0133 (decimal 307)
+ defining Unicode char U+0139 (decimal 313)
+ defining Unicode char U+013A (decimal 314)
+ defining Unicode char U+013D (decimal 317)
+ defining Unicode char U+013E (decimal 318)
+ defining Unicode char U+0141 (decimal 321)
+ defining Unicode char U+0142 (decimal 322)
+ defining Unicode char U+0143 (decimal 323)
+ defining Unicode char U+0144 (decimal 324)
+ defining Unicode char U+0147 (decimal 327)
+ defining Unicode char U+0148 (decimal 328)
+ defining Unicode char U+014A (decimal 330)
+ defining Unicode char U+014B (decimal 331)
+ defining Unicode char U+0150 (decimal 336)
+ defining Unicode char U+0151 (decimal 337)
+ defining Unicode char U+0152 (decimal 338)
+ defining Unicode char U+0153 (decimal 339)
+ defining Unicode char U+0154 (decimal 340)
+ defining Unicode char U+0155 (decimal 341)
+ defining Unicode char U+0158 (decimal 344)
+ defining Unicode char U+0159 (decimal 345)
+ defining Unicode char U+015A (decimal 346)
+ defining Unicode char U+015B (decimal 347)
+ defining Unicode char U+015E (decimal 350)
+ defining Unicode char U+015F (decimal 351)
+ defining Unicode char U+0160 (decimal 352)
+ defining Unicode char U+0161 (decimal 353)
+ defining Unicode char U+0162 (decimal 354)
+ defining Unicode char U+0163 (decimal 355)
+ defining Unicode char U+0164 (decimal 356)
+ defining Unicode char U+0165 (decimal 357)
+ defining Unicode char U+016E (decimal 366)
+ defining Unicode char U+016F (decimal 367)
+ defining Unicode char U+0170 (decimal 368)
+ defining Unicode char U+0171 (decimal 369)
+ defining Unicode char U+0178 (decimal 376)
+ defining Unicode char U+0179 (decimal 377)
+ defining Unicode char U+017A (decimal 378)
+ defining Unicode char U+017B (decimal 379)
+ defining Unicode char U+017C (decimal 380)
+ defining Unicode char U+017D (decimal 381)
+ defining Unicode char U+017E (decimal 382)
+ defining Unicode char U+200C (decimal 8204)
+ defining Unicode char U+2013 (decimal 8211)
+ defining Unicode char U+2014 (decimal 8212)
+ defining Unicode char U+2018 (decimal 8216)
+ defining Unicode char U+2019 (decimal 8217)
+ defining Unicode char U+201A (decimal 8218)
+ defining Unicode char U+201C (decimal 8220)
+ defining Unicode char U+201D (decimal 8221)
+ defining Unicode char U+201E (decimal 8222)
+ defining Unicode char U+2030 (decimal 8240)
+ defining Unicode char U+2031 (decimal 8241)
+ defining Unicode char U+2039 (decimal 8249)
+ defining Unicode char U+203A (decimal 8250)
+ defining Unicode char U+2423 (decimal 9251)
+)
+Now handling font encoding OT1 ...
+... processing UTF-8 mapping file for font encoding OT1
+(/usr/share/texmf-texlive/tex/latex/base/ot1enc.dfu
+File: ot1enc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc
+ defining Unicode char U+00A1 (decimal 161)
+ defining Unicode char U+00A3 (decimal 163)
+ defining Unicode char U+00B8 (decimal 184)
+ defining Unicode char U+00BF (decimal 191)
+ defining Unicode char U+00C5 (decimal 197)
+ defining Unicode char U+00C6 (decimal 198)
+ defining Unicode char U+00D8 (decimal 216)
+ defining Unicode char U+00DF (decimal 223)
+ defining Unicode char U+00E6 (decimal 230)
+ defining Unicode char U+00EC (decimal 236)
+ defining Unicode char U+00ED (decimal 237)
+ defining Unicode char U+00EE (decimal 238)
+ defining Unicode char U+00EF (decimal 239)
+ defining Unicode char U+00F8 (decimal 248)
+ defining Unicode char U+0131 (decimal 305)
+ defining Unicode char U+0141 (decimal 321)
+ defining Unicode char U+0142 (decimal 322)
+ defining Unicode char U+0152 (decimal 338)
+ defining Unicode char U+0153 (decimal 339)
+ defining Unicode char U+2013 (decimal 8211)
+ defining Unicode char U+2014 (decimal 8212)
+ defining Unicode char U+2018 (decimal 8216)
+ defining Unicode char U+2019 (decimal 8217)
+ defining Unicode char U+201C (decimal 8220)
+ defining Unicode char U+201D (decimal 8221)
+)
+Now handling font encoding OMS ...
+... processing UTF-8 mapping file for font encoding OMS
+(/usr/share/texmf-texlive/tex/latex/base/omsenc.dfu
+File: omsenc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc
+ defining Unicode char U+00A7 (decimal 167)
+ defining Unicode char U+00B6 (decimal 182)
+ defining Unicode char U+00B7 (decimal 183)
+ defining Unicode char U+2020 (decimal 8224)
+ defining Unicode char U+2021 (decimal 8225)
+ defining Unicode char U+2022 (decimal 8226)
+)
+Now handling font encoding OMX ...
+... no UTF-8 mapping file for font encoding OMX
+Now handling font encoding U ...
+... no UTF-8 mapping file for font encoding U
+ defining Unicode char U+00A9 (decimal 169)
+ defining Unicode char U+00AA (decimal 170)
+ defining Unicode char U+00AE (decimal 174)
+ defining Unicode char U+00BA (decimal 186)
+ defining Unicode char U+02C6 (decimal 710)
+ defining Unicode char U+02DC (decimal 732)
+ defining Unicode char U+200C (decimal 8204)
+ defining Unicode char U+2026 (decimal 8230)
+ defining Unicode char U+2122 (decimal 8482)
+ defining Unicode char U+2423 (decimal 9251)
+))
+ defining Unicode char U+00A0 (decimal 160)
+(/usr/share/texmf-texlive/tex/latex/base/fontenc.sty
+Package: fontenc 2005/09/27 v1.99g Standard LaTeX package
+(/usr/share/texmf-texlive/tex/latex/base/t1enc.def
+File: t1enc.def 2005/09/27 v1.99g Standard LaTeX file
+LaTeX Font Info: Redeclaring font encoding T1 on input line 43.
+)) (/usr/share/texmf-texlive/tex/generic/babel/babel.sty
+Package: babel 2008/07/06 v3.8l The Babel package
+(/usr/share/texmf-texlive/tex/generic/babel/english.ldf
+Language: english 2005/03/30 v3.3o English support from the babel system
+(/usr/share/texmf-texlive/tex/generic/babel/babel.def
+File: babel.def 2008/07/06 v3.8l Babel common definitions
+\babel@savecnt=\count88
+\U@D=\dimen103
+)
+\l@british = a dialect from \language\l@english
+\l@UKenglish = a dialect from \language\l@english
+\l@canadian = a dialect from \language\l@american
+\l@australian = a dialect from \language\l@british
+\l@newzealand = a dialect from \language\l@british
+)) (/usr/share/texmf-texlive/tex/latex/psnfss/times.sty
+Package: times 2005/04/12 PSNFSS-v9.2a (SPQR)
+) (./fncychap.sty
+Package: fncychap 2007/07/30 v1.34 LaTeX package (Revised chapters)
+\RW=\skip43
+\mylen=\skip44
+\myhi=\skip45
+\px=\skip46
+\py=\skip47
+\pyy=\skip48
+\pxx=\skip49
+\c@AlphaCnt=\count89
+\c@AlphaDecCnt=\count90
+) (/usr/share/texmf-texlive/tex/latex/tools/longtable.sty
+Package: longtable 2004/02/01 v4.11 Multi-page Table package (DPC)
+\LTleft=\skip50
+\LTright=\skip51
+\LTpre=\skip52
+\LTpost=\skip53
+\LTchunksize=\count91
+\LTcapwidth=\dimen104
+\LT@head=\box26
+\LT@firsthead=\box27
+\LT@foot=\box28
+\LT@lastfoot=\box29
+\LT@cols=\count92
+\LT@rows=\count93
+\c@LT@tables=\count94
+\c@LT@chunks=\count95
+\LT@p@ftn=\toks16
+) (./sphinx.sty
+Package: sphinx 2010/01/15 LaTeX package (Sphinx markup)
+(/usr/share/texmf-texlive/tex/latex/base/textcomp.sty
+Package: textcomp 2005/09/27 v1.99g Standard LaTeX package
+Package textcomp Info: Sub-encoding information:
+(textcomp) 5 = only ISO-Adobe without \textcurrency
+(textcomp) 4 = 5 + \texteuro
+(textcomp) 3 = 4 + \textohm
+(textcomp) 2 = 3 + \textestimated + \textcurrency
+(textcomp) 1 = TS1 - \textcircled - \t
+(textcomp) 0 = TS1 (full)
+(textcomp) Font families with sub-encoding setting implement
+(textcomp) only a restricted character set as indicated.
+(textcomp) Family '?' is the default used for unknown fonts.
+(textcomp) See the documentation for details.
+Package textcomp Info: Setting ? sub-encoding to TS1/1 on input line 71.
+(/usr/share/texmf-texlive/tex/latex/base/ts1enc.def
+File: ts1enc.def 2001/06/05 v3.0e (jk/car/fm) Standard LaTeX file
+Now handling font encoding TS1 ...
+... processing UTF-8 mapping file for font encoding TS1
+(/usr/share/texmf-texlive/tex/latex/base/ts1enc.dfu
+File: ts1enc.dfu 2008/04/05 v1.1m UTF-8 support for inputenc
+ defining Unicode char U+00A2 (decimal 162)
+ defining Unicode char U+00A3 (decimal 163)
+ defining Unicode char U+00A4 (decimal 164)
+ defining Unicode char U+00A5 (decimal 165)
+ defining Unicode char U+00A6 (decimal 166)
+ defining Unicode char U+00A7 (decimal 167)
+ defining Unicode char U+00A8 (decimal 168)
+ defining Unicode char U+00A9 (decimal 169)
+ defining Unicode char U+00AA (decimal 170)
+ defining Unicode char U+00AC (decimal 172)
+ defining Unicode char U+00AE (decimal 174)
+ defining Unicode char U+00AF (decimal 175)
+ defining Unicode char U+00B0 (decimal 176)
+ defining Unicode char U+00B1 (decimal 177)
+ defining Unicode char U+00B2 (decimal 178)
+ defining Unicode char U+00B3 (decimal 179)
+ defining Unicode char U+00B4 (decimal 180)
+ defining Unicode char U+00B5 (decimal 181)
+ defining Unicode char U+00B6 (decimal 182)
+ defining Unicode char U+00B7 (decimal 183)
+ defining Unicode char U+00B9 (decimal 185)
+ defining Unicode char U+00BA (decimal 186)
+ defining Unicode char U+00BC (decimal 188)
+ defining Unicode char U+00BD (decimal 189)
+ defining Unicode char U+00BE (decimal 190)
+ defining Unicode char U+00D7 (decimal 215)
+ defining Unicode char U+00F7 (decimal 247)
+ defining Unicode char U+0192 (decimal 402)
+ defining Unicode char U+02C7 (decimal 711)
+ defining Unicode char U+02D8 (decimal 728)
+ defining Unicode char U+02DD (decimal 733)
+ defining Unicode char U+0E3F (decimal 3647)
+ defining Unicode char U+2016 (decimal 8214)
+ defining Unicode char U+2020 (decimal 8224)
+ defining Unicode char U+2021 (decimal 8225)
+ defining Unicode char U+2022 (decimal 8226)
+ defining Unicode char U+2030 (decimal 8240)
+ defining Unicode char U+2031 (decimal 8241)
+ defining Unicode char U+203B (decimal 8251)
+ defining Unicode char U+203D (decimal 8253)
+ defining Unicode char U+2044 (decimal 8260)
+ defining Unicode char U+204E (decimal 8270)
+ defining Unicode char U+2052 (decimal 8274)
+ defining Unicode char U+20A1 (decimal 8353)
+ defining Unicode char U+20A4 (decimal 8356)
+ defining Unicode char U+20A6 (decimal 8358)
+ defining Unicode char U+20A9 (decimal 8361)
+ defining Unicode char U+20AB (decimal 8363)
+ defining Unicode char U+20AC (decimal 8364)
+ defining Unicode char U+20B1 (decimal 8369)
+ defining Unicode char U+2103 (decimal 8451)
+ defining Unicode char U+2116 (decimal 8470)
+ defining Unicode char U+2117 (decimal 8471)
+ defining Unicode char U+211E (decimal 8478)
+ defining Unicode char U+2120 (decimal 8480)
+ defining Unicode char U+2122 (decimal 8482)
+ defining Unicode char U+2126 (decimal 8486)
+ defining Unicode char U+2127 (decimal 8487)
+ defining Unicode char U+212E (decimal 8494)
+ defining Unicode char U+2190 (decimal 8592)
+ defining Unicode char U+2191 (decimal 8593)
+ defining Unicode char U+2192 (decimal 8594)
+ defining Unicode char U+2193 (decimal 8595)
+ defining Unicode char U+2329 (decimal 9001)
+ defining Unicode char U+232A (decimal 9002)
+ defining Unicode char U+2422 (decimal 9250)
+ defining Unicode char U+25E6 (decimal 9702)
+ defining Unicode char U+25EF (decimal 9711)
+ defining Unicode char U+266A (decimal 9834)
+))
+LaTeX Info: Redefining \oldstylenums on input line 266.
+Package textcomp Info: Setting cmr sub-encoding to TS1/0 on input line 281.
+Package textcomp Info: Setting cmss sub-encoding to TS1/0 on input line 282.
+Package textcomp Info: Setting cmtt sub-encoding to TS1/0 on input line 283.
+Package textcomp Info: Setting cmvtt sub-encoding to TS1/0 on input line 284.
+Package textcomp Info: Setting cmbr sub-encoding to TS1/0 on input line 285.
+Package textcomp Info: Setting cmtl sub-encoding to TS1/0 on input line 286.
+Package textcomp Info: Setting ccr sub-encoding to TS1/0 on input line 287.
+Package textcomp Info: Setting ptm sub-encoding to TS1/4 on input line 288.
+Package textcomp Info: Setting pcr sub-encoding to TS1/4 on input line 289.
+Package textcomp Info: Setting phv sub-encoding to TS1/4 on input line 290.
+Package textcomp Info: Setting ppl sub-encoding to TS1/3 on input line 291.
+Package textcomp Info: Setting pag sub-encoding to TS1/4 on input line 292.
+Package textcomp Info: Setting pbk sub-encoding to TS1/4 on input line 293.
+Package textcomp Info: Setting pnc sub-encoding to TS1/4 on input line 294.
+Package textcomp Info: Setting pzc sub-encoding to TS1/4 on input line 295.
+Package textcomp Info: Setting bch sub-encoding to TS1/4 on input line 296.
+Package textcomp Info: Setting put sub-encoding to TS1/5 on input line 297.
+Package textcomp Info: Setting uag sub-encoding to TS1/5 on input line 298.
+Package textcomp Info: Setting ugq sub-encoding to TS1/5 on input line 299.
+Package textcomp Info: Setting ul8 sub-encoding to TS1/4 on input line 300.
+Package textcomp Info: Setting ul9 sub-encoding to TS1/4 on input line 301.
+Package textcomp Info: Setting augie sub-encoding to TS1/5 on input line 302.
+Package textcomp Info: Setting dayrom sub-encoding to TS1/3 on input line 303.
+Package textcomp Info: Setting dayroms sub-encoding to TS1/3 on input line 304.
+
+Package textcomp Info: Setting pxr sub-encoding to TS1/0 on input line 305.
+Package textcomp Info: Setting pxss sub-encoding to TS1/0 on input line 306.
+Package textcomp Info: Setting pxtt sub-encoding to TS1/0 on input line 307.
+Package textcomp Info: Setting txr sub-encoding to TS1/0 on input line 308.
+Package textcomp Info: Setting txss sub-encoding to TS1/0 on input line 309.
+Package textcomp Info: Setting txtt sub-encoding to TS1/0 on input line 310.
+Package textcomp Info: Setting futs sub-encoding to TS1/4 on input line 311.
+Package textcomp Info: Setting futx sub-encoding to TS1/4 on input line 312.
+Package textcomp Info: Setting futj sub-encoding to TS1/4 on input line 313.
+Package textcomp Info: Setting hlh sub-encoding to TS1/3 on input line 314.
+Package textcomp Info: Setting hls sub-encoding to TS1/3 on input line 315.
+Package textcomp Info: Setting hlst sub-encoding to TS1/3 on input line 316.
+Package textcomp Info: Setting hlct sub-encoding to TS1/5 on input line 317.
+Package textcomp Info: Setting hlx sub-encoding to TS1/5 on input line 318.
+Package textcomp Info: Setting hlce sub-encoding to TS1/5 on input line 319.
+Package textcomp Info: Setting hlcn sub-encoding to TS1/5 on input line 320.
+Package textcomp Info: Setting hlcw sub-encoding to TS1/5 on input line 321.
+Package textcomp Info: Setting hlcf sub-encoding to TS1/5 on input line 322.
+Package textcomp Info: Setting pplx sub-encoding to TS1/3 on input line 323.
+Package textcomp Info: Setting pplj sub-encoding to TS1/3 on input line 324.
+Package textcomp Info: Setting ptmx sub-encoding to TS1/4 on input line 325.
+Package textcomp Info: Setting ptmj sub-encoding to TS1/4 on input line 326.
+) (/usr/share/texmf-texlive/tex/latex/fancyhdr/fancyhdr.sty
+\fancy@headwidth=\skip54
+\f@ncyO@elh=\skip55
+\f@ncyO@erh=\skip56
+\f@ncyO@olh=\skip57
+\f@ncyO@orh=\skip58
+\f@ncyO@elf=\skip59
+\f@ncyO@erf=\skip60
+\f@ncyO@olf=\skip61
+\f@ncyO@orf=\skip62
+) (/usr/share/texmf-texlive/tex/latex/fancybox/fancybox.sty
+Package: fancybox 2000/09/19 1.3
+Style option: `fancybox' v1.3 <2000/09/19> (tvz)
+\@fancybox=\box30
+\shadowsize=\dimen105
+\@Sbox=\box31
+\do@VerbBox=\toks17
+\the@fancyput=\toks18
+\this@fancyput=\toks19
+\EndVerbatimTokens=\toks20
+\Verbatim@Outfile=\write3
+\Verbatim@Infile=\read1
+) (/usr/share/texmf-texlive/tex/latex/titlesec/titlesec.sty
+Package: titlesec 2007/08/12 v2.8 Sectioning titles
+\ttl@box=\box32
+\beforetitleunit=\skip63
+\aftertitleunit=\skip64
+\ttl@plus=\dimen106
+\ttl@minus=\dimen107
+\ttl@toksa=\toks21
+\titlewidth=\dimen108
+\titlewidthlast=\dimen109
+\titlewidthfirst=\dimen110
+) (./tabulary.sty
+Package: tabulary 2007/10/02 v0.9 tabulary package (DPC)
+(/usr/share/texmf-texlive/tex/latex/tools/array.sty
+Package: array 2008/09/09 v2.4c Tabular extension package (FMi)
+\col@sep=\dimen111
+\extrarowheight=\dimen112
+\NC@list=\toks22
+\extratabsurround=\skip65
+\backup@length=\skip66
+)
+\TY@count=\count96
+\TY@linewidth=\dimen113
+\tymin=\dimen114
+\tymax=\dimen115
+\TY@tablewidth=\dimen116
+) (/usr/share/texmf-texlive/tex/latex/amsmath/amsmath.sty
+Package: amsmath 2000/07/18 v2.13 AMS math features
+\@mathmargin=\skip67
+For additional information on amsmath, use the `?' option.
+(/usr/share/texmf-texlive/tex/latex/amsmath/amstext.sty
+Package: amstext 2000/06/29 v2.01
+(/usr/share/texmf-texlive/tex/latex/amsmath/amsgen.sty
+File: amsgen.sty 1999/11/30 v2.0
+\@emptytoks=\toks23
+\ex@=\dimen117
+)) (/usr/share/texmf-texlive/tex/latex/amsmath/amsbsy.sty
+Package: amsbsy 1999/11/29 v1.2d
+\pmbraise@=\dimen118
+) (/usr/share/texmf-texlive/tex/latex/amsmath/amsopn.sty
+Package: amsopn 1999/12/14 v2.01 operator names
+)
+\inf@bad=\count97
+LaTeX Info: Redefining \frac on input line 211.
+\uproot@=\count98
+\leftroot@=\count99
+LaTeX Info: Redefining \overline on input line 307.
+\classnum@=\count100
+\DOTSCASE@=\count101
+LaTeX Info: Redefining \ldots on input line 379.
+LaTeX Info: Redefining \dots on input line 382.
+LaTeX Info: Redefining \cdots on input line 467.
+\Mathstrutbox@=\box33
+\strutbox@=\box34
+\big@size=\dimen119
+LaTeX Font Info: Redeclaring font encoding OML on input line 567.
+LaTeX Font Info: Redeclaring font encoding OMS on input line 568.
+\macc@depth=\count102
+\c@MaxMatrixCols=\count103
+\dotsspace@=\muskip10
+\c@parentequation=\count104
+\dspbrk@lvl=\count105
+\tag@help=\toks24
+\row@=\count106
+\column@=\count107
+\maxfields@=\count108
+\andhelp@=\toks25
+\eqnshift@=\dimen120
+\alignsep@=\dimen121
+\tagshift@=\dimen122
+\tagwidth@=\dimen123
+\totwidth@=\dimen124
+\lineht@=\dimen125
+\@envbody=\toks26
+\multlinegap=\skip68
+\multlinetaggap=\skip69
+\mathdisplay@stack=\toks27
+LaTeX Info: Redefining \[ on input line 2666.
+LaTeX Info: Redefining \] on input line 2667.
+) (/usr/share/texmf-texlive/tex/latex/base/makeidx.sty
+Package: makeidx 2000/03/29 v1.0m Standard LaTeX package
+) (/usr/share/texmf-texlive/tex/latex/ltxmisc/framed.sty
+Package: framed 2007/10/04 v 0.95: framed or shaded text with page breaks
+\fb@frw=\dimen126
+\fb@frh=\dimen127
+\FrameRule=\dimen128
+\FrameSep=\dimen129
+) (/usr/share/texmf-texlive/tex/latex/base/ifthen.sty
+Package: ifthen 2001/05/26 v1.1c Standard LaTeX ifthen package (DPC)
+) (/usr/share/texmf-texlive/tex/latex/graphics/color.sty
+Package: color 2005/11/14 v1.0j Standard LaTeX Color (DPC)
+(/etc/texmf/tex/latex/config/color.cfg
+File: color.cfg 2007/01/18 v1.5 color configuration of teTeX/TeXLive
+)
+Package color Info: Driver file: pdftex.def on input line 130.
+(/usr/share/texmf-texlive/tex/latex/pdftex-def/pdftex.def
+File: pdftex.def 2010/03/12 v0.04p Graphics/color for pdfTeX
+\Gread@gobject=\count109
+)) (/usr/share/texmf-texlive/tex/latex/fancyvrb/fancyvrb.sty
+Package: fancyvrb 2008/02/07
+
+Style option: `fancyvrb' v2.7a, with DG/SPQR fixes, and firstline=lastline fix
+<2008/02/07> (tvz) (/usr/share/texmf-texlive/tex/latex/graphics/keyval.sty
+Package: keyval 1999/03/16 v1.13 key=value parser (DPC)
+\KV@toks@=\toks28
+)
+\FV@CodeLineNo=\count110
+\FV@InFile=\read2
+\FV@TabBox=\box35
+\c@FancyVerbLine=\count111
+\FV@StepNumber=\count112
+\FV@OutFile=\write4
+) (/usr/share/texmf-texlive/tex/latex/ltxmisc/threeparttable.sty
+Package: threeparttable 2003/06/13 v 3.0
+\@tempboxb=\box36
+) (/usr/share/texmf-texlive/tex/latex/mdwtools/footnote.sty
+Package: footnote 1997/01/28 1.13 Save footnotes around boxes
+\fn@notes=\box37
+\fn@width=\dimen130
+) (/usr/share/texmf-texlive/tex/latex/wrapfig/wrapfig.sty
+\wrapoverhang=\dimen131
+\WF@size=\dimen132
+\c@WF@wrappedlines=\count113
+\WF@box=\box38
+\WF@everypar=\toks29
+Package: wrapfig 2003/01/31 v 3.6
+) (/usr/share/texmf-texlive/tex/latex/ltxmisc/parskip.sty
+Package: parskip 2001/04/09 non-zero parskip adjustments
+) (/usr/share/texmf-texlive/tex/latex/graphics/graphicx.sty
+Package: graphicx 1999/02/16 v1.0f Enhanced LaTeX Graphics (DPC,SPQR)
+(/usr/share/texmf-texlive/tex/latex/graphics/graphics.sty
+Package: graphics 2009/02/05 v1.0o Standard LaTeX Graphics (DPC,SPQR)
+(/usr/share/texmf-texlive/tex/latex/graphics/trig.sty
+Package: trig 1999/03/16 v1.09 sin cos tan (DPC)
+) (/etc/texmf/tex/latex/config/graphics.cfg
+File: graphics.cfg 2009/08/28 v1.8 graphics configuration of TeX Live
+)
+Package graphics Info: Driver file: pdftex.def on input line 91.
+)
+\Gin@req@height=\dimen133
+\Gin@req@width=\dimen134
+) (/usr/share/texmf-texlive/tex/plain/misc/pdfcolor.tex)
+\distancetoright=\skip70
+\py@argswidth=\skip71
+\py@noticelength=\skip72
+\lineblockindentation=\skip73
+\image@box=\box39
+\image@width=\dimen135
+(/usr/share/texmf-texlive/tex/latex/hyperref/hyperref.sty
+Package: hyperref 2009/10/09 v6.79a Hypertext links for LaTeX
+(/usr/share/texmf-texlive/tex/generic/oberdiek/ifpdf.sty
+Package: ifpdf 2009/04/10 v2.0 Provides the ifpdf switch (HO)
+Package ifpdf Info: pdfTeX in pdf mode detected.
+) (/usr/share/texmf-texlive/tex/generic/oberdiek/ifvtex.sty
+Package: ifvtex 2008/11/04 v1.4 Switches for detecting VTeX and its modes (HO)
+Package ifvtex Info: VTeX not detected.
+) (/usr/share/texmf-texlive/tex/generic/ifxetex/ifxetex.sty
+Package: ifxetex 2009/01/23 v0.5 Provides ifxetex conditional
+) (/usr/share/texmf-texlive/tex/latex/oberdiek/hycolor.sty
+Package: hycolor 2009/10/02 v1.5 Code for color options of hyperref/bookmark (H
+O)
+(/usr/share/texmf-texlive/tex/latex/oberdiek/xcolor-patch.sty
+Package: xcolor-patch 2009/10/02 xcolor patch
+))
+\@linkdim=\dimen136
+\Hy@linkcounter=\count114
+\Hy@pagecounter=\count115
+(/usr/share/texmf-texlive/tex/latex/hyperref/pd1enc.def
+File: pd1enc.def 2009/10/09 v6.79a Hyperref: PDFDocEncoding definition (HO)
+Now handling font encoding PD1 ...
+... no UTF-8 mapping file for font encoding PD1
+) (/usr/share/texmf-texlive/tex/generic/oberdiek/etexcmds.sty
+Package: etexcmds 2007/12/12 v1.2 Prefix for e-TeX command names (HO)
+(/usr/share/texmf-texlive/tex/generic/oberdiek/infwarerr.sty
+Package: infwarerr 2007/09/09 v1.2 Providing info/warning/message (HO)
+)
+Package etexcmds Info: Could not find \expanded.
+(etexcmds) That can mean that you are not using pdfTeX 1.50 or
+(etexcmds) that some package has redefined \expanded.
+(etexcmds) In the latter case, load this package earlier.
+) (/usr/share/texmf-texlive/tex/latex/latexconfig/hyperref.cfg
+File: hyperref.cfg 2002/06/06 v1.2 hyperref configuration of TeXLive
+) (/usr/share/texmf-texlive/tex/latex/oberdiek/kvoptions.sty
+Package: kvoptions 2009/08/13 v3.4 Keyval support for LaTeX options (HO)
+(/usr/share/texmf-texlive/tex/generic/oberdiek/kvsetkeys.sty
+Package: kvsetkeys 2009/07/30 v1.5 Key value parser with default handler suppor
+t (HO)
+))
+Package hyperref Info: Option `colorlinks' set `true' on input line 2864.
+Package hyperref Info: Option `breaklinks' set `true' on input line 2864.
+Package hyperref Info: Hyper figures OFF on input line 2975.
+Package hyperref Info: Link nesting OFF on input line 2980.
+Package hyperref Info: Hyper index ON on input line 2983.
+Package hyperref Info: Plain pages OFF on input line 2990.
+Package hyperref Info: Backreferencing OFF on input line 2995.
+Implicit mode ON; LaTeX internals redefined
+Package hyperref Info: Bookmarks ON on input line 3191.
+(/usr/share/texmf-texlive/tex/latex/ltxmisc/url.sty
+\Urlmuskip=\muskip11
+Package: url 2006/04/12 ver 3.3 Verb mode for urls, etc.
+)
+LaTeX Info: Redefining \url on input line 3428.
+(/usr/share/texmf-texlive/tex/generic/oberdiek/bitset.sty
+Package: bitset 2007/09/28 v1.0 Data type bit set (HO)
+(/usr/share/texmf-texlive/tex/generic/oberdiek/intcalc.sty
+Package: intcalc 2007/09/27 v1.1 Expandable integer calculations (HO)
+) (/usr/share/texmf-texlive/tex/generic/oberdiek/bigintcalc.sty
+Package: bigintcalc 2007/11/11 v1.1 Expandable big integer calculations (HO)
+(/usr/share/texmf-texlive/tex/generic/oberdiek/pdftexcmds.sty
+Package: pdftexcmds 2009/09/23 v0.6 LuaTeX support for pdfTeX utility functions
+ (HO)
+(/usr/share/texmf-texlive/tex/generic/oberdiek/ifluatex.sty
+Package: ifluatex 2009/04/17 v1.2 Provides the ifluatex switch (HO)
+Package ifluatex Info: LuaTeX not detected.
+) (/usr/share/texmf-texlive/tex/generic/oberdiek/ltxcmds.sty
+Package: ltxcmds 2009/08/05 v1.0 Some LaTeX kernel commands for general use (HO
+)
+)
+Package pdftexcmds Info: LuaTeX not detected.
+Package pdftexcmds Info: \pdf@primitive is available.
+Package pdftexcmds Info: \pdf@ifprimitive is available.
+)))
+\Fld@menulength=\count116
+\Field@Width=\dimen137
+\Fld@charsize=\dimen138
+\Field@toks=\toks30
+Package hyperref Info: Hyper figures OFF on input line 4377.
+Package hyperref Info: Link nesting OFF on input line 4382.
+Package hyperref Info: Hyper index ON on input line 4385.
+Package hyperref Info: backreferencing OFF on input line 4392.
+Package hyperref Info: Link coloring ON on input line 4395.
+Package hyperref Info: Link coloring with OCG OFF on input line 4402.
+Package hyperref Info: PDF/A mode OFF on input line 4407.
+(/usr/share/texmf-texlive/tex/generic/oberdiek/atbegshi.sty
+Package: atbegshi 2008/07/31 v1.9 At begin shipout hook (HO)
+)
+\Hy@abspage=\count117
+\c@Item=\count118
+\c@Hfootnote=\count119
+)
+*hyperref using default driver hpdftex*
+(/usr/share/texmf-texlive/tex/latex/hyperref/hpdftex.def
+File: hpdftex.def 2009/10/09 v6.79a Hyperref driver for pdfTeX
+\Fld@listcount=\count120
+) (/usr/share/texmf-texlive/tex/latex/oberdiek/hypcap.sty
+Package: hypcap 2008/09/08 v1.10 Adjusting anchors of captions (HO)
+)
+\DUlineblockindent=\skip74
+)
+\@indexfile=\write5
+\openout5 = `manual.idx'.
+
+Writing index file manual.idx
+(./manual.aux)
+\openout1 = `manual.aux'.
+
+LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 109.
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 109.
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 109.
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 109.
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 109.
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 109.
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 109.
+LaTeX Font Info: Try loading font information for TS1+cmr on input line 109.
+
+(/usr/share/texmf-texlive/tex/latex/base/ts1cmr.fd
+File: ts1cmr.fd 1999/05/25 v2.5h Standard LaTeX font definitions
+)
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 109.
+LaTeX Font Info: ... okay on input line 109.
+LaTeX Font Info: Try loading font information for T1+ptm on input line 109.
+(/usr/share/texmf-texlive/tex/latex/psnfss/t1ptm.fd
+File: t1ptm.fd 2001/06/04 font definitions for T1/ptm.
+) (/usr/share/texmf-texlive/tex/context/base/supp-pdf.mkii
+[Loading MPS to PDF converter (version 2006.09.02).]
+\scratchcounter=\count121
+\scratchdimen=\dimen139
+\scratchbox=\box40
+\nofMPsegments=\count122
+\nofMParguments=\count123
+\everyMPshowfont=\toks31
+\MPscratchCnt=\count124
+\MPscratchDim=\dimen140
+\MPnumerator=\count125
+\everyMPtoPDFconversion=\toks32
+)
+Package hyperref Info: Link coloring ON on input line 109.
+(/usr/share/texmf-texlive/tex/latex/hyperref/nameref.sty
+Package: nameref 2007/05/29 v2.31 Cross-referencing by name of section
+(/usr/share/texmf-texlive/tex/latex/oberdiek/refcount.sty
+Package: refcount 2008/08/11 v3.1 Data extraction from references (HO)
+)
+\c@section@level=\count126
+)
+LaTeX Info: Redefining \ref on input line 109.
+LaTeX Info: Redefining \pageref on input line 109.
+(./manual.out) (./manual.out)
+\@outlinefile=\write6
+\openout6 = `manual.out'.
+
+\AtBeginShipoutBox=\box41
+
+Underfull \hbox (badness 10000) in paragraph at lines 112--112
+
+ []
+
+LaTeX Font Info: Try loading font information for T1+phv on input line 112.
+(/usr/share/texmf-texlive/tex/latex/psnfss/t1phv.fd
+File: t1phv.fd 2001/06/04 scalable font definitions for T1/phv.
+)
+LaTeX Font Info: Font shape `T1/phv/bx/n' in size <24.88> not available
+(Font) Font shape `T1/phv/b/n' tried instead on input line 112.
+LaTeX Font Info: Font shape `T1/phv/m/it' in size <17.28> not available
+(Font) Font shape `T1/phv/m/sl' tried instead on input line 112.
+LaTeX Font Info: Font shape `T1/phv/bx/it' in size <17.28> not available
+(Font) Font shape `T1/phv/b/it' tried instead on input line 112.
+LaTeX Font Info: Font shape `T1/phv/b/it' in size <17.28> not available
+(Font) Font shape `T1/phv/b/sl' tried instead on input line 112.
+LaTeX Font Info: Font shape `T1/phv/bx/n' in size <17.28> not available
+(Font) Font shape `T1/phv/b/n' tried instead on input line 112.
+[1
+
+{/home/nikratio/.texmf-var/fonts/map/pdftex/updmap/pdftex.map
+
+pdfTeX warning: pdflatex (file /home/nikratio/.texmf-var/fonts/map/pdftex/updma
+p/pdftex.map): fontmap entry for `ugqb8r' already exists, duplicates ignored
+
+
+pdfTeX warning: pdflatex (file /home/nikratio/.texmf-var/fonts/map/pdftex/updma
+p/pdftex.map): fontmap entry for `ugqbo8r' already exists, duplicates ignored
+}] [2
+
+] (./manual.toc
+LaTeX Font Info: Font shape `T1/ptm/bx/n' in size <10> not available
+(Font) Font shape `T1/ptm/b/n' tried instead on input line 2.
+LaTeX Font Info: Try loading font information for T1+pcr on input line 10.
+(/usr/share/texmf-texlive/tex/latex/psnfss/t1pcr.fd
+File: t1pcr.fd 2001/06/04 font definitions for T1/pcr.
+)
+LaTeX Font Info: Font shape `T1/phv/bx/n' in size <10> not available
+(Font) Font shape `T1/phv/b/n' tried instead on input line 38.
+pdfTeX warning (ext4): destination with the same identifier (name{page.i}) has
+been already used, duplicate ignored
+<to be read again>
+ \relax
+l.38 ...ine {9}Checking for Errors}{23}{chapter.9}
+ [1
+
+])
+\tf@toc=\write7
+\openout7 = `manual.toc'.
+
+pdfTeX warning (ext4): destination with the same identifier (name{page.ii}) has
+ been already used, duplicate ignored
+<to be read again>
+ \relax
+l.112 \tableofcontents
+ [2]
+Chapter 1.
+LaTeX Font Info: Font shape `T1/phv/bx/n' in size <14.4> not available
+(Font) Font shape `T1/phv/b/n' tried instead on input line 117.
+LaTeX Font Info: Try loading font information for TS1+ptm on input line 140.
+
+(/usr/share/texmf-texlive/tex/latex/psnfss/ts1ptm.fd
+File: ts1ptm.fd 2001/06/04 font definitions for TS1/ptm.
+) [1
+
+
+] [2]
+Chapter 2.
+[3
+
+] [4]
+Chapter 3.
+
+Underfull \hbox (badness 10000) in paragraph at lines 346--347
+
+ []
+
+[5
+
+]
+LaTeX Font Info: Font shape `T1/pcr/bx/n' in size <14.4> not available
+(Font) Font shape `T1/pcr/b/n' tried instead on input line 402.
+LaTeX Font Info: Font shape `T1/phv/bx/n' in size <12> not available
+(Font) Font shape `T1/phv/b/n' tried instead on input line 462.
+[6] [7]
+LaTeX Font Info: Font shape `T1/pcr/m/it' in size <9> not available
+(Font) Font shape `T1/pcr/m/sl' tried instead on input line 607.
+[8]
+Chapter 4.
+[9
+
+] [10
+
+]
+Chapter 5.
+[11]
+Underfull \hbox (badness 10000) in paragraph at lines 777--778
+
+ []
+
+[12]
+Chapter 6.
+[13
+
+] [14] [15] [16]
+Chapter 7.
+[17
+
+] [18] [19] [20
+
+]
+Chapter 8.
+[21] [22
+
+]
+Chapter 9.
+[23] [24
+
+]
+Chapter 10.
+[25]
+Underfull \hbox (badness 10000) in paragraph at lines 1439--1444
+[]\T1/ptm/b/n/10 expire_backups \T1/ptm/m/n/10 us-age is sim-ple. It re-quires
+back-ups to have names of the forms
+ []
+
+[26] [27] [28
+
+]
+Chapter 11.
+[29] [30]
+Chapter 12.
+[31
+
+] [32]
+Chapter 13.
+[33
+
+] [34] [35] [36] [37] [38] [39] [40] [41] [42] [43] [44] [45]
+Underfull \hbox (badness 10000) in paragraph at lines 2672--2677
+[]\T1/ptm/b/n/10 expire_backups \T1/ptm/m/n/10 us-age is sim-ple. It re-quires
+back-ups to have names of the forms
+ []
+
+[46] [47] [48
+
+]
+Chapter 14.
+No file manual.ind.
+[49] (./manual.aux) )
+Here is how much of TeX's memory you used:
+ 8345 strings out of 495021
+ 113597 string characters out of 1181035
+ 200143 words of memory out of 3000000
+ 11122 multiletter control sequences out of 15000+50000
+ 59136 words of font info for 67 fonts, out of 3000000 for 9000
+ 29 hyphenation exceptions out of 8191
+ 45i,12n,48p,275b,492s stack positions out of 5000i,500n,10000p,200000b,50000s
+{/usr/share/texmf-texlive/fonts/enc/dvips/base/8r.enc}</us
+r/share/texmf-texlive/fonts/type1/urw/courier/ucrb8a.pfb></usr/share/texmf-texl
+ive/fonts/type1/urw/courier/ucrr8a.pfb></usr/share/texmf-texlive/fonts/type1/ur
+w/courier/ucrro8a.pfb></usr/share/texmf-texlive/fonts/type1/urw/helvetic/uhvb8a
+.pfb></usr/share/texmf-texlive/fonts/type1/urw/helvetic/uhvbo8a.pfb></usr/share
+/texmf-texlive/fonts/type1/urw/times/utmb8a.pfb></usr/share/texmf-texlive/fonts
+/type1/urw/times/utmr8a.pfb></usr/share/texmf-texlive/fonts/type1/urw/times/utm
+ri8a.pfb>
+Output written on manual.pdf (53 pages, 289155 bytes).
+PDF statistics:
+ 781 PDF objects out of 1000 (max. 8388607)
+ 224 named destinations out of 1000 (max. 500000)
+ 465 words of extra memory for PDF output out of 10000 (max. 10000000)
+
diff --git a/doc/latex/manual.out b/doc/latex/manual.out
new file mode 100644
index 0000000..16ea0d1
--- /dev/null
+++ b/doc/latex/manual.out
@@ -0,0 +1,58 @@
+\BOOKMARK [0][-]{chapter.1}{About S3QL}{}
+\BOOKMARK [1][-]{section.1.1}{Features}{chapter.1}
+\BOOKMARK [1][-]{section.1.2}{Development Status}{chapter.1}
+\BOOKMARK [0][-]{chapter.2}{Installation}{}
+\BOOKMARK [1][-]{section.2.1}{Dependencies}{chapter.2}
+\BOOKMARK [1][-]{section.2.2}{Installing S3QL}{chapter.2}
+\BOOKMARK [0][-]{chapter.3}{Storage Backends}{}
+\BOOKMARK [1][-]{section.3.1}{On Backend Reliability}{chapter.3}
+\BOOKMARK [1][-]{section.3.2}{The authinfo file}{chapter.3}
+\BOOKMARK [1][-]{section.3.3}{Consistency Guarantees}{chapter.3}
+\BOOKMARK [1][-]{section.3.4}{The Amazon S3 Backend}{chapter.3}
+\BOOKMARK [1][-]{section.3.5}{The Local Backend}{chapter.3}
+\BOOKMARK [1][-]{section.3.6}{The SFTP Backend}{chapter.3}
+\BOOKMARK [0][-]{chapter.4}{File System Creation}{}
+\BOOKMARK [0][-]{chapter.5}{Managing Buckets}{}
+\BOOKMARK [1][-]{section.5.1}{Changing the Passphrase}{chapter.5}
+\BOOKMARK [1][-]{section.5.2}{Upgrading the file system}{chapter.5}
+\BOOKMARK [1][-]{section.5.3}{Deleting a file system}{chapter.5}
+\BOOKMARK [1][-]{section.5.4}{Restoring Metadata Backups}{chapter.5}
+\BOOKMARK [0][-]{chapter.6}{Mounting}{}
+\BOOKMARK [1][-]{section.6.1}{Storing Encryption Passwords}{chapter.6}
+\BOOKMARK [1][-]{section.6.2}{Compression Algorithms}{chapter.6}
+\BOOKMARK [1][-]{section.6.3}{Parallel Compression}{chapter.6}
+\BOOKMARK [1][-]{section.6.4}{Notes about Caching}{chapter.6}
+\BOOKMARK [1][-]{section.6.5}{Automatic Mounting}{chapter.6}
+\BOOKMARK [0][-]{chapter.7}{Advanced S3QL Features}{}
+\BOOKMARK [1][-]{section.7.1}{Snapshotting and Copy-on-Write}{chapter.7}
+\BOOKMARK [1][-]{section.7.2}{Getting Statistics}{chapter.7}
+\BOOKMARK [1][-]{section.7.3}{Immutable Trees}{chapter.7}
+\BOOKMARK [1][-]{section.7.4}{Fast Recursive Removal}{chapter.7}
+\BOOKMARK [1][-]{section.7.5}{Runtime Configuration}{chapter.7}
+\BOOKMARK [0][-]{chapter.8}{Unmounting}{}
+\BOOKMARK [0][-]{chapter.9}{Checking for Errors}{}
+\BOOKMARK [0][-]{chapter.10}{Contributed Programs}{}
+\BOOKMARK [1][-]{section.10.1}{benchmark.py}{chapter.10}
+\BOOKMARK [1][-]{section.10.2}{s3\137copy.py}{chapter.10}
+\BOOKMARK [1][-]{section.10.3}{pcp.py}{chapter.10}
+\BOOKMARK [1][-]{section.10.4}{s3\137backup.sh}{chapter.10}
+\BOOKMARK [1][-]{section.10.5}{expire\137backups.py}{chapter.10}
+\BOOKMARK [1][-]{section.10.6}{s3ql.conf}{chapter.10}
+\BOOKMARK [0][-]{chapter.11}{Tips \046 Tricks}{}
+\BOOKMARK [1][-]{section.11.1}{Permanently mounted backup file system}{chapter.11}
+\BOOKMARK [1][-]{section.11.2}{Improving copy performance}{chapter.11}
+\BOOKMARK [0][-]{chapter.12}{Known Issues}{}
+\BOOKMARK [0][-]{chapter.13}{Manpages}{}
+\BOOKMARK [1][-]{section.13.1}{The mkfs.s3ql command}{chapter.13}
+\BOOKMARK [1][-]{section.13.2}{The s3qladm command}{chapter.13}
+\BOOKMARK [1][-]{section.13.3}{The mount.s3ql command}{chapter.13}
+\BOOKMARK [1][-]{section.13.4}{The s3qlstat command}{chapter.13}
+\BOOKMARK [1][-]{section.13.5}{The s3qlctrl command}{chapter.13}
+\BOOKMARK [1][-]{section.13.6}{The s3qlcp command}{chapter.13}
+\BOOKMARK [1][-]{section.13.7}{The s3qlrm command}{chapter.13}
+\BOOKMARK [1][-]{section.13.8}{The s3qllock command}{chapter.13}
+\BOOKMARK [1][-]{section.13.9}{The umount.s3ql command}{chapter.13}
+\BOOKMARK [1][-]{section.13.10}{The fsck.s3ql command}{chapter.13}
+\BOOKMARK [1][-]{section.13.11}{The pcp command}{chapter.13}
+\BOOKMARK [1][-]{section.13.12}{The expire\137backups command}{chapter.13}
+\BOOKMARK [0][-]{chapter.14}{Further Resources / Getting Help}{}
diff --git a/doc/latex/manual.tex b/doc/latex/manual.tex
new file mode 100644
index 0000000..fe2532b
--- /dev/null
+++ b/doc/latex/manual.tex
@@ -0,0 +1,2761 @@
+% Generated by Sphinx.
+\def\sphinxdocclass{report}
+\documentclass[letterpaper,10pt,english]{sphinxmanual}
+\usepackage[utf8]{inputenc}
+\DeclareUnicodeCharacter{00A0}{\nobreakspace}
+\usepackage[T1]{fontenc}
+\usepackage{babel}
+\usepackage{times}
+\usepackage[Bjarne]{fncychap}
+\usepackage{longtable}
+\usepackage{sphinx}
+
+
+\title{S3QL Documentation}
+\date{May 20, 2011}
+\release{1.0.1}
+\author{Nikolaus Rath}
+\newcommand{\sphinxlogo}{}
+\renewcommand{\releasename}{Release}
+\makeindex
+
+\makeatletter
+\def\PYG@reset{\let\PYG@it=\relax \let\PYG@bf=\relax%
+ \let\PYG@ul=\relax \let\PYG@tc=\relax%
+ \let\PYG@bc=\relax \let\PYG@ff=\relax}
+\def\PYG@tok#1{\csname PYG@tok@#1\endcsname}
+\def\PYG@toks#1+{\ifx\relax#1\empty\else%
+ \PYG@tok{#1}\expandafter\PYG@toks\fi}
+\def\PYG@do#1{\PYG@bc{\PYG@tc{\PYG@ul{%
+ \PYG@it{\PYG@bf{\PYG@ff{#1}}}}}}}
+\def\PYG#1#2{\PYG@reset\PYG@toks#1+\relax+\PYG@do{#2}}
+
+\def\PYG@tok@gd{\def\PYG@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
+\def\PYG@tok@gu{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
+\def\PYG@tok@gt{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.25,0.82}{##1}}}
+\def\PYG@tok@gs{\let\PYG@bf=\textbf}
+\def\PYG@tok@gr{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
+\def\PYG@tok@cm{\def\PYG@tc##1{\textcolor[rgb]{0.50,0.50,0.50}{##1}}}
+\def\PYG@tok@vg{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.82,0.44,0.00}{##1}}}
+\def\PYG@tok@m{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.38,0.00,0.88}{##1}}}
+\def\PYG@tok@mh{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.31,0.50}{##1}}}
+\def\PYG@tok@cs{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.80,0.00,0.00}{##1}}}
+\def\PYG@tok@ge{\let\PYG@it=\textit}
+\def\PYG@tok@vc{\def\PYG@tc##1{\textcolor[rgb]{0.19,0.38,0.56}{##1}}}
+\def\PYG@tok@il{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.82}{##1}}}
+\def\PYG@tok@go{\def\PYG@tc##1{\textcolor[rgb]{0.50,0.50,0.50}{##1}}}
+\def\PYG@tok@cp{\def\PYG@tc##1{\textcolor[rgb]{0.31,0.44,0.56}{##1}}}
+\def\PYG@tok@gi{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
+\def\PYG@tok@gh{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
+\def\PYG@tok@ni{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.50,0.00,0.00}{##1}}}
+\def\PYG@tok@nl{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.56,0.44,0.00}{##1}}}
+\def\PYG@tok@nn{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}}
+\def\PYG@tok@no{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.19,0.38}{##1}}}
+\def\PYG@tok@na{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.75}{##1}}}
+\def\PYG@tok@nb{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
+\def\PYG@tok@nc{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.69,0.00,0.38}{##1}}}
+\def\PYG@tok@nd{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.31,0.31,0.31}{##1}}}
+\def\PYG@tok@ne{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.94,0.00,0.00}{##1}}}
+\def\PYG@tok@nf{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.38,0.69}{##1}}}
+\def\PYG@tok@si{\def\PYG@bc##1{\colorbox[rgb]{0.88,0.88,0.88}{##1}}}
+\def\PYG@tok@s2{\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}}
+\def\PYG@tok@vi{\def\PYG@tc##1{\textcolor[rgb]{0.19,0.19,0.69}{##1}}}
+\def\PYG@tok@nt{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.00}{##1}}}
+\def\PYG@tok@nv{\def\PYG@tc##1{\textcolor[rgb]{0.56,0.38,0.19}{##1}}}
+\def\PYG@tok@s1{\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}}
+\def\PYG@tok@gp{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}}
+\def\PYG@tok@sh{\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}}
+\def\PYG@tok@ow{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}}
+\def\PYG@tok@sx{\def\PYG@tc##1{\textcolor[rgb]{0.82,0.13,0.00}{##1}}\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}}
+\def\PYG@tok@bp{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
+\def\PYG@tok@c1{\def\PYG@tc##1{\textcolor[rgb]{0.50,0.50,0.50}{##1}}}
+\def\PYG@tok@kc{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
+\def\PYG@tok@c{\def\PYG@tc##1{\textcolor[rgb]{0.50,0.50,0.50}{##1}}}
+\def\PYG@tok@mf{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.38,0.00,0.88}{##1}}}
+\def\PYG@tok@err{\def\PYG@tc##1{\textcolor[rgb]{0.94,0.00,0.00}{##1}}\def\PYG@bc##1{\colorbox[rgb]{0.94,0.63,0.63}{##1}}}
+\def\PYG@tok@kd{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
+\def\PYG@tok@ss{\def\PYG@tc##1{\textcolor[rgb]{0.63,0.38,0.00}{##1}}}
+\def\PYG@tok@sr{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,1.00}{##1}}}
+\def\PYG@tok@mo{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.25,0.00,0.88}{##1}}}
+\def\PYG@tok@mi{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.82}{##1}}}
+\def\PYG@tok@kn{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
+\def\PYG@tok@o{\def\PYG@tc##1{\textcolor[rgb]{0.19,0.19,0.19}{##1}}}
+\def\PYG@tok@kr{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
+\def\PYG@tok@s{\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}}
+\def\PYG@tok@kp{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.19,0.50}{##1}}}
+\def\PYG@tok@w{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
+\def\PYG@tok@kt{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.19,0.19,0.56}{##1}}}
+\def\PYG@tok@sc{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.25,0.82}{##1}}}
+\def\PYG@tok@sb{\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}}
+\def\PYG@tok@k{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
+\def\PYG@tok@se{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.38,0.38,0.38}{##1}}\def\PYG@bc##1{\colorbox[rgb]{1.00,0.94,0.94}{##1}}}
+\def\PYG@tok@sd{\def\PYG@tc##1{\textcolor[rgb]{0.82,0.25,0.13}{##1}}}
+
+\def\PYGZbs{\char`\\}
+\def\PYGZus{\char`\_}
+\def\PYGZob{\char`\{}
+\def\PYGZcb{\char`\}}
+\def\PYGZca{\char`\^}
+\def\PYGZsh{\char`\#}
+\def\PYGZpc{\char`\%}
+\def\PYGZdl{\char`\$}
+\def\PYGZti{\char`\~}
+% for compatibility with earlier versions
+\def\PYGZat{@}
+\def\PYGZlb{[}
+\def\PYGZrb{]}
+\makeatother
+
+\begin{document}
+
+\maketitle
+\tableofcontents
+\phantomsection\label{index::doc}
+
+
+
+\chapter{About S3QL}
+\label{about:about-s3ql}\label{about::doc}\label{about:s3ql-user-s-guide}
+S3QL is a file system that stores all its data online. It supports
+\href{http://aws.amazon.com/s3}{Amazon S3} as well as arbitrary
+SFTP servers and effectively provides you with a hard disk of dynamic,
+infinite capacity that can be accessed from any computer with internet
+access.
+
+S3QL provides a standard, full-featured UNIX file system that is
+conceptually indistinguishable from any local file system.
+Furthermore, S3QL has additional features like compression,
+encryption, data de-duplication, immutable trees and snapshotting
+which make it especially suitable for online backup and archival.
+
+S3QL is designed to favor simplicity and elegance over performance and
+feature-creep. Care has been taken to make the source code as
+readable and serviceable as possible. Solid error detection and error
+handling have been included from the very first line, and S3QL comes
+with extensive automated test cases for all its components.
+
+
+\section{Features}
+\label{about:features}\begin{itemize}
+\item {}
+\textbf{Transparency.} Conceptually, S3QL is indistinguishable from a
+local file system. For example, it supports hardlinks, symlinks,
+ACLs and standard unix permissions, extended attributes and file
+sizes up to 2 TB.
+
+\item {}
+\textbf{Dynamic Size.} The size of an S3QL file system grows and shrinks
+dynamically as required.
+
+\item {}
+\textbf{Compression.} Before storage, all data may be compressed with
+the LZMA, bzip2 or deflate (gzip) algorithm.
+
+\item {}
+\textbf{Encryption.} After compression (but before upload), all data can
+be AES encrypted with a 256 bit key. An additional SHA256 HMAC checksum
+is used to protect the data against manipulation.
+
+\item {}
+\textbf{Data De-duplication.} If several files have identical contents,
+the redundant data will be stored only once. This works across all
+files stored in the file system, and also if only some parts of the
+files are identical while other parts differ.
+
+\item {}
+\textbf{Immutable Trees.} Directory trees can be made immutable, so that
+their contents can no longer be changed in any way whatsoever. This
+can be used to ensure that backups can not be modified after they
+have been made.
+
+\item {}
+\textbf{Copy-on-Write/Snapshotting.} S3QL can replicate entire directory
+trees without using any additional storage space. Only if one of the
+copies is modified, the part of the data that has been modified will
+take up additional storage space. This can be used to create
+intelligent snapshots that preserve the state of a directory at
+different points in time using a minimum amount of space.
+
+\item {}
+\textbf{High Performance independent of network latency.} All operations
+that do not write or read file contents (like creating directories
+or moving, renaming, and changing permissions of files and
+directories) are very fast because they are carried out without any
+network transactions.
+
+S3QL achieves this by saving the entire file and directory structure
+in a database. This database is cached locally and the remote
+copy is updated asynchronously.
+
+\item {}
+\textbf{Support for low bandwidth connections.} S3QL splits file contents
+into smaller blocks and caches blocks locally. This minimizes both
+the number of network transactions required for reading and writing
+data, and the amount of data that has to be transferred when only
+parts of a file are read or written.
+
+\end{itemize}
+
+
+\section{Development Status}
+\label{about:development-status}
+After two years of beta testing by about 93 users had revealed no
+data-critical bugs, S3QL was declared \textbf{stable} with the release of
+version 1.0 on May 13th, 2011. Note that this does not mean that S3QL
+is bug-free. S3QL still has several known bugs, and probably many more
+unknown ones. However, while these bugs may be inconvenient, it is
+highly unlikely that they will endanger any stored data.
+
+Please report any problems on the \href{http://groups.google.com/group/s3ql}{mailing list} or the \href{http://code.google.com/p/s3ql/issues/list}{issue tracker}.
+
+
+\chapter{Installation}
+\label{installation:installation}\label{installation::doc}
+S3QL depends on several other programs and libraries that have to be
+installed first. The best method to satisfy these dependencies depends
+on your distribution. In some cases S3QL and all its dependencies can
+be installed with as little as three commands, while in other cases more work
+may be required.
+
+The \href{http://code.google.com/p/s3ql/w/list}{S3QL Wiki} contains
+installation instructions for quite a few different Linux
+distributions. You should only use the generic instructions in this
+manual if your distribution is not included in the \href{http://code.google.com/p/s3ql/w/list?q=label:Installation}{distribution-specific
+installation instructions} on the wiki.
+
+
+\section{Dependencies}
+\label{installation:dependencies}
+The following is a list of the programs and libraries required for
+running S3QL. Generally, you should first check if your distribution
+already provides suitable packages and only install from source if
+that is not the case. (A combined version check is sketched at the
+end of this list.)
+\begin{itemize}
+\item {}
+Kernel version 2.6.9 or newer. Starting with kernel 2.6.26
+you will get significantly better write performance, so you should
+actually use \emph{2.6.26 or newer whenever possible}.
+
+\item {}
+The \href{http://fuse.sourceforge.net/}{FUSE Library} should already be
+installed on your system. However, you have to make sure that you
+have at least version 2.8.0.
+
+\item {}
+The \href{http://pypi.python.org/pypi/pycryptopp}{PyCrypto++ Python Module}. To check if this module
+is installed, try to execute \code{python -c 'import pycryptopp'}.
+
+\item {}
+The \href{http://pypi.python.org/pypi/argparse}{argparse Python Module}. To check if this module is
+installed, try to execute \code{python -c 'import argparse; print
+argparse.\_\_version\_\_'}. If argparse is installed, this will print
+the version number. You need version 1.1 or later.
+
+\item {}
+The \href{http://code.google.com/p/apsw/}{APSW Python Module}. To check
+which (if any) version of APSW is installed, run the command
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{python -c 'import apsw; print apsw.apswversion(), apsw.sqlitelibversion()'}
+\end{Verbatim}
+
+If APSW is installed, this should print two version numbers which
+both have to be at least 3.7.0.
+
+\item {}
+The \href{http://pypi.python.org/pypi/pyliblzma}{PyLibLZMA Python module}. To check if this module
+is installed, execute \code{python -c 'import lzma; print
+lzma.\_\_version\_\_'}. This should print a version number. You need at
+least version 0.5.3.
+
+\item {}
+The \href{http://code.google.com/p/python-llfuse/}{Python LLFUSE module}. To check if this module
+is installed, execute \code{python -c 'import llfuse; print
+llfuse.\_\_version\_\_'}. This should print a version number. You need at
+least version 0.29.
+
+Note that earlier S3QL versions shipped with a builtin version of
+this module. If you are upgrading from such a version, make sure to
+completely remove the old S3QL version first.
+
+\item {}
+If you want to use the SFTP backend, then you also need the
+\href{http://www.lag.net/paramiko/}{Paramiko Python Module}. To check
+if this module is installed, try to execute \code{python -c 'import
+paramiko'}.
+
+\end{itemize}
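+
+The version checks above can also be run in one go. The following is a
+minimal sketch (assuming a POSIX shell and, as in the checks above, a
+Python 2 interpreter; comparing the printed numbers against the
+required minimum versions is still up to you):
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{\PYGZsh{} each line prints a version number, or fails if the module is missing}
+\PYG{l}{python -c 'import pycryptopp'   \PYGZsh{} prints nothing if the module is present}
+\PYG{l}{python -c 'import argparse; print argparse.\PYGZus{}\PYGZus{}version\PYGZus{}\PYGZus{}'}
+\PYG{l}{python -c 'import apsw; print apsw.apswversion(), apsw.sqlitelibversion()'}
+\PYG{l}{python -c 'import lzma; print lzma.\PYGZus{}\PYGZus{}version\PYGZus{}\PYGZus{}'}
+\PYG{l}{python -c 'import llfuse; print llfuse.\PYGZus{}\PYGZus{}version\PYGZus{}\PYGZus{}'}
+\end{Verbatim}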
+
+
+\section{Installing S3QL}
+\label{installation:inst-s3ql}\label{installation:installing-s3ql}
+To install S3QL itself, proceed as follows:
+\begin{enumerate}
+\item {}
+Download S3QL from \href{http://code.google.com/p/s3ql/downloads/list}{http://code.google.com/p/s3ql/downloads/list}
+
+\item {}
+Unpack it into a folder of your choice
+
+\item {}
+Run \code{python setup.py test} to run a self-test. If this fails, ask
+for help on the \href{http://groups.google.com/group/s3ql}{mailing list} or report a bug in the
+\href{http://code.google.com/p/s3ql/issues/list}{issue tracker}.
+
+\end{enumerate}
+
+Now you have three options:
+\begin{itemize}
+\item {}
+You can run the S3QL commands from the \code{bin/} directory.
+
+\item {}
+You can install S3QL system-wide for all users. To do that, you
+have to run \code{sudo python setup.py install}.
+
+\item {}
+You can install S3QL into \code{\textasciitilde{}/.local} by executing \code{python
+setup.py install -{-}user}. In this case you should make sure that
+\code{\textasciitilde{}/.local/bin} is in your \code{\$PATH} environment variable
+(one way to do this is shown after this list).
+
+\end{itemize}
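+
+For the last option, the following sketch shows one way to extend
+\code{\$PATH} (assuming a Bourne-style login shell that reads
+\code{\textasciitilde{}/.profile}; adapt the file name to your setup):
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{echo 'export PATH="\PYGZdl{}HOME/.local/bin:\PYGZdl{}PATH"' >> \PYGZti{}/.profile}
+\end{Verbatim}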
+
+
+\chapter{Storage Backends}
+\label{backends::doc}\label{backends:storage-backends}
+S3QL can use different protocols to store the file system data.
+Independent of the backend that you use, the place where your file
+system data is being stored is called a \emph{bucket}. (This is mostly for
+historical reasons, since initially S3QL supported only the Amazon S3
+backend).
+
+
+\section{On Backend Reliability}
+\label{backends:on-backend-reliability}
+S3QL has been designed for use with a storage backend where data loss
+is so infrequent that it can be completely neglected (e.g. the Amazon
+S3 backend). If you decide to use a less reliable backend, you should
+keep the following warning in mind and read this section carefully.
+
+\begin{notice}{warning}{Warning:}
+S3QL is not able to compensate for any failures of the backend. In
+particular, it is not able to reconstruct any data that has been lost
+or corrupted by the backend. The persistence and durability of data
+stored in an S3QL file system is limited and determined by the
+backend alone.
+\end{notice}
+
+On the plus side, if a backend loses or corrupts some of the stored
+data, S3QL \emph{will} detect the problem. Missing data will be detected
+when running \code{fsck.s3ql} or when attempting to access the data in the
+mounted file system. In the latter case you will get an IO error, and
+on unmounting S3QL will warn you that the file system is damaged and
+you need to run \code{fsck.s3ql}.
+
+\code{fsck.s3ql} will report all the affected files and move them into the
+\code{/lost+found} directory of the file system.
+
+You should be aware that, because of S3QL's data de-duplication
+feature, the consequences of a data loss in the backend can be
+significantly more severe than you may expect. More concretely, a data
+loss in the backend at time \emph{x} may cause data that is written \emph{after}
+time \emph{x} to be lost as well. What may happen is this:
+\begin{enumerate}
+\item {}
+You store an important file in the S3QL file system.
+
+\item {}
+The backend loses the data blocks of this file. As long as you
+do not access the file or run \code{fsck.s3ql}, S3QL
+is not aware that the data has been lost by the backend.
+
+\item {}
+You save an additional copy of the important file in a different
+location on the same S3QL file system.
+
+\item {}
+S3QL detects that the contents of the new file are identical to the
+data blocks that have been stored earlier. Since at this point S3QL
+is not aware that these blocks have been lost by the backend, it
+does not save another copy of the file contents in the backend but
+relies on the (presumably) existing blocks instead.
+
+\item {}
+Therefore, even though you saved another copy, you still do not
+have a backup of the important file (since both copies refer to the
+same data blocks that have been lost by the backend).
+
+\end{enumerate}
+
+As one can see, this effect becomes less important the more often
+one runs \code{fsck.s3ql}, since \code{fsck.s3ql} will make S3QL aware of any
+blocks that the backend may have lost. Figuratively, this establishes
+a ``checkpoint'': data loss in the backend that occurred before running
+\code{fsck.s3ql} can not affect any file system operations performed after
+running \code{fsck.s3ql}.
+
+Nevertheless, (as said at the beginning) the recommended way to use
+S3QL is in combination with a sufficiently reliable storage backend.
+In that case none of the above will ever be a concern.
+
+
+\section{The \texttt{authinfo} file}
+\label{backends:the-authinfo-file}
+Most backends first try to read the file \code{\textasciitilde{}/.s3ql/authinfo} to determine
+the username and password for connecting to the remote host. If this
+fails, both username and password are read from the terminal.
+
+The \code{authinfo} file has to contain entries of the form
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{backend }\PYG{n+nv}{\textless{}backend\textgreater{}}\PYG{l}{ machine }\PYG{n+nv}{\textless{}host\textgreater{}}\PYG{l}{ login }\PYG{n+nv}{\textless{}user\textgreater{}}\PYG{l}{ password }\PYG{n+nv}{\textless{}password\textgreater{}}
+\end{Verbatim}
+
+So to use the login \code{joe} with password \code{jibbadup} when using the FTP
+backend to connect to the host \code{backups.joesdomain.com}, you would
+specify
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{backend ftp machine backups.joesdomain.com login joe password jibbadup}
+\end{Verbatim}
+
+
+\section{Consistency Guarantees}
+\label{backends:consistency-guarantees}
+The different backends provide different types of \emph{consistency
+guarantees}. Informally, a consistency guarantee tells you how fast
+the backend will apply changes to the stored data.
+
+S3QL defines the following three levels:
+\begin{itemize}
+\item {}
+\textbf{Read-after-Write Consistency.} This is the strongest consistency
+guarantee. If a backend offers read-after-write consistency, it
+guarantees that as soon as you have committed any changes to the
+backend, subsequent requests will take into account these changes.
+
+\item {}
+\textbf{Read-after-Create Consistency.} If a backend provides only
+read-after-create consistency, only the creation of a new object is
+guaranteed to be taken into account for subsequent requests. This
+means that, for example, if you overwrite data in an existing
+object, subsequent requests may still return the old data for a
+certain period of time.
+
+\item {}
+\textbf{Eventual consistency.} This is the lowest consistency level.
+Basically, any changes that you make to the backend may not be
+visible for a certain amount of time after the change has been made.
+However, you are guaranteed that no change will be lost. All changes
+will \emph{eventually} become visible.
+
+\end{itemize}
+
+As long as your backend provides read-after-write or read-after-create
+consistency, you do not have to worry about consistency guarantees at
+all. However, if you plan to use a backend with only eventual
+consistency, you have to be a bit careful in some situations.
+
+
+\subsection{Dealing with Eventual Consistency}
+\label{backends:dealing-with-eventual-consistency}\label{backends:eventual-consistency}
+\begin{notice}{note}{Note:}
+The following applies only to storage backends that do not provide
+read-after-create or read-after-write consistency. Currently,
+this is only the Amazon S3 backend \emph{if used with the US-Standard
+storage region}. If you use a different storage backend, or the S3
+backend with a different storage region, this section does not apply
+to you.
+\end{notice}
+
+While the file system is mounted, S3QL is able to automatically handle
+all issues related to the weak eventual consistency guarantee.
+However, some issues may arise during the mount process and when the
+file system is checked.
+
+Suppose that you mount the file system, store some new data, delete
+some old data and unmount it again. Now remember that eventual
+consistency means that there is no guarantee that these changes will
+be visible immediately. At least in theory it is therefore possible
+that if you mount the file system again, S3QL does not see any of the
+changes that you have made and presents you with an ``old version'' of
+the file system without them. Even worse, if you notice the problem and
+unmount the file system, S3QL will upload the old state (which S3QL
+necessarily has to consider as current) and thereby permanently
+overwrite the newer version (even though this change may not become
+immediately visible either).
+
+The same problem applies when checking the file system. If the backend
+provides S3QL with only partially updated data, S3QL has no way to
+find out if this is a real consistency problem that needs to be fixed or
+if it is only a temporary problem that will resolve itself
+automatically (because there are still changes that have not become
+visible yet).
+
+While this may seem to be a rather big problem, the likelihood of it
+occurring is rather low. In practice, most storage providers rarely
+need more than a few seconds to apply incoming changes, so to trigger
+this problem one would have to unmount and remount the file system in
+a very short time window. Many people therefore make sure that they
+wait a few minutes between successive mounts (or file system checks)
+and decide that the remaining risk is negligible.
+
+Nevertheless, the eventual consistency guarantee does not impose an
+upper limit on the time that it may take for a change to become visible.
+Therefore there is no ``totally safe'' waiting time that would completely
+eliminate this problem; a theoretical possibility always remains.
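+
+For example, a script that remounts the file system shortly after
+unmounting it could insert an explicit delay (a sketch; the mount
+point and storage URL are made up, and the delay is only a heuristic,
+not a guarantee):
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{umount.s3ql /mnt/s3ql}
+\PYG{l}{sleep 600   \PYGZsh{} give the backend a few minutes to apply pending changes}
+\PYG{l}{mount.s3ql s3://mybucket /mnt/s3ql}
+\end{Verbatim}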
+
+
+\section{The Amazon S3 Backend}
+\label{backends:the-amazon-s3-backend}
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \code{s3://\textless{}bucketname\textgreater{}}. Bucket names must conform to the \href{http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html}{S3
+Bucket Name Restrictions}.
+
+The S3 backend offers exceptionally strong reliability guarantees. As
+of August 2010, Amazon guarantees a durability of 99.999999999\% per
+year. In other words, if you store a thousand million objects then on
+average you would lose less than one object in a hundred years.
+
+The Amazon S3 backend provides read-after-create consistency for the
+EU, Asia-Pacific and US-West storage regions. \emph{For the US-Standard
+storage region, Amazon S3 provides only eventual consistency} (please
+refer to {\hyperref[backends:eventual-consistency]{\emph{Dealing with Eventual Consistency}}} for information about
+what this entails).
+
+When connecting to Amazon S3, S3QL uses an unencrypted HTTP
+connection, so if you want your data to stay confidential, you have
+to create the S3QL file system with encryption (this is also the default).
+
+When reading the authentication information for the S3 backend from
+the \code{authinfo} file, the \code{host} field is ignored, i.e. the first entry
+with \code{s3} as a backend will be used. For example
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{backend s3 machine any login myAWSaccessKeyId password myAwsSecretAccessKey}
+\end{Verbatim}
+
+Note that the bucket names come from a global pool, so chances are
+that your favorite name has already been taken by another S3 user.
+Usually a longer bucket name containing some random numbers, like
+\code{19283712\_yourname\_s3ql}, will work better.
+
+If you do not already have one, you need to obtain an Amazon S3
+account from \href{http://aws.amazon.com/}{Amazon AWS}. The account is
+free; you will pay only for the amount of storage that you actually
+use.
+
+Note that the login and password for accessing S3 are not the user id
+and password that you use to log into the Amazon Webpage, but the ``AWS
+access key id'' and ``AWS secret access key'' shown under \href{https://aws-portal.amazon.com/gp/aws/developer/account/index.html?ie=UTF8\&action=access-key}{My
+Account/Access Identifiers}.
+
+\begin{notice}{note}{Note:}
+S3QL also allows you to use \href{http://aws.amazon.com/s3/\#protecting}{reduced redundancy storage} by using \code{s3rr://}
+instead of \code{s3://} in the storage URL. However, this is not
+recommended. The reason is a combination of three factors:
+\begin{itemize}
+\item {}
+RRS has a relatively low reliability; on average you lose one
+out of every ten-thousand objects a year. So you can expect to
+occasionally lose some data.
+
+\item {}
+When \code{fsck.s3ql} asks Amazon S3 for a list of the stored objects,
+this list includes even those objects that have been lost.
+Therefore \code{fsck.s3ql} \emph{can not detect lost objects} and lost data
+will only become apparent when you try to actually read from a
+file whose data has been lost. This is a (very unfortunate)
+peculiarity of Amazon S3.
+
+\item {}
+Due to the data de-duplication feature of S3QL, unnoticed lost
+objects may cause subsequent data loss later in time (see {\hyperref[backends:on-backend-reliability]{On
+Backend Reliability}} for details).
+
+\end{itemize}
+
+In other words, you should really only store an S3QL file system
+using RRS if you know exactly what you are getting into.
+\end{notice}
+
+
+\section{The Local Backend}
+\label{backends:the-local-backend}
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\code{local://\textless{}path\textgreater{}}. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \code{local:///var/archive}.
+
+The local backend provides read-after-write consistency.
+
+
+\section{The SFTP Backend}
+\label{backends:the-sftp-backend}
+The SFTP backend uses the SFTP protocol, which is a file transfer
+protocol similar to ftp, but uses an encrypted SSH connection.
+It provides read-after-write consistency.
+
+Note that the SFTP backend is rather slow and has not been tested
+as extensively as the S3 and Local backends.
+
+The storage URL for SFTP connections has the form
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{sftp://}\PYG{n+nv}{\textless{}host\textgreater{}}\PYG{g+ge}{[:port]}\PYG{l}{/}\PYG{n+nv}{\textless{}path\textgreater{}}
+\end{Verbatim}
+
+The SFTP backend will always ask you for a password if you haven't
+defined one in \code{\textasciitilde{}/.s3ql/authinfo}. However, public key authentication
+is tried first and the password will only be used if the public key
+authentication fails.
+
+The public and private keys will be read from the standard files in
+\code{\textasciitilde{}/.ssh/}. Note that S3QL will refuse to connect to a computer with
+an unknown host key; to add the key to your local keyring you have to
+establish a connection to that computer with the standard SSH command
+line programs first.
+
+
+\chapter{File System Creation}
+\label{mkfs::doc}\label{mkfs:file-system-creation}
+A S3QL file system is created with the \code{mkfs.s3ql} command. It has the
+following syntax:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{mkfs.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+This command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication
+info. (default: \code{\textasciitilde{}/.s3ql})
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to
+get debug messages from all modules. This option can
+be specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+\item [-{-}s3-location \textless{}name\textgreater{}]
+Storage location for new S3 buckets. Allowed values:
+\code{EU}, \code{us-west-1}, \code{ap-southeast-1}, or \code{us-standard}.
+(default: EU)
+\item [-L \textless{}name\textgreater{}]
+Filesystem label
+\item [-{-}blocksize \textless{}size\textgreater{}]
+Maximum block size in KB (default: 10240)
+\item [-{-}plain]
+Create unencrypted file system.
+\item [-{-}force]
+Overwrite any existing data.
+\end{optionlist}
+\end{quote}
+
+Unless you have specified the \code{-{-}plain} option, \code{mkfs.s3ql} will ask you
+to enter an encryption password. If you do not want to enter this
+password every time that you mount the file system, you can store it
+in the \code{\textasciitilde{}/.s3ql/authinfo} file, see {\hyperref[mount:bucket-pw]{\emph{Storing Encryption Passwords}}}.
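+
+For example, to create an encrypted file system in a bucket named
+\code{19283712\_joe\_s3ql} (the bucket name is just a placeholder), one
+might run:
+
+\begin{Verbatim}
+mkfs.s3ql s3://19283712_joe_s3ql
+\end{Verbatim}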
+
+
+\chapter{Managing Buckets}
+\label{adm::doc}\label{adm:managing-buckets}
+The \code{s3qladm} command performs various operations on S3QL buckets.
+The file system contained in the bucket \emph{must not be mounted} when
+using \code{s3qladm}, or things will go badly wrong.
+
+The syntax is
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qladm }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}action\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}storage-url\textgreater{}}
+\end{Verbatim}
+
+where \code{action} may be one of \textbf{passphrase},
+\textbf{upgrade}, \textbf{delete} or \textbf{download-metadata}.
+
+The \textbf{s3qladm} accepts the following general options, no
+matter what specific action is being invoked:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to get
+debug messages from all modules. This option can be
+specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication info.
+(default: \code{\textasciitilde{}/.s3ql})
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+\end{optionlist}
+\end{quote}
+
+Hint: run \code{s3qladm \textless{}action\textgreater{} -{-}help} to get help on the additional
+arguments that the different actions take.
+
+
+\section{Changing the Passphrase}
+\label{adm:changing-the-passphrase}
+To change the passphrase of a bucket, use the \code{s3qladm} command:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qladm passphrase }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+The passphrase can only be changed when the bucket is not mounted.
+
+
+\section{Upgrading the file system}
+\label{adm:upgrading-the-file-system}
+If you have installed a new version of S3QL, it may sometimes be
+necessary to upgrade the file system metadata as well. Note that in
+this case the file system can no longer be accessed with older
+versions of S3QL after the upgrade.
+
+During the upgrade you have to make sure that the command is not
+interrupted, and that no one else tries to mount, check or upgrade the
+file system at the same time.
+
+To upgrade a file system from the previous to the current revision,
+execute
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qladm upgrade }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+
+\section{Deleting a file system}
+\label{adm:deleting-a-file-system}
+A file system can be deleted with:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qladm delete }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+This physically deletes all the data and file system structures.
+
+
+\section{Restoring Metadata Backups}
+\label{adm:restoring-metadata-backups}
+If the most-recent copy of the file system metadata has been damaged
+irreparably, it is possible to restore one of the automatically
+created backup copies.
+
+The command
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qladm download-metadata }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+will give you a list of the available metadata backups and allow you
+to download them. This will create two new files in the current
+directory, ending in \code{.db} and \code{.params}. To actually use the
+downloaded backup, you need to move these files into the \code{\textasciitilde{}/.s3ql/}
+directory and run \code{fsck.s3ql}.
+
+\begin{notice}{warning}{Warning:}
+You should probably not use this functionality without having asked
+for help on the mailing list first (see {\hyperref[resources:resources]{\emph{Further Resources / Getting Help}}}).
+\end{notice}
+
+
+\chapter{Mounting}
+\label{mount:mounting}\label{mount::doc}
+A S3QL file system is mounted with the \code{mount.s3ql} command. It has
+the following syntax:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{mount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}storage url\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}mountpoint\textgreater{}}
+\end{Verbatim}
+
+\begin{notice}{note}{Note:}
+S3QL is not a network file system like \href{http://en.wikipedia.org/wiki/Network\_File\_System\_\%28protocol\%29}{NFS}
+or \href{http://en.wikipedia.org/wiki/CIFS}{CIFS}. It can only be
+mounted on one computer at a time.
+\end{notice}
+
+This command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication
+info. (default: \code{\textasciitilde{}/.s3ql})
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to
+get debug messages from all modules. This option can
+be specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+\item [-{-}cachesize \textless{}size\textgreater{}]
+Cache size in kb (default: 102400 (100 MB)). Should be
+at least 10 times the blocksize of the filesystem,
+otherwise an object may be retrieved and written
+several times during a single write() or read()
+operation.
+\item [-{-}max-cache-entries \textless{}num\textgreater{}]
+Maximum number of entries in cache (default: 768).
+Each cache entry requires one file descriptor, so if
+you increase this number you have to make sure that
+your process file descriptor limit (as set with
+\code{ulimit -n}) is high enough (at least the number of
+cache entries + 100).
+\item [-{-}allow-other]
+Normally, only the user who called \code{mount.s3ql} can
+access the mount point. This user then also has full
+access to it, independent of individual file
+permissions. If the \code{-{-}allow-other} option is
+specified, other users can access the mount point as
+well and individual file permissions are taken into
+account for all users.
+\item [-{-}allow-root]
+Like \code{-{-}allow-other}, but restrict access to the
+mounting user and the root user.
+\item [-{-}fg]
+Do not daemonize, stay in foreground
+\item [-{-}single]
+Run in single threaded mode. If you don't understand
+this, then you don't need it.
+\item [-{-}upstart]
+Stay in foreground and raise SIGSTOP once mountpoint
+is up.
+\item [-{-}profile]
+Create profiling information. If you don't understand
+this, then you don't need it.
+\item [-{-}compress \textless{}name\textgreater{}]
+Compression algorithm to use when storing new data.
+Allowed values: \code{lzma}, \code{bzip2}, \code{zlib}, \code{none}.
+(default: \code{lzma})
+\item [-{-}metadata-upload-interval \textless{}seconds\textgreater{}]
+Interval in seconds between complete metadata uploads.
+Set to 0 to disable. Default: 24h.
+\item [-{-}compression-threads \textless{}no\textgreater{}]
+Number of parallel compression and encryption threads
+to use (default: 1).
+\end{optionlist}
+\end{quote}
+
+
+\section{Storing Encryption Passwords}
+\label{mount:bucket-pw}\label{mount:storing-encryption-passwords}
+If you are trying to mount an encrypted bucket, \code{mount.s3ql} will first
+try to read the password from the \code{.s3ql/authinfo} file (the same file
+that is used to read the backend authentication data) and prompt the
+user to enter the password only if this fails.
+
+The \code{authinfo} entries to specify bucket passwords are of the form
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{storage-url }\PYG{n+nv}{\textless{}storage-url\textgreater{}}\PYG{l}{ password }\PYG{n+nv}{\textless{}password\textgreater{}}
+\end{Verbatim}
+
+So to always use the password \code{topsecret} when mounting \code{s3://joes\_bucket},
+the entry would be
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{storage-url s3://joes\PYGZus{}bucket password topsecret}
+\end{Verbatim}
+
+\begin{notice}{note}{Note:}
+If you are using the local backend, the storage url will
+always be converted to an absolute path. So if you are in the
+\code{/home/john} directory and try to mount \code{local://bucket}, the matching
+\code{authinfo} entry has to have a storage url of
+\code{local:///home/john/bucket}.
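+The matching entry would then look like this:
+
+\begin{Verbatim}
+storage-url local:///home/john/bucket password topsecret
+\end{Verbatim}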
+\end{notice}
+
+
+\section{Compression Algorithms}
+\label{mount:compression-algorithms}
+S3QL supports three compression algorithms, LZMA, Bzip2 and zlib (with
+LZMA being the default). The compression algorithm can be specified
+freely whenever the file system is mounted, since it affects only the
+compression of new data blocks.
+
+Roughly speaking, LZMA is slower but achieves better compression
+ratios than Bzip2, while Bzip2 in turn is slower but achieves better
+compression ratios than zlib.
+
+For maximum file system performance, the best algorithm therefore
+depends on your network connection speed: the compression algorithm
+should be fast enough to saturate your network connection.
+
+To find the optimal algorithm for your system, S3QL ships with a
+program called \code{benchmark.py} in the \code{contrib} directory. You should
+run this program on a file whose size is roughly equal to the block
+size of your file system and whose contents are similar. It will
+then determine the compression speeds for the different algorithms and
+the upload speeds for the specified backend and recommend the best
+algorithm that is fast enough to saturate your network connection.
+
+Obviously you should make sure that there is little other system load
+when you run \code{benchmark.py} (i.e., don't compile software or encode
+videos at the same time).
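+
+For illustration, and assuming that \code{benchmark.py} takes the test
+file and the storage URL as its arguments (run it with \code{-{-}help} to
+confirm the exact syntax of your version), an invocation might look
+like this:
+
+\begin{Verbatim}
+./contrib/benchmark.py testfile s3://mybucket
+\end{Verbatim}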
+
+
+\section{Parallel Compression}
+\label{mount:parallel-compression}
+If you are running S3QL on a system with multiple cores, you might
+want to set \code{-{-}compression-threads} to a value bigger than one. This
+will instruct S3QL to compress and encrypt several blocks at the same
+time.
+
+If you want to do this in combination with the LZMA compression
+algorithm, you should keep an eye on memory usage though: every
+LZMA compression thread requires about 200 MB of RAM.
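+
+For example, to mount with four parallel compression threads (bucket
+and mount point are placeholders):
+
+\begin{Verbatim}
+mount.s3ql --compression-threads 4 s3://mybucket /mnt/s3ql
+\end{Verbatim}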
+
+\begin{notice}{note}{Note:}
+To determine the optimal compression algorithm for your network
+connection when using multiple threads, you can pass the
+\code{-{-}compression-threads} option to \code{contrib/benchmark.py}.
+\end{notice}
+
+
+\section{Notes about Caching}
+\label{mount:notes-about-caching}
+S3QL maintains a local cache of the file system data to speed up
+access. The cache is block based, so it is possible that only parts of
+a file are in the cache.
+
+
+\subsection{Maximum Number of Cache Entries}
+\label{mount:maximum-number-of-cache-entries}
+The maximum size of the cache can be configured with the \code{-{-}cachesize}
+option. In addition to that, the maximum number of objects in the
+cache is limited by the \code{-{-}max-cache-entries} option, so it is
+possible that the cache does not grow up to the maximum cache size
+because the maximum number of cache elements has been reached. The
+reason for this limit is that each cache entry requires one open
+file descriptor, and Linux distributions usually limit the total
+number of file descriptors per process to about a thousand.
+
+If you specify a larger value for \code{-{-}max-cache-entries}, you should
+therefore make sure to also configure your system to increase the
+maximum number of open file handles. This can be done temporarily with
+the \code{ulimit -n} command. The method to permanently change this limit
+system-wide depends on your distribution.
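+
+For example, to temporarily raise the limit in the current shell and
+then mount with a correspondingly larger cache (the numbers are only
+illustrative):
+
+\begin{Verbatim}
+ulimit -n 2100
+mount.s3ql --max-cache-entries 2000 s3://mybucket /mnt/s3ql
+\end{Verbatim}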
+
+
+\subsection{Cache Flushing and Expiration}
+\label{mount:cache-flushing-and-expiration}
+S3QL flushes changed blocks in the cache to the backend whenever a block
+has not been accessed for at least 10 seconds. Note that when a block is
+flushed, it still remains in the cache.
+
+Cache expiration (i.e., removal of blocks from the cache) is only done
+when the maximum cache size is reached. S3QL always expires the least
+recently used blocks first.
+
+
+\section{Automatic Mounting}
+\label{mount:automatic-mounting}
+If you want to mount and unmount an S3QL file system automatically at
+system startup and shutdown, you should do so with one dedicated S3QL
+init script for each S3QL file system.
+
+If your system is using upstart, an appropriate job can be defined
+as follows (and should be placed in \code{/etc/init/}):
+
+\begin{Verbatim}[commandchars=\\\{\},numbers=left,firstnumber=1,stepnumber=1]
+\PYG{l}{description "S3QL Backup File System"}
+\PYG{l}{author "Nikolaus Rath }\PYG{n+nv}{\textless{}Nikolaus@rath.org\textgreater{}}\PYG{l}{"}
+
+\PYG{l}{\PYGZsh{} This assumes that eth0 provides your internet connection}
+\PYG{l}{start on (filesystem and net-device-up IFACE=eth0)}
+\PYG{l}{stop on runlevel }\PYG{g+ge}{[016]}
+
+\PYG{l}{env BUCKET="s3://my-backup-bla"}
+\PYG{l}{env MOUNTPOINT="/mnt/backup"}
+
+\PYG{l}{expect stop}
+
+\PYG{l}{script}
+\PYG{l}{ \PYGZsh{} Redirect stdout and stderr into the system log}
+\PYG{l}{ DIR=\PYGZdl{}(mktemp -d)}
+\PYG{l}{ mkfifo "\PYGZdl{}DIR/LOG\PYGZus{}FIFO"}
+\PYG{l}{ logger -t s3ql -p local0.info }\PYG{n+nv}{\textless{} "\PYGZdl{}DIR/LOG\PYGZus{}FIFO" \&}
+\PYG{n+nv}{ exec \textgreater{}}\PYG{l}{ "\PYGZdl{}DIR/LOG\PYGZus{}FIFO"}
+\PYG{l}{ exec 2\textgreater{}\&1}
+\PYG{l}{ rm -rf "\PYGZdl{}DIR"}
+
+\PYG{l}{ \PYGZsh{} Check and mount file system}
+\PYG{l}{ fsck.s3ql --batch "\PYGZdl{}BUCKET"}
+\PYG{l}{ exec mount.s3ql --upstart "\PYGZdl{}BUCKET" "\PYGZdl{}MOUNTPOINT"}
+\PYG{l}{end script}
+
+\PYG{l}{pre-stop script}
+\PYG{l}{ umount.s3ql "\PYGZdl{}MOUNTPOINT"}
+\PYG{l}{end script}
+\end{Verbatim}
+
+\begin{notice}{note}{Note:}
+In principle, it is also possible to automatically mount an S3QL
+file system with an appropriate entry in \code{/etc/fstab}. However,
+this is not recommended for several reasons:
+\begin{itemize}
+\item {}
+file systems mounted in \code{/etc/fstab} will be unmounted with the
+\code{umount} command, so your system will not wait until all data has
+been uploaded but will shut down (or restart) immediately (this is a
+FUSE limitation, see \href{http://code.google.com/p/s3ql/issues/detail?id=159}{issue 159}).
+
+\item {}
+There is no way to tell the system that mounting S3QL requires a
+Python interpreter to be available, so it may attempt to run
+\code{mount.s3ql} before it has mounted the volume containing the
+Python interpreter.
+
+\item {}
+There is no standard way to tell the system that the internet
+connection has to be up before the S3QL file system can be
+mounted.
+
+\end{itemize}
+\end{notice}
+
+
+\chapter{Advanced S3QL Features}
+\label{special:advanced-s3ql-features}\label{special::doc}
+
+\section{Snapshotting and Copy-on-Write}
+\label{special:snapshotting-and-copy-on-write}\label{special:s3qlcp}
+The command \code{s3qlcp} can be used to duplicate a directory tree without
+physically copying the file contents. This is possible due to the data
+de-duplication feature of S3QL.
+
+The syntax of \code{s3qlcp} is:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlcp }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}src\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}target\textgreater{}}
+\end{Verbatim}
+
+This will replicate the contents of the directory \code{\textless{}src\textgreater{}} in the
+directory \code{\textless{}target\textgreater{}}. \code{\textless{}src\textgreater{}} has to be an existing directory and
+\code{\textless{}target\textgreater{}} must not exist. Moreover, both directories have to be
+within the same S3QL file system.
+
+The replication will not take up any additional space. Only if one of
+the directories is modified later on will the modified data require
+additional storage space.
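+
+For example, to snapshot a (hypothetical) \code{documents} directory
+into a dated copy within the same file system:
+
+\begin{Verbatim}
+s3qlcp /mnt/s3ql/documents /mnt/s3ql/documents_2010-05
+\end{Verbatim}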
+
+\code{s3qlcp} can only be called by the user that mounted the file system
+and (if the file system was mounted with \code{-{-}allow-other} or \code{-{-}allow-root})
+the root user. This limitation might be removed in the future (see \href{http://code.google.com/p/s3ql/issues/detail?id=155}{issue 155}).
+
+Note that:
+\begin{itemize}
+\item {}
+After the replication, both source and target directory will still
+be completely ordinary directories. You can regard \code{\textless{}src\textgreater{}} as a
+snapshot of \code{\textless{}target\textgreater{}} or vice versa. However, the most common
+usage of \code{s3qlcp} is to regularly duplicate the same source
+directory, say \code{documents}, to different target directories. For
+monthly replication, for example, the target directories would
+typically be named something like \code{documents\_January} for the
+replication in January, \code{documents\_February} for the replication
+in February, etc.
+In this case it is clear that the target directories should be
+regarded as snapshots of the source directory.
+
+\item {}
+Exactly the same effect could be achieved by an ordinary copy
+program like \code{cp -a}. However, this procedure would be orders of
+magnitude slower, because \code{cp} would have to read every file
+completely (so that S3QL had to fetch all the data over the network
+from the backend) before writing them into the destination folder.
+
+\item {}
+Before starting with the replication, S3QL has to flush the local
+cache. So if you just copied lots of new data into the file system
+that has not yet been uploaded, replication will take longer than
+usual.
+
+\end{itemize}
+
+
+\subsection{Snapshotting vs Hardlinking}
+\label{special:snapshotting-vs-hardlinking}
+Snapshot support in S3QL is inspired by the hardlinking feature that
+is offered by programs like \href{http://www.samba.org/rsync}{rsync} or
+\href{http://savannah.nongnu.org/projects/storebackup}{storeBackup}.
+These programs can create a hardlink instead of copying a file if an
+identical file already exists in the backup. However, using hardlinks
+has two large disadvantages:
+\begin{itemize}
+\item {}
+backups and restores always have to be made with a special program
+that takes care of the hardlinking. The backup must not be touched
+by any other programs (they may make changes that inadvertently
+affect other hardlinked files)
+
+\item {}
+special care needs to be taken to handle files which are already
+hardlinked (the restore program needs to know that the hardlink was
+not just introduced by the backup program to save space)
+
+\end{itemize}
+
+S3QL snapshots do not have these problems, and they can be used with
+any backup program.
+
+
+\section{Getting Statistics}
+\label{special:s3qlstat}\label{special:getting-statistics}
+You can get more information about a mounted S3QL file system with the
+\code{s3qlstat} command. It has the following syntax:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlstat }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}mountpoint\textgreater{}}
+\end{Verbatim}
+
+Probably the most interesting numbers are the total size of your data,
+the total size after de-duplication, and the final size after
+de-duplication and compression.
+
+\code{s3qlstat} can only be called by the user that mounted the file system
+and (if the file system was mounted with \code{-{-}allow-other} or \code{-{-}allow-root})
+the root user. This limitation might be removed in the future (see \href{http://code.google.com/p/s3ql/issues/detail?id=155}{issue 155}).
+
+For a full list of available options, run \code{s3qlstat -{-}help}.
+
+
+\section{Immutable Trees}
+\label{special:immutable-trees}\label{special:s3qllock}
+The command \textbf{s3qllock} can be used to make a directory tree
+immutable. Immutable trees can no longer be changed in any way
+whatsoever. You cannot add new files or directories and you cannot
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the \textbf{s3qlrm} command (see
+below).
+
+For example, to make the directory tree beneath the directory
+\code{2010-04-21} immutable, execute
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qllock 2010-04-21}
+\end{Verbatim}
+
+Immutability is a feature designed for backups. Traditionally, backups
+have been made on external tape drives. Once a backup was made, the
+tape was removed and locked away on a shelf. This has the
+great advantage that the contents of the backup are now permanently
+fixed. Nothing (short of physical destruction) can change or delete
+files in the backup.
+
+In contrast, when backing up into an online storage system like S3QL,
+all backups are available every time the file system is mounted.
+Nothing prevents a file in an old backup from being changed again
+later on. In the worst case, this may make your entire backup system
+worthless. Imagine that your system gets infected by a nasty virus
+that simply deletes all files it can find -- if the virus is active
+while the backup file system is mounted, the virus will destroy all
+your old backups as well!
+
+Even if the possibility of a malicious virus or trojan horse is
+excluded, being able to change a backup after it has been made is
+generally not a good idea. A common S3QL use case is to keep the file
+system mounted at all times and periodically create backups with
+\textbf{rsync -a}. This allows every user to recover her files from a
+backup without having to call the system administrator. However, this
+also allows every user to accidentally change or delete files \emph{in} one
+of the old backups.
+
+Making a backup immutable protects you against all these problems.
+Unless you happen to run into a virus that was specifically programmed
+to attack S3QL file systems, backups can be neither deleted nor
+changed after they have been made immutable.
+
+
+\section{Fast Recursive Removal}
+\label{special:s3qlrm}\label{special:fast-recursive-removal}
+The \code{s3qlrm} command can be used to recursively delete files and
+directories on an S3QL file system. Although \code{s3qlrm} is faster than
+using e.g. \code{rm -r}, the main reason for its existence is that it
+allows you to delete immutable trees as well. The syntax is rather
+simple:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlrm }\PYG{n+nv}{\textless{}directory\textgreater{}}
+\end{Verbatim}
+
+Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.
+
+
+\section{Runtime Configuration}
+\label{special:runtime-configuration}\label{special:s3qlctrl}
+The \code{s3qlctrl} command can be used to control a mounted S3QL file
+system. Its syntax is
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlctrl }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}action\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}mountpoint\textgreater{}}\PYG{l}{ ...}
+\end{Verbatim}
+
+\code{\textless{}mountpoint\textgreater{}} must be the location of a mounted S3QL file system.
+For a list of valid options, run \code{s3qlctrl -{-}help}. \code{\textless{}action\textgreater{}}
+may be one of:
+\begin{quote}
+\begin{quote}\begin{description}
+\item[{flushcache}] \leavevmode
+Flush file system cache. The command blocks until the cache has
+been flushed.
+
+\item[{log}] \leavevmode
+Change log level.
+
+\item[{cachesize}] \leavevmode
+Change file system cache size.
+
+\item[{upload-meta}] \leavevmode
+Trigger a metadata upload.
+
+\end{description}\end{quote}
+\end{quote}
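+
+For example, to trigger an immediate metadata upload for a file
+system mounted at the (hypothetical) mount point \code{/mnt/s3ql}:
+
+\begin{Verbatim}
+s3qlctrl upload-meta /mnt/s3ql
+\end{Verbatim}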
+
+
+\chapter{Unmounting}
+\label{umount::doc}\label{umount:unmounting}
+To unmount an S3QL file system, use the command:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{umount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}mountpoint\textgreater{}}
+\end{Verbatim}
+
+This will block until all data has been committed to the storage
+backend.
+
+Only the user who mounted the file system with \textbf{mount.s3ql}
+is able to unmount it again. If you are root and want to unmount an
+S3QL file system mounted by an ordinary user, you have to use the
+\textbf{fusermount -u} or \textbf{umount} command instead. Note
+that these commands do not block until all data has been uploaded, so
+if you use them instead of \code{umount.s3ql} then you should manually wait
+for the \code{mount.s3ql} process to terminate before shutting down the
+system.
+
+The \textbf{umount.s3ql} command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug]
+activate debugging output
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}lazy, -z]
+Lazy umount. Detaches the file system immediately, even if
+there are still open files. The data will be uploaded in the
+background once all open files have been closed.
+\end{optionlist}
+\end{quote}
+
+If, for some reason, the \code{umount.s3ql} command does not work, the file
+system can also be unmounted with \code{fusermount -u -z}. Note that this
+command will return immediately and the file system may continue to
+upload data in the background for a while longer.
+
+
+\chapter{Checking for Errors}
+\label{fsck:checking-for-errors}\label{fsck::doc}
+If, for some reason, the filesystem has not been correctly unmounted,
+or if you suspect that there might be errors, you should run the
+\code{fsck.s3ql} utility. It has the following syntax:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{fsck.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+This command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication info.
+(default: \code{\textasciitilde{}/.s3ql)}
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to get
+debug messages from all modules. This option can be
+specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext even
+for unencrypted file systems.
+\item [-{-}batch]
+If user input is required, exit without prompting.
+\item [-{-}force]
+Force checking even if file system is marked clean.
+\end{optionlist}
+\end{quote}
+
+
+\chapter{Contributed Programs}
+\label{contrib:contributed-programs}\label{contrib::doc}
+S3QL comes with a few contributed programs that are not part of the
+core distribution (and are therefore not installed automatically by
+default), but which may nevertheless be useful. These programs are in
+the \code{contrib} directory of the source distribution or in
+\code{/usr/share/doc/s3ql/contrib} if you installed S3QL from a package.
+
+
+\section{benchmark.py}
+\label{contrib:benchmark-py}
+This program measures your uplink bandwidth and compression speed and
+recommends a compression algorithm for optimal throughput.
+
+
+\section{s3\_copy.py}
+\label{contrib:s3-copy-py}
+This program physically duplicates an Amazon S3 bucket. It can be used to
+migrate buckets to a different storage region or storage class
+(standard or reduced redundancy).
+
+
+\section{pcp.py}
+\label{contrib:pcp-py}\label{contrib:pcp}
+\code{pcp.py} is a wrapper program that starts several rsync processes to
+copy directory trees in parallel. This is important because
+transferring files in parallel significantly enhances performance when
+copying data from an S3QL file system (see {\hyperref[tips:copy-performance]{\emph{Permanently mounted backup file system}}} for
+details).
+
+To recursively copy the directory \code{/mnt/home-backup} into
+\code{/home/joe} using 8 parallel processes and preserving permissions,
+you would execute
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{pcp.py -a --processes=8 /mnt/home-backup/ /home/joe}
+\end{Verbatim}
+
+
+\section{s3ql\_backup.sh}
+\label{contrib:s3-backup-sh}
+This is an example script that demonstrates how to set up a simple but
+powerful backup solution using S3QL and \href{http://samba.org/rsync}{rsync}.
+
+The \code{s3ql\_backup.sh} script automates the following steps:
+\begin{enumerate}
+\item {}
+Mount the file system
+
+\item {}
+Replicate the previous backup with {\hyperref[special:s3qlcp]{\emph{s3qlcp}}}
+
+\item {}
+Update the new copy with the data from the backup source using rsync
+
+\item {}
+Make the new backup immutable with {\hyperref[special:s3qllock]{\emph{s3qllock}}}
+
+\item {}
+Delete old backups that are no longer needed
+
+\item {}
+Unmount the file system
+
+\end{enumerate}
+
+The backups are stored in directories of the form
+\code{YYYY-MM-DD\_HH:mm:ss} and the {\hyperref[contrib:expire-backups-py]{expire\_backups.py}} command is used to
+delete old backups.
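+
+The following is a condensed sketch of these steps; the storage URL,
+mount point and backup source are placeholders, and the error handling
+of the real script is omitted:
+
+\begin{Verbatim}
+#!/bin/bash
+# Sketch of one backup cycle; all paths are placeholders.
+fsck.s3ql --batch "s3://mybucket"
+mount.s3ql "s3://mybucket" /mnt/backup
+cd /mnt/backup
+new_backup=$(date "+%Y-%m-%d_%H:%M:%S")
+# Replicate the previous backup (if any) without copying file contents
+last_backup=$(ls -d ????-??-??_??:??:?? 2>/dev/null | tail -n 1)
+[ -n "$last_backup" ] && s3qlcp "$last_backup" "$new_backup"
+# Bring the new copy up to date, then make it immutable
+rsync -aHx --delete /home/ "$new_backup/"
+s3qllock "$new_backup"
+# Expire backups that are no longer needed, then unmount
+expire_backups.py --use-s3qlrm 1 7 14 31
+cd /
+umount.s3ql /mnt/backup
+\end{Verbatim}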
+
+
+\section{expire\_backups.py}
+\label{contrib:expire-backups-py}
+\textbf{expire\_backups.py} is a program to intelligently remove old
+backups that are no longer needed.
+
+To define what backups you want to keep for how long, you define a
+number of \emph{age ranges}. \textbf{expire\_backups} ensures that you
+will have at least one backup in each age range at all times. It will
+keep exactly as many backups as are required for that and delete any
+backups that become redundant.
+
+Age ranges are specified by giving a list of range boundaries in terms
+of backup cycles. Every time you create a new backup, the existing
+backups age by one cycle.
+
+Example: when \textbf{expire\_backups} is called with the age range
+definition \code{1 3 7 14 31}, it will guarantee that you always have the
+following backups available:
+\begin{enumerate}
+\item {}
+A backup that is 0 to 1 cycles old (i.e., the most recent backup)
+
+\item {}
+A backup that is 1 to 3 cycles old
+
+\item {}
+A backup that is 3 to 7 cycles old
+
+\item {}
+A backup that is 7 to 14 cycles old
+
+\item {}
+A backup that is 14 to 31 cycles old
+
+\end{enumerate}
+
+\begin{notice}{note}{Note:}
+If you do backups in fixed intervals, then one cycle will be
+equivalent to the backup interval. The advantage of specifying the
+age ranges in terms of backup cycles rather than days or weeks is
+that it allows you to gracefully handle irregular backup intervals.
+Imagine that for some reason you do not turn on your computer for
+one month. Now all your backups are at least a month old, and if you
+had specified the above backup strategy in terms of absolute ages,
+they would all be deleted! Specifying age ranges in terms of backup
+cycles avoids this sort of problem.
+\end{notice}
+
+\textbf{expire\_backups} usage is simple. It requires backups to have
+names of the form \code{year-month-day\_hour:minute:seconds}
+(\code{YYYY-MM-DD\_HH:mm:ss}) and works on all backups in the current
+directory. So for the above backup strategy, the correct invocation
+would be:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{expire\PYGZus{}backups.py 1 3 7 14 31}
+\end{Verbatim}
+
+When storing your backups on an S3QL file system, you probably want to
+specify the \code{-{-}use-s3qlrm} option as well. This tells
+\textbf{expire\_backups} to use the {\hyperref[special:s3qlrm]{\emph{s3qlrm}}} command to
+delete directories.
+
+\textbf{expire\_backups} uses a ``state file'' to keep track of which
+backups are how many cycles old (since this cannot be inferred from
+the dates contained in the directory names). The standard name for
+this state file is \code{.expire\_backups.dat}. If this file gets
+damaged or deleted, \textbf{expire\_backups} no longer knows the ages
+of the backups and refuses to work. In this case you can use the
+\code{-{-}reconstruct-state} option to try to reconstruct the state
+from the backup dates. However, the accuracy of this reconstruction
+depends strongly on how regularly you have made your backups (it
+is only completely correct if the time between subsequent backups has
+always been exactly the same), so it's generally a good idea not to
+tamper with the state file.
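+
+For example, to attempt a state reconstruction for the backup strategy
+above while deleting with \code{s3qlrm}:
+
+\begin{Verbatim}
+expire_backups.py --reconstruct-state --use-s3qlrm 1 3 7 14 31
+\end{Verbatim}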
+
+For a full list of available options, run \textbf{expire\_backups.py
+--help}.
+
+
+\section{s3ql.conf}
+\label{contrib:s3ql-conf}
+\code{s3ql.conf} is an example upstart job definition file. It defines a
+job that automatically mounts an S3QL file system on system start, and
+properly unmounts it when the system is shut down.
+
+
+\chapter{Tips \& Tricks}
+\label{tips:tips-tricks}\label{tips::doc}
+
+\section{Permanently mounted backup file system}
+\label{tips:copy-performance}\label{tips:permanently-mounted-backup-file-system}
+If you use S3QL as a backup file system, it can be useful to mount the
+file system permanently (rather than just mounting it for a backup and
+unmounting it afterwards). Especially if your file system becomes
+large, this saves you the long mount and unmount times if you only want
+to restore a single file.
+
+If you decide to do so, you should make sure to
+\begin{itemize}
+\item {}
+Use {\hyperref[special:s3qllock]{\emph{s3qllock}}} to ensure that backups are immutable
+after they have been made.
+
+\item {}
+Call {\hyperref[special:s3qlctrl]{\emph{s3qlctrl upload-meta}}} right after every
+backup to make sure that the newest metadata is stored safely (if
+you do backups often enough, this may also allow you to set the
+\code{-{-}metadata-upload-interval} option of \textbf{mount.s3ql}
+to zero); see the example after this list.
+
+\end{itemize}
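+
+For example, a backup run on a permanently mounted file system might
+end with (all paths are placeholders):
+
+\begin{Verbatim}
+s3qllock /mnt/backup/2010-04-21
+s3qlctrl upload-meta /mnt/backup
+\end{Verbatim}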
+
+
+\section{Improving copy performance}
+\label{tips:improving-copy-performance}
+If you want to copy a lot of smaller files \emph{from} an S3QL file system
+(e.g. for a system restore) you will probably notice that the
+performance is rather bad.
+
+The reason for this is intrinsic to the way S3QL works. Whenever you
+read a file, S3QL first has to retrieve this file over the network
+from the storage backend. This takes a minimum amount of time (the
+network latency), no matter how big or small the file is. So when you
+copy lots of small files, 99\% of the time is actually spent waiting
+for network data.
+
+Theoretically, this problem is easy to solve: you just have to copy
+several files at the same time. In practice, however, almost all unix
+utilities (\code{cp}, \code{rsync}, \code{tar} and friends) insist on copying
+data one file at a time. This makes a lot of sense when copying data
+on the local hard disk, but in case of S3QL this is really
+unfortunate.
+
+The best workaround that has been found so far is to copy files by
+starting several rsync processes at once and use exclusion rules to
+make sure that they work on different sets of files.
+
+For example, the following script will start 3 rsync instances. The
+first instance handles all filenames starting with a-f, the second the
+filenames from g-l and the third covers the rest. The \code{+ */} rule
+ensures that every instance looks into all directories.
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{c}{\PYGZsh{}!/bin/bash}
+
+\PYG{l}{RSYNC\PYGZus{}ARGS="-aHv /mnt/s3ql/ /home/restore/"}
+
+\PYG{l}{rsync -f "+ */" -f "-! }\PYG{g+ge}{[a-f]}\PYG{l}{*" \PYGZdl{}RSYNC\PYGZus{}ARGS \&}
+\PYG{l}{rsync -f "+ */" -f "-! }\PYG{g+ge}{[g-l]}\PYG{l}{*" \PYGZdl{}RSYNC\PYGZus{}ARGS \&}
+\PYG{l}{rsync -f "+ */" -f "- }\PYG{g+ge}{[a-l]}\PYG{l}{*" \PYGZdl{}RSYNC\PYGZus{}ARGS \&}
+
+\PYG{l}{wait}
+\end{Verbatim}
+
+The optimum number of parallel processes depends on your network
+connection and the size of the files that you want to transfer.
+However, starting about 10 processes seems to be a good compromise
+that increases performance dramatically in almost all situations.
+
+S3QL comes with a script named \code{pcp.py} in the \code{contrib} directory
+that can be used to transfer files in parallel without having to write
+an explicit script first. See the description of {\hyperref[contrib:pcp]{\emph{pcp.py}}} for
+details.
+
+
+\chapter{Known Issues}
+\label{issues:known-issues}\label{issues::doc}\begin{itemize}
+\item {}
+S3QL is rather slow when an application tries to write data in
+unreasonably small chunks. If a 1 MB file is copied in chunks of 1
+KB, this will take more than 10 times as long as when it's copied
+with the (recommended) chunk size of 128 KB.
+
+This is a limitation of the FUSE library (which does not yet support
+write caching) which will hopefully be addressed in some future FUSE
+version.
+
+Most applications, including e.g. GNU \code{cp} and \code{rsync}, use
+reasonably large buffers and are therefore not affected by this
+problem and perform very efficiently on S3QL file systems.
+
+However, if you encounter unexpectedly slow performance with a
+specific program, this might be due to the program using very small
+write buffers. Although this is not really a bug in the program,
+it might be worth asking the program's authors for help.
+
+\item {}
+S3QL always updates file and directory access times as if the \code{relatime}
+mount option had been specified: the access time (``atime'') is only updated
+if it is currently earlier than either the status change time
+(``ctime'') or modification time (``mtime'').
+
+\item {}
+S3QL directories always have an \code{st\_nlink} value of 1. This may confuse
+programs that rely on directories having \code{st\_nlink} values of \emph{(2 +
+number of sub directories)}.
+
+Note that this is not a bug in S3QL. Including sub directories in
+the \code{st\_nlink} value is a Unix convention, but by no means a
+requirement. If an application blindly relies on this convention
+being followed, then this is a bug in the application.
+
+Prominent examples are early versions of GNU find, which required
+the \code{-{-}noleaf} option to work correctly on S3QL file systems. This
+bug has already been fixed in recent find versions.
+
+\item {}
+In theory, S3QL is not fully compatible with NFS. Since S3QL does
+not support \emph{inode generation numbers}, NFS clients may (once again,
+in theory) accidentally read or write the wrong file in the
+following situation:
+\begin{enumerate}
+\item {}
+An S3QL file system is exported over NFS
+
+\item {}
+NFS client 1 opens a file A
+
+\item {}
+Another NFS client 2 (or the server itself) deletes file A (without
+client 1 knowing about this)
+
+\item {}
+A new file B is created by either of the clients or the server
+
+\item {}
+NFS client 1 tries to read or write file A (which has actually already been deleted).
+
+\end{enumerate}
+
+In this situation it is possible that NFS client 1 actually writes
+or reads the newly created file B instead. The chances of this are 1
+to (2\textasciicircum{}32 - \emph{n}) where \emph{n} is the total number of directory entries
+in the S3QL file system (as displayed by \code{s3qlstat}).
+
+Luckily enough, as long as you have less than about 2 thousand
+million directory entries (2\textasciicircum{}31), the chances of this are
+negligible and you don't have to worry about it.
+
+\item {}
+The \code{umount} and \code{fusermount -u} commands will \emph{not} block until all
+data has been uploaded to the backend. (this is a FUSE limitation
+that will hopefully be removed in the future, see \href{http://code.google.com/p/s3ql/issues/detail?id=159}{issue 159}). If you use
+either command to unmount an S3QL file system, you have to take care
+to explicitly wait for the \code{mount.s3ql} process to terminate before
+you shut down or restart the system. Therefore it is generally not a
+good idea to mount an S3QL file system in \code{/etc/fstab} (you should
+use a dedicated init script instead).
+
+\item {}
+S3QL relies on the backends not to run out of space. This is a given
+for big storage providers like Amazon S3, but you may stumble upon
+this if you store buckets e.g. on a small sftp server.
+
+If there is no space left in the backend, attempts to write more
+data into the S3QL file system will fail, leaving the file system in
+an inconsistent state that requires a file system check (make sure
+to free up space in the backend before running the check).
+
+Unfortunately, there is no way to handle insufficient space in the
+backend without leaving the file system inconsistent. Since
+S3QL first writes data into the cache, it can no longer return an
+error when it later turns out that the cache can not be committed to
+the backend.
+
+\end{itemize}
+
+
+\chapter{Manpages}
+\label{man/index:manpages}\label{man/index::doc}
+The man pages are installed with S3QL on your system and can be viewed
+with the \textbf{man} command. For reference, they are also included
+here in the User's Guide.
+
+
+\section{The \textbf{mkfs.s3ql} command}
+\label{man/mkfs:the-mkfs-s3ql-command}\label{man/mkfs::doc}
+
+\subsection{Synopsis}
+\label{man/mkfs:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{mkfs.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/mkfs:description}
+The \textbf{mkfs.s3ql} command creates a new file system in the location
+specified by \emph{storage url}.
+
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+
+
+\subsubsection{Amazon S3}
+\label{man/mkfs:amazon-s3}
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \code{s3://\textless{}bucketname\textgreater{}}. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+
+
+\subsubsection{Local}
+\label{man/mkfs:local}
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\code{local://\textless{}path\textgreater{}}. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \code{local:///var/archive}.
+
+
+\subsubsection{SFTP}
+\label{man/mkfs:sftp}
+The storage URL for SFTP connections has the form
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{sftp://}\PYG{n+nv}{\textless{}host\textgreater{}}\PYG{g+ge}{[:port]}\PYG{l}{/}\PYG{n+nv}{\textless{}path\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Options}
+\label{man/mkfs:options}
+The \textbf{mkfs.s3ql} command accepts the following options.
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication
+info. (default: \code{\textasciitilde{}/.s3ql})
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to
+get debug messages from all modules. This option can
+be specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+\item [-{-}s3-location \textless{}name\textgreater{}]
+Storage location for new S3 buckets. Allowed values:
+\code{EU}, \code{us-west-1}, \code{ap-southeast-1}, or \code{us-standard}.
+(default: EU)
+\item [-L \textless{}name\textgreater{}]
+Filesystem label
+\item [-{-}blocksize \textless{}size\textgreater{}]
+Maximum block size in KB (default: 10240)
+\item [-{-}plain]
+Create unencrypted file system.
+\item [-{-}force]
+Overwrite any existing data.
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Files}
+\label{man/mkfs:files}
+Authentication data for backends and bucket encryption passphrases are
+read from \code{authinfo} in \code{\textasciitilde{}/.s3ql} or the directory
+specified with \code{-{-}homedir}. Log files are placed in the same
+directory.
+
+
+\subsection{Exit Status}
+\label{man/mkfs:exit-status}
+\textbf{mkfs.s3ql} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/mkfs:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{s3qladm} command}
+\label{man/adm::doc}\label{man/adm:the-s3qladm-command}
+
+\subsection{Synopsis}
+\label{man/adm:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qladm }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}action\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+where \code{action} may be one of \textbf{passphrase},
+\textbf{upgrade}, \textbf{delete} or \textbf{download-metadata}.
+
+
+\subsection{Description}
+\label{man/adm:description}
+The \textbf{s3qladm} command performs various operations on S3QL buckets.
+The file system contained in the bucket \emph{must not be mounted} when
+using \textbf{s3qladm}, or things will go badly wrong.
+
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+
+
+\subsubsection{Amazon S3}
+\label{man/adm:amazon-s3}
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \code{s3://\textless{}bucketname\textgreater{}}. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+
+
+\subsubsection{Local}
+\label{man/adm:local}
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\code{local://\textless{}path\textgreater{}}. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \code{local:///var/archive}.
+
+
+\subsubsection{SFTP}
+\label{man/adm:sftp}
+The storage URL for SFTP connections has the form
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{sftp://}\PYG{n+nv}{\textless{}host\textgreater{}}\PYG{g+ge}{[:port]}\PYG{l}{/}\PYG{n+nv}{\textless{}path\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Options}
+\label{man/adm:options}
+The \textbf{s3qladm} command accepts the following options.
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to get
+debug messages from all modules. This option can be
+specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication info.
+(default: \code{\textasciitilde{}/.s3ql})
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+\end{optionlist}
+\end{quote}
+
+Hint: run \code{s3qladm \textless{}action\textgreater{} -{-}help} to get help on the additional
+arguments that the different actions take.
+
+
+\subsection{Actions}
+\label{man/adm:actions}
+The following actions may be specified:
+\begin{description}
+\item[{passphrase}] \leavevmode
+Changes the encryption passphrase of the bucket.
+
+\item[{upgrade}] \leavevmode
+Upgrade the file system contained in the bucket to the newest revision.
+
+\item[{delete}] \leavevmode
+Delete the bucket and all its contents.
+
+\item[{download-metadata}] \leavevmode
+Interactively download backups of the file system metadata.
+
+\end{description}
+
+
+\subsection{Files}
+\label{man/adm:files}
+Authentication data for backends and bucket encryption passphrases are
+read from \code{authinfo} in \code{\textasciitilde{}/.s3ql} or the directory
+specified with \code{-{-}homedir}. Log files are placed in the same
+directory.
+
+
+\subsection{Exit Status}
+\label{man/adm:exit-status}
+\textbf{s3qladm} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/adm:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{mount.s3ql} command}
+\label{man/mount::doc}\label{man/mount:the-mount-s3ql-command}
+
+\subsection{Synopsis}
+\label{man/mount:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{mount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}storage url\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}mount point\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/mount:description}
+The \textbf{mount.s3ql} command mounts the S3QL file system stored in \emph{storage
+url} in the directory \emph{mount point}.
+
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+
+
+\subsubsection{Amazon S3}
+\label{man/mount:amazon-s3}
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \code{s3://\textless{}bucketname\textgreater{}}. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+
+
+\subsubsection{Local}
+\label{man/mount:local}
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\code{local://\textless{}path\textgreater{}}. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \code{local:///var/archive}.
+
+
+\subsubsection{SFTP}
+\label{man/mount:sftp}
+The storage URL for SFTP connections has the form
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{sftp://}\PYG{n+nv}{\textless{}host\textgreater{}}\PYG{g+ge}{[:port]}\PYG{l}{/}\PYG{n+nv}{\textless{}path\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Options}
+\label{man/mount:options}
+The \textbf{mount.s3ql} command accepts the following options.
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication
+info. (default: \code{\textasciitilde{}/.s3ql})
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to
+get debug messages from all modules. This option can
+be specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+\item [-{-}cachesize \textless{}size\textgreater{}]
+Cache size in kb (default: 102400 (100 MB)). Should be
+at least 10 times the blocksize of the filesystem,
+otherwise an object may be retrieved and written
+several times during a single write() or read()
+operation.
+\item [-{-}max-cache-entries \textless{}num\textgreater{}]
+Maximum number of entries in cache (default: 768).
+Each cache entry requires one file descriptor, so if
+you increase this number you have to make sure that
+your process file descriptor limit (as set with
+\code{ulimit -n}) is high enough (at least the number of
+cache entries + 100).
+\item [-{-}allow-other]
+Normally, only the user who called \code{mount.s3ql} can
+access the mount point. This user then also has full
+access to it, independent of individual file
+permissions. If the \code{-{-}allow-other} option is
+specified, other users can access the mount point as
+well and individual file permissions are taken into
+account for all users.
+\item [-{-}allow-root]
+Like \code{-{-}allow-other}, but restrict access to the
+mounting user and the root user.
+\item [-{-}fg]
+Do not daemonize, stay in foreground
+\item [-{-}single]
+Run in single threaded mode. If you don't understand
+this, then you don't need it.
+\item [-{-}upstart]
+Stay in foreground and raise SIGSTOP once mountpoint
+is up.
+\item [-{-}profile]
+Create profiling information. If you don't understand
+this, then you don't need it.
+\item [-{-}compress \textless{}name\textgreater{}]
+Compression algorithm to use when storing new data.
+Allowed values: \code{lzma}, \code{bzip2}, \code{zlib}, \code{none}.
+(default: \code{lzma})
+\item [-{-}metadata-upload-interval \textless{}seconds\textgreater{}]
+Interval in seconds between complete metadata uploads.
+Set to 0 to disable. Default: 24h.
+\item [-{-}compression-threads \textless{}no\textgreater{}]
+Number of parallel compression and encryption threads
+to use (default: 1).
+\end{optionlist}
+\end{quote}
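+
+As an illustration of the file descriptor requirement described under
+\code{-{-}max-cache-entries} above (all values are made up for the
+example):
+
+\begin{Verbatim}
+# 4096 cache entries need at least 4096 + 100 file descriptors
+ulimit -n 4300
+mount.s3ql --max-cache-entries 4096 s3://my-backup-bucket /mnt/s3ql
+\end{Verbatim}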
+
+
+\subsection{Files}
+\label{man/mount:files}
+Authentication data for backends and bucket encryption passphrases are
+read from \code{authinfo} in \code{\textasciitilde{}/.s3ql} or the directory
+specified with \code{-{-}homedir}. Log files are placed in the same
+directory.
+
+
+\subsection{Exit Status}
+\label{man/mount:exit-status}
+\textbf{mount.s3ql} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/mount:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{s3qlstat} command}
+\label{man/stat:the-s3qlstat-command}\label{man/stat::doc}
+
+\subsection{Synopsis}
+\label{man/stat:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlstat }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}mountpoint\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/stat:description}
+The \textbf{s3qlstat} command prints statistics about the S3QL file system mounted
+at \code{mountpoint}.
+
+\textbf{s3qlstat} can only be called by the user that mounted the file system
+and (if the file system was mounted with \code{-{-}allow-other} or
+\code{-{-}allow-root}) the root user. This limitation might be
+removed in the future (see \href{http://code.google.com/p/s3ql/issues/detail?id=155}{issue 155}).
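+
+A minimal invocation looks like this (the mount point is a
+placeholder):
+
+\begin{Verbatim}
+s3qlstat /mnt/s3ql
+\end{Verbatim}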
+
+
+\subsection{Options}
+\label{man/stat:options}
+The \textbf{s3qlstat} command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug]
+activate debugging output
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Exit Status}
+\label{man/stat:exit-status}
+\textbf{s3qlstat} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/stat:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{s3qlctrl} command}
+\label{man/ctrl:the-s3qlctrl-command}\label{man/ctrl::doc}
+
+\subsection{Synopsis}
+\label{man/ctrl:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlctrl }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}action\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}mountpoint\textgreater{}}\PYG{l}{ ...}
+\end{Verbatim}
+
+where \code{action} may be one of \textbf{flushcache},
+\textbf{upload-meta}, \textbf{cachesize} or \textbf{log}.
+
+
+\subsection{Description}
+\label{man/ctrl:description}
+The \textbf{s3qlctrl} command performs various actions on the S3QL file system mounted
+in \code{mountpoint}.
+
+The following actions may be specified:
+\begin{description}
+\item[{flushcache}] \leavevmode
+Uploads all changed file data to the backend.
+
+\item[{upload-meta}] \leavevmode
+Upload metadata to the backend. All file system operations will
+block while a snapshot of the metadata is prepared for upload.
+
+\item[{cachesize}] \leavevmode
+Changes the cache size of the file system. This action requires an
+additional argument that specifies the new cache size in kB, so the
+complete command line is:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlctrl }\PYG{g+ge}{[options]}\PYG{l}{ cachesize }\PYG{n+nv}{\textless{}mountpoint\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}new-cache-size\textgreater{}}
+\end{Verbatim}
+
+\item[{log}] \leavevmode
+Change the amount of information that is logged into the
+\code{\textasciitilde{}/.s3ql/mount.log} file. The complete syntax is:
+
+\begin{Verbatim}[commandchars=@\[\]]
+s3qlctrl @PYGZlb[]options@PYGZrb[] log @textless[]mountpoint@textgreater[] @textless[]level@textgreater[] @PYGZlb[]@textless[]module@textgreater[] @PYGZlb[]@textless[]module@textgreater[] ...@PYGZrb[]@PYGZrb[]
+\end{Verbatim}
+
+here \code{level} is the desired new log level and may be either of
+\emph{debug}, \emph{info} or \emph{warn}. One or more \code{module}
+names may be given, but only together with the \emph{debug} level;
+they restrict the debug output to just the listed modules (see the
+example after this list).
+
+\end{description}
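+
+For example, to reduce logging to warnings only, and later to turn on
+debug output for a single module (the mount point and module name are
+hypothetical):
+
+\begin{Verbatim}
+s3qlctrl log /mnt/s3ql warn
+s3qlctrl log /mnt/s3ql debug s3ql.block_cache
+\end{Verbatim}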
+
+
+\subsection{Options}
+\label{man/ctrl:options}
+The \textbf{s3qlctrl} command also accepts the following options, no matter
+what specific action is being invoked:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug]
+activate debugging output
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\end{optionlist}
+\end{quote}
+
+Hint: run \code{s3qlctrl \textless{}action\textgreater{} -{-}help} to get help on the additional
+arguments that the different actions take.
+
+
+\subsection{Exit Status}
+\label{man/ctrl:exit-status}
+\textbf{s3qlctrl} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/ctrl:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{s3qlcp} command}
+\label{man/cp:the-s3qlcp-command}\label{man/cp::doc}
+
+\subsection{Synopsis}
+\label{man/cp:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlcp }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}source-dir\textgreater{}}\PYG{l}{ }\PYG{n+nv}{\textless{}dest-dir\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/cp:description}
+The \textbf{s3qlcp} command duplicates the directory tree \code{source-dir}
+into \code{dest-dir} without physically copying the file contents.
+Both source and destination must lie inside the same S3QL file system.
+
+The replication will not take any additional space. Only if one of
+the directories is modified later on will the modified data take
+additional storage space.
+
+\code{s3qlcp} can only be called by the user that mounted the file system
+and (if the file system was mounted with \code{-{-}allow-other} or \code{-{-}allow-root})
+the root user. This limitation might be removed in the future (see \href{http://code.google.com/p/s3ql/issues/detail?id=155}{issue 155}).
+
+Note that:
+\begin{itemize}
+\item {}
+After the replication, both source and target directory will still
+be completely ordinary directories. You can regard \code{\textless{}src\textgreater{}} as a
+snapshot of \code{\textless{}target\textgreater{}} or vice versa. However, the most common
+usage of \code{s3qlcp} is to regularly duplicate the same source
+directory, say \code{documents}, to different target directories. For
+a monthly replication, for example, the target directories would
+typically be named something like \code{documents\_January} for the
+replication in January, \code{documents\_February} for the replication
+in February, etc. In this case it is clear that the target directories
+should be regarded as snapshots of the source directory (see the
+example after this list).
+
+\item {}
+Exactly the same effect could be achieved with an ordinary copy
+program like \code{cp -a}. However, this procedure would be orders of
+magnitude slower, because \code{cp} would have to read every file
+completely (so that S3QL would have to fetch all the data over the
+network from the backend) before writing it into the destination
+folder.
+
+\item {}
+Before starting with the replication, S3QL has to flush the local
+cache. So if you just copied lots of new data into the file system
+that has not yet been uploaded, replication will take longer than
+usual.
+
+\end{itemize}
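+
+A sketch of the monthly snapshot scheme mentioned above (paths are
+placeholders; both must lie on the same S3QL file system):
+
+\begin{Verbatim}
+s3qlcp /mnt/s3ql/documents /mnt/s3ql/documents_January
+\end{Verbatim}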
+
+
+\subsubsection{Snapshotting vs Hardlinking}
+\label{man/cp:snapshotting-vs-hardlinking}
+Snapshot support in S3QL is inspired by the hardlinking feature that
+is offered by programs like \href{http://www.samba.org/rsync}{rsync} or
+\href{http://savannah.nongnu.org/projects/storebackup}{storeBackup}.
+These programs can create a hardlink instead of copying a file if an
+identical file already exists in the backup. However, using hardlinks
+has two large disadvantages:
+\begin{itemize}
+\item {}
+backups and restores always have to be made with a special program
+that takes care of the hardlinking. The backup must not be touched
+by any other programs (they may make changes that inadvertently
+affect other hardlinked files)
+
+\item {}
+special care needs to be taken to handle files which are already
+hardlinked (the restore program needs to know that the hardlink was
+not just introduced by the backup program to save space)
+
+\end{itemize}
+
+S3QL snapshots do not have these problems, and they can be used with
+any backup program.
+
+
+\subsection{Options}
+\label{man/cp:options}
+The \textbf{s3qlcp} command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug]
+activate debugging output
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Exit Status}
+\label{man/cp:exit-status}
+\textbf{s3qlcp} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/cp:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{s3qlrm} command}
+\label{man/rm::doc}\label{man/rm:the-s3qlrm-command}
+
+\subsection{Synopsis}
+\label{man/rm:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qlrm }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}directory\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/rm:description}
+The \textbf{s3qlrm} command recursively deletes files and directories on an
+S3QL file system. Although \textbf{s3qlrm} is faster than using e.g.
+\textbf{rm -r}, the main reason for its existence is that it allows
+you to delete immutable trees (which can be created with
+\textbf{s3qllock}) as well.
+
+Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.
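+
+For example, to delete a (possibly immutable) backup tree (the path
+is a placeholder):
+
+\begin{Verbatim}
+s3qlrm /mnt/s3ql/documents_January
+\end{Verbatim}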
+
+
+\subsection{Options}
+\label{man/rm:options}
+The \textbf{s3qlrm} command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug]
+activate debugging output
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Exit Status}
+\label{man/rm:exit-status}
+\textbf{s3qlrm} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/rm:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{s3qllock} command}
+\label{man/lock:the-s3qllock-command}\label{man/lock::doc}
+
+\subsection{Synopsis}
+\label{man/lock:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{s3qllock }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}directory\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/lock:description}
+The \textbf{s3qllock} command makes a directory tree in an S3QL file
+system immutable. Immutable trees can no longer be changed in any way
+whatsoever. You cannot add new files or directories, and you cannot
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the \textbf{s3qlrm} command.
+
+
+\subsection{Rationale}
+\label{man/lock:rationale}
+Immutability is a feature designed for backups. Traditionally, backups
+have been made on external tape drives. Once a backup was made, the
+tape was removed and locked away on a shelf. This has the
+great advantage that the contents of the backup are now permanently
+fixed. Nothing (short of physical destruction) can change or delete
+files in the backup.
+
+In contrast, when backing up into an online storage system like S3QL,
+all backups are available every time the file system is mounted.
+Nothing prevents a file in an old backup from being changed again
+later on. In the worst case, this may make your entire backup system
+worthless. Imagine that your system gets infected by a nasty virus
+that simply deletes all files it can find -- if the virus is active
+while the backup file system is mounted, the virus will destroy all
+your old backups as well!
+
+Even if the possibility of a malicious virus or trojan horse is
+excluded, being able to change a backup after it has been made is
+generally not a good idea. A common S3QL use case is to keep the file
+system mounted at all times and periodically create backups with
+\textbf{rsync -a}. This allows every user to recover her files from a
+backup without having to call the system administrator. However, this
+also allows every user to accidentally change or delete files \emph{in} one
+of the old backups.
+
+Making a backup immutable protects you against all these problems.
+Unless you happen to run into a virus that was specifically programmed
+to attack S3QL file systems, backups can be neither deleted nor
+changed after they have been made immutable.
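+
+A minimal sketch of this backup pattern (paths are placeholders):
+
+\begin{Verbatim}
+# copy the data, then freeze the finished backup
+rsync -a ~/documents/ /mnt/s3ql/backup_2011-01-01/
+s3qllock /mnt/s3ql/backup_2011-01-01
+\end{Verbatim}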
+
+
+\subsection{Options}
+\label{man/lock:options}
+The \textbf{s3qllock} command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug]
+activate debugging output
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Exit Status}
+\label{man/lock:exit-status}
+\textbf{s3qllock} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/lock:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{umount.s3ql} command}
+\label{man/umount::doc}\label{man/umount:the-umount-s3ql-command}
+
+\subsection{Synopsis}
+\label{man/umount:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{umount.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}mount point\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/umount:description}
+The \textbf{umount.s3ql} command unmounts the S3QL file system mounted in the
+directory \emph{mount point} and blocks until all data has been uploaded to
+the storage backend.
+
+Only the user who mounted the file system with \textbf{mount.s3ql}
+is able to unmount it with \textbf{umount.s3ql}. If you are root and want to
+unmount an S3QL file system mounted by an ordinary user, you have to
+use the \textbf{fusermount -u} or \textbf{umount} command instead.
+Note that these commands do not block until all data has been
+uploaded, so if you use them instead of \textbf{umount.s3ql} then
+you should manually wait for the \textbf{mount.s3ql} process to
+terminate before shutting down the system.
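+
+For illustration (the mount point is a placeholder):
+
+\begin{Verbatim}
+# as the mounting user; blocks until all data is uploaded
+umount.s3ql /mnt/s3ql
+# as root, for a file system mounted by another user
+fusermount -u /mnt/s3ql
+\end{Verbatim}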
+
+
+\subsection{Options}
+\label{man/umount:options}
+The \textbf{umount.s3ql} command accepts the following options.
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}debug]
+activate debugging output
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}lazy, -z]
+Lazy umount. Detaches the file system immediately, even if
+there are still open files. The data will be uploaded in the
+background once all open files have been closed.
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Exit Status}
+\label{man/umount:exit-status}
+\textbf{umount.s3ql} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/umount:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{fsck.s3ql} command}
+\label{man/fsck::doc}\label{man/fsck:the-fsck-s3ql-command}
+
+\subsection{Synopsis}
+\label{man/fsck:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{fsck.s3ql }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}storage url\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/fsck:description}
+The \textbf{fsck.s3ql} command checks the file system in the location
+specified by \emph{storage url} for errors and attempts to repair any
+problems.
+
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+
+
+\subsubsection{Amazon S3}
+\label{man/fsck:amazon-s3}
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \code{s3://\textless{}bucketname\textgreater{}}. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+
+
+\subsubsection{Local}
+\label{man/fsck:local}
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\code{local://\textless{}path\textgreater{}}. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \code{local:///var/archive}.
+
+
+\subsubsection{SFTP}
+\label{man/fsck:sftp}
+The storage URL for SFTP connections has the form
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{sftp://}\PYG{n+nv}{\textless{}host\textgreater{}}\PYG{g+ge}{[:port]}\PYG{l}{/}\PYG{n+nv}{\textless{}path\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Options}
+\label{man/fsck:options}
+The \textbf{fsck.s3ql} command accepts the following options.
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}homedir \textless{}path\textgreater{}]
+Directory for log files, cache and authentication info.
+(default: \code{\textasciitilde{}/.s3ql})
+\item [-{-}debug \textless{}module\textgreater{}]
+activate debugging output from \textless{}module\textgreater{}. Use \code{all} to get
+debug messages from all modules. This option can be
+specified multiple times.
+\item [-{-}quiet]
+be really quiet
+\item [-{-}version]
+just print program version and exit
+\item [-{-}ssl]
+Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext even
+for unencrypted file systems.
+\item [-{-}batch]
+If user input is required, exit without prompting.
+\item [-{-}force]
+Force checking even if file system is marked clean.
+\end{optionlist}
+\end{quote}
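+
+For example, to check a file system even though it is marked clean
+(the bucket name is a placeholder):
+
+\begin{Verbatim}
+fsck.s3ql --force s3://my-backup-bucket
+\end{Verbatim}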
+
+
+\subsection{Files}
+\label{man/fsck:files}
+Authentication data for backends and bucket encryption passphrases are
+read from \code{authinfo} in \code{\textasciitilde{}/.s3ql} or the directory
+specified with \code{-{-}homedir}. Log files are placed in the same
+directory.
+
+
+\subsection{Exit Status}
+\label{man/fsck:exit-status}
+\textbf{fsck.s3ql} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/fsck:see-also}
+The S3QL homepage is at \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+The full S3QL documentation should also be installed somewhere on your
+system, conventional locations are \code{/usr/share/doc/s3ql} or
+\code{/usr/local/doc/s3ql}.
+
+
+\section{The \textbf{pcp} command}
+\label{man/pcp:the-pcp-command}\label{man/pcp::doc}
+
+\subsection{Synopsis}
+\label{man/pcp:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{pcp }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}source\textgreater{}}\PYG{l}{ }\PYG{g+ge}{[\textless{}source\textgreater{} ...]}\PYG{l}{ }\PYG{n+nv}{\textless{}destination\textgreater{}}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/pcp:description}
+The \textbf{pcp} command is a wrapper that starts several
+\textbf{rsync} processes to copy directory trees in parallel. This
+allows much better copying performance on file systems that have
+relatively high latency when retrieving individual files, like S3QL.
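+
+A sketch of a parallel copy with rsync's archive options (paths and
+process count are illustrative):
+
+\begin{Verbatim}
+pcp -a --processes 8 /mnt/s3ql/documents/ ~/restore/
+\end{Verbatim}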
+
+
+\subsection{Options}
+\label{man/pcp:options}
+The \textbf{pcp} command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}quiet]
+be really quiet
+\item [-{-}debug]
+activate debugging output
+\item [-{-}version]
+just print program version and exit
+\item [-a]
+Pass -aHAX option to rsync.
+\item [-{-}processes \textless{}no\textgreater{}]
+Number of rsync processes to use (default: 10).
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Exit Status}
+\label{man/pcp:exit-status}
+\textbf{pcp} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/pcp:see-also}
+\textbf{pcp} is shipped as part of S3QL, \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+
+\section{The \textbf{expire\_backups} command}
+\label{man/expire_backups::doc}\label{man/expire_backups:the-expire-backups-command}
+
+\subsection{Synopsis}
+\label{man/expire_backups:synopsis}
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{expire\PYGZus{}backups }\PYG{g+ge}{[options]}\PYG{l}{ }\PYG{n+nv}{\textless{}age\textgreater{}}\PYG{l}{ }\PYG{g+ge}{[\textless{}age\textgreater{} ...]}
+\end{Verbatim}
+
+
+\subsection{Description}
+\label{man/expire_backups:description}
+The \textbf{expire\_backups} command intelligently removes old backups that are no
+longer needed.
+
+To define what backups you want to keep for how long, you define a
+number of \emph{age ranges}. \textbf{expire\_backups} ensures that you
+will have at least one backup in each age range at all times. It will
+keep exactly as many backups as are required for that and delete any
+backups that become redundant.
+
+Age ranges are specified by giving a list of range boundaries in terms
+of backup cycles. Every time you create a new backup, the existing
+backups age by one cycle.
+
+Example: when \textbf{expire\_backups} is called with the age range
+definition \code{1 3 7 14 31}, it will guarantee that you always have the
+following backups available:
+\begin{enumerate}
+\item {}
+A backup that is 0 to 1 cycles old (i.e., the most recent backup)
+
+\item {}
+A backup that is 1 to 3 cycles old
+
+\item {}
+A backup that is 3 to 7 cycles old
+
+\item {}
+A backup that is 7 to 14 cycles old
+
+\item {}
+A backup that is 14 to 31 cycles old
+
+\end{enumerate}
+
+\begin{notice}{note}{Note:}
+If you do backups in fixed intervals, then one cycle will be
+equivalent to the backup interval. The advantage of specifying the
+age ranges in terms of backup cycles rather than days or weeks is
+that it allows you to gracefully handle irregular backup intervals.
+Imagine that for some reason you do not turn on your computer for
+one month. Now all your backups are at least a month old, and if you
+had specified the above backup strategy in terms of absolute ages,
+they would all be deleted! Specifying age ranges in terms of backup
+cycles avoids this sort of problem.
+\end{notice}
+
+\textbf{expire\_backups} usage is simple. It requires backups to have
+names of the form \code{year-month-day\_hour:minute:seconds}
+(\code{YYYY-MM-DD\_HH:mm:ss}) and works on all backups in the current
+directory. So for the above backup strategy, the correct invocation
+would be:
+
+\begin{Verbatim}[commandchars=\\\{\}]
+\PYG{l}{expire\PYGZus{}backups.py 1 3 7 14 31}
+\end{Verbatim}
+
+When storing your backups on an S3QL file system, you probably want to
+specify the \code{-{-}use-s3qlrm} option as well. This tells
+\textbf{expire\_backups} to use the {\hyperref[special:s3qlrm]{\emph{s3qlrm}}} command to
+delete directories.
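+
+With this option, the invocation from above would become:
+
+\begin{Verbatim}
+expire_backups.py --use-s3qlrm 1 3 7 14 31
+\end{Verbatim}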
+
+\textbf{expire\_backups} uses a ``state file'' to keep track of how
+many cycles old each backup is (since this cannot be inferred from
+the dates contained in the directory names). The standard name for
+this state file is \code{.expire\_backups.dat}. If this file gets
+damaged or deleted, \textbf{expire\_backups} no longer knows the ages
+of the backups and refuses to work. In this case you can use the
+\code{-{-}reconstruct-state} option to try to reconstruct the state
+from the backup dates. However, the accuracy of this reconstruction
+depends strongly on how rigorous you have been with making backups (it
+is only completely correct if the time between subsequent backups has
+always been exactly the same), so it's generally a good idea not to
+tamper with the state file.
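+
+If the state file does get lost, a cautious first step is to combine
+\code{-{-}reconstruct-state} with a dry run (age ranges as in the
+example above):
+
+\begin{Verbatim}
+expire_backups.py --reconstruct-state -n 1 3 7 14 31
+\end{Verbatim}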
+
+
+\subsection{Options}
+\label{man/expire_backups:options}
+The \textbf{expire\_backups} command accepts the following options:
+\begin{quote}
+\begin{optionlist}{3cm}
+\item [-{-}quiet]
+be really quiet
+\item [-{-}debug]
+activate debugging output
+\item [-{-}version]
+just print program version and exit
+\item [-{-}state \textless{}file\textgreater{}]
+File to save state information in (default:
+``.expire\_backups.dat'')
+\item [-n]
+Dry run. Just show which backups would be deleted.
+\item [-{-}reconstruct-state]
+Try to reconstruct a missing state file from backup
+dates.
+\item [-{-}use-s3qlrm]
+Use \code{s3qlrm} command to delete backups.
+\end{optionlist}
+\end{quote}
+
+
+\subsection{Exit Status}
+\label{man/expire_backups:exit-status}
+\textbf{expire\_backups} returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+\subsection{See Also}
+\label{man/expire_backups:see-also}
+\textbf{expire\_backups} is shipped as part of S3QL, \href{http://code.google.com/p/s3ql/}{http://code.google.com/p/s3ql/}.
+
+
+\chapter{Further Resources / Getting Help}
+\label{resources::doc}\label{resources:further-resources-getting-help}\label{resources:resources}
+If you have questions or problems with S3QL that you weren't able to
+resolve with this manual, you might want to consider the following other resources:
+\begin{itemize}
+\item {}
+The \href{http://code.google.com/p/s3ql/w/list}{S3QL Wiki}
+
+\item {}
+The \href{http://code.google.com/p/s3ql/wiki/FAQ}{S3QL FAQ}
+
+\item {}
+The \href{http://groups.google.com/group/s3ql}{S3QL Mailing List}. You
+can subscribe by sending a mail to
+\href{mailto:s3ql+subscribe@googlegroups.com}{s3ql+subscribe@googlegroups.com}.
+
+\end{itemize}
+
+Please report any bugs you may encounter in the \href{http://code.google.com/p/s3ql/issues/list}{Issue Tracker}.
+
+
+
+\renewcommand{\indexname}{Index}
+\printindex
+\end{document}
diff --git a/doc/latex/manual.toc b/doc/latex/manual.toc
new file mode 100644
index 0000000..2996158
--- /dev/null
+++ b/doc/latex/manual.toc
@@ -0,0 +1,142 @@
+\select@language {english}
+\contentsline {chapter}{\numberline {1}About S3QL}{1}{chapter.1}
+\contentsline {section}{\numberline {1.1}Features}{1}{section.1.1}
+\contentsline {section}{\numberline {1.2}Development Status}{2}{section.1.2}
+\contentsline {chapter}{\numberline {2}Installation}{3}{chapter.2}
+\contentsline {section}{\numberline {2.1}Dependencies}{3}{section.2.1}
+\contentsline {section}{\numberline {2.2}Installing S3QL}{4}{section.2.2}
+\contentsline {chapter}{\numberline {3}Storage Backends}{5}{chapter.3}
+\contentsline {section}{\numberline {3.1}On Backend Reliability}{5}{section.3.1}
+\contentsline {section}{\numberline {3.2}The \texttt {authinfo} file}{6}{section.3.2}
+\contentsline {section}{\numberline {3.3}Consistency Guarantees}{6}{section.3.3}
+\contentsline {subsection}{\numberline {3.3.1}Dealing with Eventual Consistency}{6}{subsection.3.3.1}
+\contentsline {section}{\numberline {3.4}The Amazon S3 Backend}{7}{section.3.4}
+\contentsline {section}{\numberline {3.5}The Local Backend}{8}{section.3.5}
+\contentsline {section}{\numberline {3.6}The SFTP Backend}{8}{section.3.6}
+\contentsline {chapter}{\numberline {4}File System Creation}{9}{chapter.4}
+\contentsline {chapter}{\numberline {5}Managing Buckets}{11}{chapter.5}
+\contentsline {section}{\numberline {5.1}Changing the Passphrase}{11}{section.5.1}
+\contentsline {section}{\numberline {5.2}Upgrading the file system}{11}{section.5.2}
+\contentsline {section}{\numberline {5.3}Deleting a file system}{12}{section.5.3}
+\contentsline {section}{\numberline {5.4}Restoring Metadata Backups}{12}{section.5.4}
+\contentsline {chapter}{\numberline {6}Mounting}{13}{chapter.6}
+\contentsline {section}{\numberline {6.1}Storing Encryption Passwords}{14}{section.6.1}
+\contentsline {section}{\numberline {6.2}Compression Algorithms}{14}{section.6.2}
+\contentsline {section}{\numberline {6.3}Parallel Compression}{15}{section.6.3}
+\contentsline {section}{\numberline {6.4}Notes about Caching}{15}{section.6.4}
+\contentsline {subsection}{\numberline {6.4.1}Maximum Number of Cache Entries}{15}{subsection.6.4.1}
+\contentsline {subsection}{\numberline {6.4.2}Cache Flushing and Expiration}{15}{subsection.6.4.2}
+\contentsline {section}{\numberline {6.5}Automatic Mounting}{15}{section.6.5}
+\contentsline {chapter}{\numberline {7}Advanced S3QL Features}{17}{chapter.7}
+\contentsline {section}{\numberline {7.1}Snapshotting and Copy-on-Write}{17}{section.7.1}
+\contentsline {subsection}{\numberline {7.1.1}Snapshotting vs Hardlinking}{17}{subsection.7.1.1}
+\contentsline {section}{\numberline {7.2}Getting Statistics}{18}{section.7.2}
+\contentsline {section}{\numberline {7.3}Immutable Trees}{18}{section.7.3}
+\contentsline {section}{\numberline {7.4}Fast Recursive Removal}{19}{section.7.4}
+\contentsline {section}{\numberline {7.5}Runtime Configuration}{19}{section.7.5}
+\contentsline {chapter}{\numberline {8}Unmounting}{21}{chapter.8}
+\contentsline {chapter}{\numberline {9}Checking for Errors}{23}{chapter.9}
+\contentsline {chapter}{\numberline {10}Contributed Programs}{25}{chapter.10}
+\contentsline {section}{\numberline {10.1}benchmark.py}{25}{section.10.1}
+\contentsline {section}{\numberline {10.2}s3\_copy.py}{25}{section.10.2}
+\contentsline {section}{\numberline {10.3}pcp.py}{25}{section.10.3}
+\contentsline {section}{\numberline {10.4}s3\_backup.sh}{25}{section.10.4}
+\contentsline {section}{\numberline {10.5}expire\_backups.py}{26}{section.10.5}
+\contentsline {section}{\numberline {10.6}s3ql.conf}{27}{section.10.6}
+\contentsline {chapter}{\numberline {11}Tips \& Tricks}{29}{chapter.11}
+\contentsline {section}{\numberline {11.1}Permanently mounted backup file system}{29}{section.11.1}
+\contentsline {section}{\numberline {11.2}Improving copy performance}{29}{section.11.2}
+\contentsline {chapter}{\numberline {12}Known Issues}{31}{chapter.12}
+\contentsline {chapter}{\numberline {13}Manpages}{33}{chapter.13}
+\contentsline {section}{\numberline {13.1}The \textbf {mkfs.s3ql} command}{33}{section.13.1}
+\contentsline {subsection}{\numberline {13.1.1}Synopsis}{33}{subsection.13.1.1}
+\contentsline {subsection}{\numberline {13.1.2}Description}{33}{subsection.13.1.2}
+\contentsline {subsubsection}{Amazon S3}{33}{subsubsection*.3}
+\contentsline {subsubsection}{Local}{33}{subsubsection*.4}
+\contentsline {subsubsection}{SFTP}{33}{subsubsection*.5}
+\contentsline {subsection}{\numberline {13.1.3}Options}{34}{subsection.13.1.3}
+\contentsline {subsection}{\numberline {13.1.4}Files}{34}{subsection.13.1.4}
+\contentsline {subsection}{\numberline {13.1.5}Exit Status}{34}{subsection.13.1.5}
+\contentsline {subsection}{\numberline {13.1.6}See Also}{34}{subsection.13.1.6}
+\contentsline {section}{\numberline {13.2}The \textbf {s3qladm} command}{34}{section.13.2}
+\contentsline {subsection}{\numberline {13.2.1}Synopsis}{34}{subsection.13.2.1}
+\contentsline {subsection}{\numberline {13.2.2}Description}{35}{subsection.13.2.2}
+\contentsline {subsubsection}{Amazon S3}{35}{subsubsection*.6}
+\contentsline {subsubsection}{Local}{35}{subsubsection*.7}
+\contentsline {subsubsection}{SFTP}{35}{subsubsection*.8}
+\contentsline {subsection}{\numberline {13.2.3}Options}{35}{subsection.13.2.3}
+\contentsline {subsection}{\numberline {13.2.4}Actions}{35}{subsection.13.2.4}
+\contentsline {subsection}{\numberline {13.2.5}Files}{36}{subsection.13.2.5}
+\contentsline {subsection}{\numberline {13.2.6}Exit Status}{36}{subsection.13.2.6}
+\contentsline {subsection}{\numberline {13.2.7}See Also}{36}{subsection.13.2.7}
+\contentsline {section}{\numberline {13.3}The \textbf {mount.s3ql} command}{36}{section.13.3}
+\contentsline {subsection}{\numberline {13.3.1}Synopsis}{36}{subsection.13.3.1}
+\contentsline {subsection}{\numberline {13.3.2}Description}{36}{subsection.13.3.2}
+\contentsline {subsubsection}{Amazon S3}{36}{subsubsection*.9}
+\contentsline {subsubsection}{Local}{36}{subsubsection*.10}
+\contentsline {subsubsection}{SFTP}{36}{subsubsection*.11}
+\contentsline {subsection}{\numberline {13.3.3}Options}{37}{subsection.13.3.3}
+\contentsline {subsection}{\numberline {13.3.4}Files}{38}{subsection.13.3.4}
+\contentsline {subsection}{\numberline {13.3.5}Exit Status}{38}{subsection.13.3.5}
+\contentsline {subsection}{\numberline {13.3.6}See Also}{38}{subsection.13.3.6}
+\contentsline {section}{\numberline {13.4}The \textbf {s3qlstat} command}{38}{section.13.4}
+\contentsline {subsection}{\numberline {13.4.1}Synopsis}{38}{subsection.13.4.1}
+\contentsline {subsection}{\numberline {13.4.2}Description}{38}{subsection.13.4.2}
+\contentsline {subsection}{\numberline {13.4.3}Options}{38}{subsection.13.4.3}
+\contentsline {subsection}{\numberline {13.4.4}Exit Status}{38}{subsection.13.4.4}
+\contentsline {subsection}{\numberline {13.4.5}See Also}{38}{subsection.13.4.5}
+\contentsline {section}{\numberline {13.5}The \textbf {s3qlctrl} command}{39}{section.13.5}
+\contentsline {subsection}{\numberline {13.5.1}Synopsis}{39}{subsection.13.5.1}
+\contentsline {subsection}{\numberline {13.5.2}Description}{39}{subsection.13.5.2}
+\contentsline {subsection}{\numberline {13.5.3}Options}{39}{subsection.13.5.3}
+\contentsline {subsection}{\numberline {13.5.4}Exit Status}{39}{subsection.13.5.4}
+\contentsline {subsection}{\numberline {13.5.5}See Also}{39}{subsection.13.5.5}
+\contentsline {section}{\numberline {13.6}The \textbf {s3qlcp} command}{40}{section.13.6}
+\contentsline {subsection}{\numberline {13.6.1}Synopsis}{40}{subsection.13.6.1}
+\contentsline {subsection}{\numberline {13.6.2}Description}{40}{subsection.13.6.2}
+\contentsline {subsubsection}{Snapshotting vs Hardlinking}{40}{subsubsection*.12}
+\contentsline {subsection}{\numberline {13.6.3}Options}{40}{subsection.13.6.3}
+\contentsline {subsection}{\numberline {13.6.4}Exit Status}{41}{subsection.13.6.4}
+\contentsline {subsection}{\numberline {13.6.5}See Also}{41}{subsection.13.6.5}
+\contentsline {section}{\numberline {13.7}The \textbf {s3qlrm} command}{41}{section.13.7}
+\contentsline {subsection}{\numberline {13.7.1}Synopsis}{41}{subsection.13.7.1}
+\contentsline {subsection}{\numberline {13.7.2}Description}{41}{subsection.13.7.2}
+\contentsline {subsection}{\numberline {13.7.3}Options}{41}{subsection.13.7.3}
+\contentsline {subsection}{\numberline {13.7.4}Exit Status}{41}{subsection.13.7.4}
+\contentsline {subsection}{\numberline {13.7.5}See Also}{41}{subsection.13.7.5}
+\contentsline {section}{\numberline {13.8}The \textbf {s3qllock} command}{42}{section.13.8}
+\contentsline {subsection}{\numberline {13.8.1}Synopsis}{42}{subsection.13.8.1}
+\contentsline {subsection}{\numberline {13.8.2}Description}{42}{subsection.13.8.2}
+\contentsline {subsection}{\numberline {13.8.3}Rationale}{42}{subsection.13.8.3}
+\contentsline {subsection}{\numberline {13.8.4}Options}{42}{subsection.13.8.4}
+\contentsline {subsection}{\numberline {13.8.5}Exit Status}{42}{subsection.13.8.5}
+\contentsline {subsection}{\numberline {13.8.6}See Also}{43}{subsection.13.8.6}
+\contentsline {section}{\numberline {13.9}The \textbf {umount.s3ql} command}{43}{section.13.9}
+\contentsline {subsection}{\numberline {13.9.1}Synopsis}{43}{subsection.13.9.1}
+\contentsline {subsection}{\numberline {13.9.2}Description}{43}{subsection.13.9.2}
+\contentsline {subsection}{\numberline {13.9.3}Options}{43}{subsection.13.9.3}
+\contentsline {subsection}{\numberline {13.9.4}Exit Status}{43}{subsection.13.9.4}
+\contentsline {subsection}{\numberline {13.9.5}See Also}{43}{subsection.13.9.5}
+\contentsline {section}{\numberline {13.10}The \textbf {fsck.s3ql} command}{44}{section.13.10}
+\contentsline {subsection}{\numberline {13.10.1}Synopsis}{44}{subsection.13.10.1}
+\contentsline {subsection}{\numberline {13.10.2}Description}{44}{subsection.13.10.2}
+\contentsline {subsubsection}{Amazon S3}{44}{subsubsection*.13}
+\contentsline {subsubsection}{Local}{44}{subsubsection*.14}
+\contentsline {subsubsection}{SFTP}{44}{subsubsection*.15}
+\contentsline {subsection}{\numberline {13.10.3}Options}{44}{subsection.13.10.3}
+\contentsline {subsection}{\numberline {13.10.4}Files}{45}{subsection.13.10.4}
+\contentsline {subsection}{\numberline {13.10.5}Exit Status}{45}{subsection.13.10.5}
+\contentsline {subsection}{\numberline {13.10.6}See Also}{45}{subsection.13.10.6}
+\contentsline {section}{\numberline {13.11}The \textbf {pcp} command}{45}{section.13.11}
+\contentsline {subsection}{\numberline {13.11.1}Synopsis}{45}{subsection.13.11.1}
+\contentsline {subsection}{\numberline {13.11.2}Description}{45}{subsection.13.11.2}
+\contentsline {subsection}{\numberline {13.11.3}Options}{45}{subsection.13.11.3}
+\contentsline {subsection}{\numberline {13.11.4}Exit Status}{45}{subsection.13.11.4}
+\contentsline {subsection}{\numberline {13.11.5}See Also}{45}{subsection.13.11.5}
+\contentsline {section}{\numberline {13.12}The \textbf {expire\_backups} command}{46}{section.13.12}
+\contentsline {subsection}{\numberline {13.12.1}Synopsis}{46}{subsection.13.12.1}
+\contentsline {subsection}{\numberline {13.12.2}Description}{46}{subsection.13.12.2}
+\contentsline {subsection}{\numberline {13.12.3}Options}{47}{subsection.13.12.3}
+\contentsline {subsection}{\numberline {13.12.4}Exit Status}{47}{subsection.13.12.4}
+\contentsline {subsection}{\numberline {13.12.5}See Also}{47}{subsection.13.12.5}
+\contentsline {chapter}{\numberline {14}Further Resources / Getting Help}{49}{chapter.14}
diff --git a/doc/latex/python.ist b/doc/latex/python.ist
new file mode 100644
index 0000000..9ffa0f9
--- /dev/null
+++ b/doc/latex/python.ist
@@ -0,0 +1,11 @@
+line_max 100
+headings_flag 1
+heading_prefix " \\bigletter "
+
+preamble "\\begin{theindex}
+\\def\\bigletter#1{{\\Large\\sffamily#1}\\nopagebreak\\vspace{1mm}}
+
+"
+
+symhead_positive "{Symbols}"
+numhead_positive "{Numbers}"
diff --git a/doc/latex/sphinx.sty b/doc/latex/sphinx.sty
new file mode 100644
index 0000000..21a3d89
--- /dev/null
+++ b/doc/latex/sphinx.sty
@@ -0,0 +1,464 @@
+%
+% sphinx.sty
+%
+% Adapted from the old python.sty, mostly written by Fred Drake,
+% by Georg Brandl.
+%
+
+\NeedsTeXFormat{LaTeX2e}[1995/12/01]
+\ProvidesPackage{sphinx}[2010/01/15 LaTeX package (Sphinx markup)]
+
+\RequirePackage{textcomp}
+\RequirePackage{fancyhdr}
+\RequirePackage{fancybox}
+\RequirePackage{titlesec}
+\RequirePackage{tabulary}
+\RequirePackage{amsmath} % for \text
+\RequirePackage{makeidx}
+\RequirePackage{framed}
+\RequirePackage{ifthen}
+\RequirePackage{color}
+% For highlighted code.
+\RequirePackage{fancyvrb}
+% For table captions.
+\RequirePackage{threeparttable}
+% Handle footnotes in tables.
+\RequirePackage{footnote}
+\makesavenoteenv{tabulary}
+% For floating figures in the text.
+\RequirePackage{wrapfig}
+% Separate paragraphs by space by default.
+\RequirePackage{parskip}
+
+% Redefine these colors to your liking in the preamble.
+\definecolor{TitleColor}{rgb}{0.126,0.263,0.361}
+\definecolor{InnerLinkColor}{rgb}{0.208,0.374,0.486}
+\definecolor{OuterLinkColor}{rgb}{0.216,0.439,0.388}
+% Redefine these colors to something not white if you want to have colored
+% background and border for code examples.
+\definecolor{VerbatimColor}{rgb}{1,1,1}
+\definecolor{VerbatimBorderColor}{rgb}{1,1,1}
+
+% Uncomment these two lines to ignore the paper size and make the page
+% size more like a typical published manual.
+%\renewcommand{\paperheight}{9in}
+%\renewcommand{\paperwidth}{8.5in} % typical squarish manual
+%\renewcommand{\paperwidth}{7in} % O'Reilly ``Programming Python''
+
+% For graphicx, check if we are compiling under latex or pdflatex.
+\ifx\pdftexversion\undefined
+ \usepackage{graphicx}
+\else
+ \usepackage[pdftex]{graphicx}
+\fi
+
+% for PDF output, use colors and maximal compression
+\newif\ifsphinxpdfoutput\sphinxpdfoutputfalse
+\ifx\pdfoutput\undefined\else\ifcase\pdfoutput
+ \let\py@NormalColor\relax
+ \let\py@TitleColor\relax
+\else
+ \sphinxpdfoutputtrue
+ \input{pdfcolor}
+ \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}}
+ \def\py@TitleColor{\color{TitleColor}}
+ \pdfcompresslevel=9
+\fi\fi
+
+% XeLaTeX can do colors, too
+\ifx\XeTeXrevision\undefined\else
+ \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}}
+ \def\py@TitleColor{\color{TitleColor}}
+\fi
+
+% Increase printable page size (copied from fullpage.sty)
+\topmargin 0pt
+\advance \topmargin by -\headheight
+\advance \topmargin by -\headsep
+
+% attempt to work a little better for A4 users
+\textheight \paperheight
+\advance\textheight by -2in
+
+\oddsidemargin 0pt
+\evensidemargin 0pt
+%\evensidemargin -.25in % for ``manual size'' documents
+\marginparwidth 0.5in
+
+\textwidth \paperwidth
+\advance\textwidth by -2in
+
+
+% Style parameters and macros used by most documents here
+\raggedbottom
+\sloppy
+\hbadness = 5000 % don't print trivial gripes
+
+\pagestyle{empty} % start this way; change for
+\pagenumbering{roman} % ToC & chapters
+
+% Use this to set the font family for headers and other decor:
+\newcommand{\py@HeaderFamily}{\sffamily\bfseries}
+
+% Redefine the 'normal' header/footer style when using "fancyhdr" package:
+\@ifundefined{fancyhf}{}{
+ % Use \pagestyle{normal} as the primary pagestyle for text.
+ \fancypagestyle{normal}{
+ \fancyhf{}
+ \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}}
+ \fancyfoot[LO]{{\py@HeaderFamily\nouppercase{\rightmark}}}
+ \fancyfoot[RE]{{\py@HeaderFamily\nouppercase{\leftmark}}}
+ \fancyhead[LE,RO]{{\py@HeaderFamily \@title, \py@release}}
+ \renewcommand{\headrulewidth}{0.4pt}
+ \renewcommand{\footrulewidth}{0.4pt}
+ }
+ % Update the plain style so we get the page number & footer line,
+ % but not a chapter or section title. This is to keep the first
+ % page of a chapter and the blank page between chapters `clean.'
+ \fancypagestyle{plain}{
+ \fancyhf{}
+ \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}}
+ \renewcommand{\headrulewidth}{0pt}
+ \renewcommand{\footrulewidth}{0.4pt}
+ }
+}
+
+% Some custom font markup commands.
+%
+\newcommand{\strong}[1]{{\bf #1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\bfcode}[1]{\code{\bfseries#1}}
+\newcommand{\samp}[1]{`\code{#1}'}
+\newcommand{\email}[1]{\textsf{#1}}
+
+% Redefine the Verbatim environment to allow border and background colors.
+% The original environment is still used for verbatims within tables.
+\let\OriginalVerbatim=\Verbatim
+\let\endOriginalVerbatim=\endVerbatim
+
+% Play with vspace to be able to keep the indentation.
+\newlength\distancetoright
+\def\mycolorbox#1{%
+ \setlength\distancetoright{\linewidth}%
+ \advance\distancetoright -\@totalleftmargin %
+ \fcolorbox{VerbatimBorderColor}{VerbatimColor}{%
+ \begin{minipage}{\distancetoright}%
+ #1
+ \end{minipage}%
+ }%
+}
+\def\FrameCommand{\mycolorbox}
+
+\renewcommand{\Verbatim}[1][1]{%
+ % list starts new par, but we don't want it to be set apart vertically
+ \bgroup\parskip=0pt%
+ \smallskip%
+  % The list environment is needed to perfectly control the vertical
+ % space.
+ \list{}{%
+ \setlength\parskip{0pt}%
+ \setlength\itemsep{0ex}%
+ \setlength\topsep{0ex}%
+ \setlength\partopsep{0pt}%
+ \setlength\leftmargin{0pt}%
+ }%
+ \item\MakeFramed {\FrameRestore}%
+ \small%
+ \OriginalVerbatim[#1]%
+}
+\renewcommand{\endVerbatim}{%
+ \endOriginalVerbatim%
+ \endMakeFramed%
+ \endlist%
+ % close group to restore \parskip
+ \egroup%
+}
+
+
+% \moduleauthor{name}{email}
+\newcommand{\moduleauthor}[2]{}
+
+% \sectionauthor{name}{email}
+\newcommand{\sectionauthor}[2]{}
+
+% Augment the sectioning commands used to get our own font family in place,
+% and reset some internal data items:
+\titleformat{\section}{\Large\py@HeaderFamily}%
+ {\py@TitleColor\thesection}{0.5em}{\py@TitleColor}{\py@NormalColor}
+\titleformat{\subsection}{\large\py@HeaderFamily}%
+ {\py@TitleColor\thesubsection}{0.5em}{\py@TitleColor}{\py@NormalColor}
+\titleformat{\subsubsection}{\py@HeaderFamily}%
+ {\py@TitleColor\thesubsubsection}{0.5em}{\py@TitleColor}{\py@NormalColor}
+\titleformat{\paragraph}{\small\py@HeaderFamily}%
+ {\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
+
+% {fulllineitems} is the main environment for object descriptions.
+%
+\newcommand{\py@itemnewline}[1]{%
+ \@tempdima\linewidth%
+ \advance\@tempdima \leftmargin\makebox[\@tempdima][l]{#1}%
+}
+
+\newenvironment{fulllineitems}{
+ \begin{list}{}{\labelwidth \leftmargin \labelsep 0pt
+ \rightmargin 0pt \topsep -\parskip \partopsep \parskip
+ \itemsep -\parsep
+ \let\makelabel=\py@itemnewline}
+}{\end{list}}
+
+% \optional is used for ``[, arg]``, i.e. desc_optional nodes.
+\newcommand{\optional}[1]{%
+ {\textnormal{\Large[}}{#1}\hspace{0.5mm}{\textnormal{\Large]}}}
+
+\newlength{\py@argswidth}
+\newcommand{\py@sigparams}[2]{%
+ \parbox[t]{\py@argswidth}{#1\code{)}#2}}
+\newcommand{\pysigline}[1]{\item[#1]\nopagebreak}
+\newcommand{\pysiglinewithargsret}[3]{%
+ \settowidth{\py@argswidth}{#1\code{(}}%
+ \addtolength{\py@argswidth}{-2\py@argswidth}%
+ \addtolength{\py@argswidth}{\linewidth}%
+ \item[#1\code{(}\py@sigparams{#2}{#3}]}
+
+% Production lists
+%
+\newenvironment{productionlist}{
+% \def\optional##1{{\Large[}##1{\Large]}}
+ \def\production##1##2{\\\code{##1}&::=&\code{##2}}
+ \def\productioncont##1{\\& &\code{##1}}
+ \parindent=2em
+ \indent
+ \begin{tabular}{lcl}
+}{%
+ \end{tabular}
+}
+
+% Notices / Admonitions
+%
+\newlength{\py@noticelength}
+
+\newcommand{\py@heavybox}{
+ \setlength{\fboxrule}{1pt}
+ \setlength{\fboxsep}{6pt}
+ \setlength{\py@noticelength}{\linewidth}
+ \addtolength{\py@noticelength}{-2\fboxsep}
+ \addtolength{\py@noticelength}{-2\fboxrule}
+ %\setlength{\shadowsize}{3pt}
+ \Sbox
+ \minipage{\py@noticelength}
+}
+\newcommand{\py@endheavybox}{
+ \endminipage
+ \endSbox
+ \fbox{\TheSbox}
+}
+
+\newcommand{\py@lightbox}{{%
+ \setlength\parskip{0pt}\par
+ \rule[0ex]{\linewidth}{0.5pt}%
+ \par\vspace{-0.5ex}%
+ }}
+\newcommand{\py@endlightbox}{{%
+ \setlength{\parskip}{0pt}%
+ \par\rule[0.5ex]{\linewidth}{0.5pt}%
+ \par\vspace{-0.5ex}%
+ }}
+
+% Some are quite plain:
+\newcommand{\py@noticestart@note}{\py@lightbox}
+\newcommand{\py@noticeend@note}{\py@endlightbox}
+\newcommand{\py@noticestart@hint}{\py@lightbox}
+\newcommand{\py@noticeend@hint}{\py@endlightbox}
+\newcommand{\py@noticestart@important}{\py@lightbox}
+\newcommand{\py@noticeend@important}{\py@endlightbox}
+\newcommand{\py@noticestart@tip}{\py@lightbox}
+\newcommand{\py@noticeend@tip}{\py@endlightbox}
+
+% Others gets more visible distinction:
+\newcommand{\py@noticestart@warning}{\py@heavybox}
+\newcommand{\py@noticeend@warning}{\py@endheavybox}
+\newcommand{\py@noticestart@caution}{\py@heavybox}
+\newcommand{\py@noticeend@caution}{\py@endheavybox}
+\newcommand{\py@noticestart@attention}{\py@heavybox}
+\newcommand{\py@noticeend@attention}{\py@endheavybox}
+\newcommand{\py@noticestart@danger}{\py@heavybox}
+\newcommand{\py@noticeend@danger}{\py@endheavybox}
+\newcommand{\py@noticestart@error}{\py@heavybox}
+\newcommand{\py@noticeend@error}{\py@endheavybox}
+
+\newenvironment{notice}[2]{
+ \def\py@noticetype{#1}
+ \csname py@noticestart@#1\endcsname
+ \strong{#2}
+}{\csname py@noticeend@\py@noticetype\endcsname}
+
+% Allow the release number to be specified independently of the
+% \date{}. This allows the date to reflect the document's date and
+% release to specify the release that is documented.
+%
+\newcommand{\py@release}{}
+\newcommand{\version}{}
+\newcommand{\shortversion}{}
+\newcommand{\releaseinfo}{}
+\newcommand{\releasename}{Release}
+\newcommand{\release}[1]{%
+ \renewcommand{\py@release}{\releasename\space\version}%
+ \renewcommand{\version}{#1}}
+\newcommand{\setshortversion}[1]{%
+ \renewcommand{\shortversion}{#1}}
+\newcommand{\setreleaseinfo}[1]{%
+ \renewcommand{\releaseinfo}{#1}}
+
+% Allow specification of the author's address separately from the
+% author's name. This can be used to format them differently, which
+% is a good thing.
+%
+\newcommand{\py@authoraddress}{}
+\newcommand{\authoraddress}[1]{\renewcommand{\py@authoraddress}{#1}}
+
+% This sets up the fancy chapter headings that make the documents look
+% at least a little better than the usual LaTeX output.
+%
+\@ifundefined{ChTitleVar}{}{
+ \ChNameVar{\raggedleft\normalsize\py@HeaderFamily}
+ \ChNumVar{\raggedleft \bfseries\Large\py@HeaderFamily}
+ \ChTitleVar{\raggedleft \rm\Huge\py@HeaderFamily}
+ % This creates chapter heads without the leading \vspace*{}:
+ \def\@makechapterhead#1{%
+ {\parindent \z@ \raggedright \normalfont
+ \ifnum \c@secnumdepth >\m@ne
+ \DOCH
+ \fi
+ \interlinepenalty\@M
+ \DOTI{#1}
+ }
+ }
+}
+
+% Redefine description environment so that it is usable inside fulllineitems.
+%
+\renewcommand{\description}{%
+ \list{}{\labelwidth\z@%
+ \itemindent-\leftmargin%
+ \labelsep5pt%
+ \let\makelabel=\descriptionlabel}}
+
+% Definition lists; requested by AMK for HOWTO documents. Probably useful
+% elsewhere as well, so keep it in the general style support.
+%
+\newenvironment{definitions}{%
+ \begin{description}%
+ \def\term##1{\item[##1]\mbox{}\\*[0mm]}
+}{%
+ \end{description}%
+}
+
+% Tell TeX about pathological hyphenation cases:
+\hyphenation{Base-HTTP-Re-quest-Hand-ler}
+
+
+% The following is stuff copied from docutils' latex writer.
+%
+\newcommand{\optionlistlabel}[1]{\bf #1 \hfill}
+\newenvironment{optionlist}[1]
+{\begin{list}{}
+ {\setlength{\labelwidth}{#1}
+ \setlength{\rightmargin}{1cm}
+ \setlength{\leftmargin}{\rightmargin}
+ \addtolength{\leftmargin}{\labelwidth}
+ \addtolength{\leftmargin}{\labelsep}
+ \renewcommand{\makelabel}{\optionlistlabel}}
+}{\end{list}}
+
+\newlength{\lineblockindentation}
+\setlength{\lineblockindentation}{2.5em}
+\newenvironment{lineblock}[1]
+{\begin{list}{}
+ {\setlength{\partopsep}{\parskip}
+ \addtolength{\partopsep}{\baselineskip}
+ \topsep0pt\itemsep0.15\baselineskip\parsep0pt
+ \leftmargin#1}
+ \raggedright}
+{\end{list}}
+
+% Redefine includegraphics to avoid images larger than the screen size
+% if the size is not specified.
+\let\py@Oldincludegraphics\includegraphics
+
+\newbox\image@box%
+\newdimen\image@width%
+\renewcommand\includegraphics[2][\@empty]{%
+ \ifx#1\@empty%
+ \setbox\image@box=\hbox{\py@Oldincludegraphics{#2}}%
+ \image@width\wd\image@box%
+ \ifdim \image@width>\linewidth%
+ \setbox\image@box=\hbox{\py@Oldincludegraphics[width=\linewidth]{#2}}%
+ \box\image@box%
+ \else%
+ \py@Oldincludegraphics{#2}%
+ \fi%
+ \else%
+ \py@Oldincludegraphics[#1]{#2}%
+ \fi%
+}
+
+
+% Fix the index and bibliography environments to add an entry to the Table of
+% Contents; this is much nicer than just having to jump to the end of the book
+% and flip around, especially with multiple indexes.
+%
+\let\py@OldTheindex=\theindex
+\renewcommand{\theindex}{
+ \cleardoublepage
+ \phantomsection
+ \py@OldTheindex
+ \addcontentsline{toc}{chapter}{\indexname}
+}
+
+\let\py@OldThebibliography=\thebibliography
+\renewcommand{\thebibliography}[1]{
+ \cleardoublepage
+ \phantomsection
+ \py@OldThebibliography{1}
+ \addcontentsline{toc}{chapter}{\bibname}
+}
+
+% Include hyperref last.
+\RequirePackage[colorlinks,breaklinks,
+ linkcolor=InnerLinkColor,filecolor=OuterLinkColor,
+ menucolor=OuterLinkColor,urlcolor=OuterLinkColor,
+ citecolor=InnerLinkColor]{hyperref}
+% Fix anchor placement for figures with captions.
+% (Note: we don't use a package option here; instead, we give an explicit
+% \capstart for figures that actually have a caption.)
+\RequirePackage{hypcap}
+
+% From docutils.writers.latex2e
+\providecommand{\DUspan}[2]{%
+ {% group ("span") to limit the scope of styling commands
+ \@for\node@class@name:=#1\do{%
+ \ifcsname docutilsrole\node@class@name\endcsname%
+ \csname docutilsrole\node@class@name\endcsname%
+ \fi%
+ }%
+ {#2}% node content
+ }% close "span"
+}
+
+\providecommand*{\DUprovidelength}[2]{
+ \ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
+}
+
+\DUprovidelength{\DUlineblockindent}{2.5em}
+\ifthenelse{\isundefined{\DUlineblock}}{
+ \newenvironment{DUlineblock}[1]{%
+ \list{}{\setlength{\partopsep}{\parskip}
+ \addtolength{\partopsep}{\baselineskip}
+ \setlength{\topsep}{0pt}
+ \setlength{\itemsep}{0.15\baselineskip}
+ \setlength{\parsep}{0pt}
+ \setlength{\leftmargin}{#1}}
+ \raggedright
+ }
+ {\endlist}
+}{}
diff --git a/doc/latex/sphinxhowto.cls b/doc/latex/sphinxhowto.cls
new file mode 100644
index 0000000..1ebdd43
--- /dev/null
+++ b/doc/latex/sphinxhowto.cls
@@ -0,0 +1,81 @@
+%
+% sphinxhowto.cls for Sphinx (http://sphinx.pocoo.org/)
+%
+
+\NeedsTeXFormat{LaTeX2e}[1995/12/01]
+\ProvidesClass{sphinxhowto}[2009/06/02 Document class (Sphinx HOWTO)]
+
+% 'oneside' option overriding the 'twoside' default
+\newif\if@oneside
+\DeclareOption{oneside}{\@onesidetrue}
+% Pass remaining document options to the parent class.
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}}
+\ProcessOptions\relax
+
+% Default to two-side document
+\if@oneside
+% nothing to do (oneside is the default)
+\else
+\PassOptionsToClass{twoside}{\sphinxdocclass}
+\fi
+
+\LoadClass{\sphinxdocclass}
+
+% Set some sane defaults for section numbering depth and TOC depth. You can
+% reset these counters in your preamble.
+%
+\setcounter{secnumdepth}{2}
+
+% Change the title page to look a bit better, and fit in with the fncychap
+% ``Bjarne'' style a bit better.
+%
+\renewcommand{\maketitle}{
+ \rule{\textwidth}{1pt}
+ \ifsphinxpdfoutput
+ \begingroup
+ % These \defs are required to deal with multi-line authors; it
+ % changes \\ to ', ' (comma-space), making it pass muster for
+ % generating document info in the PDF file.
+ \def\\{, }
+ \def\and{and }
+ \pdfinfo{
+ /Author (\@author)
+ /Title (\@title)
+ }
+ \endgroup
+ \fi
+ \begin{flushright}
+ \sphinxlogo%
+ {\rm\Huge\py@HeaderFamily \@title} \par
+ {\em\large\py@HeaderFamily \py@release\releaseinfo} \par
+ \vspace{25pt}
+ {\Large\py@HeaderFamily
+ \begin{tabular}[t]{c}
+ \@author
+ \end{tabular}} \par
+ \vspace{25pt}
+ \@date \par
+ \py@authoraddress \par
+ \end{flushright}
+ \@thanks
+ \setcounter{footnote}{0}
+ \let\thanks\relax\let\maketitle\relax
+ %\gdef\@thanks{}\gdef\@author{}\gdef\@title{}
+}
+
+\let\py@OldTableofcontents=\tableofcontents
+\renewcommand{\tableofcontents}{
+ \begingroup
+ \parskip = 0mm
+ \py@OldTableofcontents
+ \endgroup
+ \rule{\textwidth}{1pt}
+ \vspace{12pt}
+}
+
+\@ifundefined{fancyhf}{
+ \pagestyle{plain}}{
+ \pagestyle{normal}} % start this way; change for
+\pagenumbering{arabic} % ToC & chapters
+
+\thispagestyle{empty}
diff --git a/doc/latex/sphinxmanual.cls b/doc/latex/sphinxmanual.cls
new file mode 100644
index 0000000..5751779
--- /dev/null
+++ b/doc/latex/sphinxmanual.cls
@@ -0,0 +1,122 @@
+%
+% sphinxmanual.cls for Sphinx (http://sphinx.pocoo.org/)
+%
+
+\NeedsTeXFormat{LaTeX2e}[1995/12/01]
+\ProvidesClass{sphinxmanual}[2009/06/02 Document class (Sphinx manual)]
+
+% chapters starting at odd pages (overridden by 'openany' document option)
+\PassOptionsToClass{openright}{\sphinxdocclass}
+
+% 'oneside' option overriding the 'twoside' default
+\newif\if@oneside
+\DeclareOption{oneside}{\@onesidetrue}
+% Pass remaining document options to the parent class.
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}}
+\ProcessOptions\relax
+
+% Default to two-side document
+\if@oneside
+% nothing to do (oneside is the default)
+\else
+\PassOptionsToClass{twoside}{\sphinxdocclass}
+\fi
+
+\LoadClass{\sphinxdocclass}
+
+% Set some sane defaults for section numbering depth and TOC depth. You can
+% reset these counters in your preamble.
+%
+\setcounter{secnumdepth}{2}
+\setcounter{tocdepth}{1}
+
+% Change the title page to look a bit better, and fit in with the fncychap
+% ``Bjarne'' style a bit better.
+%
+\renewcommand{\maketitle}{%
+ \begin{titlepage}%
+ \let\footnotesize\small
+ \let\footnoterule\relax
+ \rule{\textwidth}{1pt}%
+ \ifsphinxpdfoutput
+ \begingroup
+ % These \defs are required to deal with multi-line authors; it
+ % changes \\ to ', ' (comma-space), making it pass muster for
+ % generating document info in the PDF file.
+ \def\\{, }
+ \def\and{and }
+ \pdfinfo{
+ /Author (\@author)
+ /Title (\@title)
+ }
+ \endgroup
+ \fi
+ \begin{flushright}%
+ \sphinxlogo%
+ {\rm\Huge\py@HeaderFamily \@title \par}%
+ {\em\LARGE\py@HeaderFamily \py@release\releaseinfo \par}
+ \vfill
+ {\LARGE\py@HeaderFamily
+ \begin{tabular}[t]{c}
+ \@author
+ \end{tabular}
+ \par}
+ \vfill\vfill
+ {\large
+ \@date \par
+ \vfill
+ \py@authoraddress \par
+ }%
+ \end{flushright}%\par
+ \@thanks
+ \end{titlepage}%
+ \cleardoublepage%
+ \setcounter{footnote}{0}%
+ \let\thanks\relax\let\maketitle\relax
+ %\gdef\@thanks{}\gdef\@author{}\gdef\@title{}
+}
+
+
+% Catch the end of the {abstract} environment, but here make sure the abstract
+% is followed by a blank page if the 'openright' option is used.
+%
+\let\py@OldEndAbstract=\endabstract
+\renewcommand{\endabstract}{
+ \if@openright
+ \ifodd\value{page}
+ \typeout{Adding blank page after the abstract.}
+ \vfil\pagebreak
+ \fi
+ \fi
+ \py@OldEndAbstract
+}
+
+% This wraps the \tableofcontents macro with all the magic to get the spacing
+% right and have the right number of pages if the 'openright' option has been
+% used. This eliminates a fair amount of crud in the individual document files.
+%
+\let\py@OldTableofcontents=\tableofcontents
+\renewcommand{\tableofcontents}{%
+ \setcounter{page}{1}%
+ \pagebreak%
+ \pagestyle{plain}%
+ {%
+ \parskip = 0mm%
+ \py@OldTableofcontents%
+ \if@openright%
+ \ifodd\value{page}%
+ \typeout{Adding blank page after the table of contents.}%
+ \pagebreak\hspace{0pt}%
+ \fi%
+ \fi%
+ \cleardoublepage%
+ }%
+ \pagenumbering{arabic}%
+ \@ifundefined{fancyhf}{}{\pagestyle{normal}}%
+}
+
+% This is needed to get the width of the section # area wide enough in the
+% library reference. Doing it here keeps it the same for all the manuals.
+%
+\renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}}
+\renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}}
diff --git a/doc/latex/tabulary.sty b/doc/latex/tabulary.sty
new file mode 100644
index 0000000..ba83c0a
--- /dev/null
+++ b/doc/latex/tabulary.sty
@@ -0,0 +1,452 @@
+%%
+%% This is file `tabulary.sty',
+%% generated with the docstrip utility.
+%%
+%% The original source files were:
+%%
+%% tabulary.dtx (with options: `package')
+%% DRAFT VERSION
+%%
+%% File `tabulary.dtx'.
+%% Copyright (C) 1995 1996 2003 David Carlisle
+%% This file may be distributed under the terms of the LPPL.
+%% See 00readme.txt for details.
+%%
+\NeedsTeXFormat{LaTeX2e}
+\ProvidesPackage{tabulary}
+ [2007/10/02 v0.9 tabulary package (DPC)]
+\RequirePackage{array}
+\catcode`\Z=14
+\DeclareOption{debugshow}{\catcode`\Z=9\relax}
+\ProcessOptions
+\def\arraybackslash{\let\\=\@arraycr}
+\def\@finalstrut#1{%
+ \unskip\ifhmode\nobreak\fi\vrule\@width\z@\@height\z@\@depth\dp#1}
+\newcount\TY@count
+\def\tabulary{%
+ \let\TY@final\tabular
+ \let\endTY@final\endtabular
+ \TY@tabular}
+\def\TY@tabular#1{%
+ \edef\TY@{\@currenvir}%
+ {\ifnum0=`}\fi
+ \@ovxx\TY@linewidth
+ \@ovyy\TY@tablewidth
+ \count@\z@
+ \@tempswatrue
+ \@whilesw\if@tempswa\fi{%
+ \advance\count@\@ne
+ \expandafter\ifx\csname TY@F\the\count@\endcsname\relax
+ \@tempswafalse
+ \else
+ \expandafter\let\csname TY@SF\the\count@\expandafter\endcsname
+ \csname TY@F\the\count@\endcsname
+ \global\expandafter\let\csname TY@F\the\count@\endcsname\relax
+ \expandafter\let\csname TY@S\the\count@\expandafter\endcsname
+ \csname TY@\the\count@\endcsname
+ \fi}%
+ \global\TY@count\@ne
+ \TY@width\xdef{0pt}%
+ \global\TY@tablewidth\z@
+ \global\TY@linewidth#1\relax
+Z\message{^^J^^JTable^^J%
+Z Target Width: \the\TY@linewidth^^J%
+Z \string\tabcolsep: \the\tabcolsep\space
+Z \string\arrayrulewidth: \the\arrayrulewidth\space
+Z \string\doublerulesep: \the\doublerulesep^^J%
+Z \string\tymin: \the\tymin\space
+Z \string\tymax: \the\tymax^^J}%
+ \let\@classz\TY@classz
+ \let\verb\TX@verb
+ \toks@{}\TY@get@body}
+\let\TY@@mkpream\@mkpream
+\def\TY@mkpream{%
+ \def\@addamp{%
+ \if@firstamp \@firstampfalse \else
+ \global\advance\TY@count\@ne
+ \edef\@preamble{\@preamble &}\fi
+ \TY@width\xdef{0pt}}%
+ \def\@acol{%
+ \TY@subwidth\col@sep
+ \@addtopreamble{\hskip\col@sep}}%
+ \let\@arrayrule\TY@arrayrule
+ \let\@classvi\TY@classvi
+ \def\@classv{\save@decl
+ \expandafter\NC@ecs\@nextchar\extracolsep{}\extracolsep\@@@
+ \sbox\z@{\d@llarbegin\@nextchar\d@llarend}%
+ \TY@subwidth{\wd\z@}%
+ \@addtopreamble{\d@llarbegin\the@toks\the\count@\relax\d@llarend}%
+ \prepnext@tok}%
+ \global\let\@mkpream\TY@@mkpream
+ \TY@@mkpream}
+\def\TY@arrayrule{%
+ \TY@subwidth\arrayrulewidth
+ \@addtopreamble \vline}
+\def\TY@classvi{\ifcase \@lastchclass
+ \@acol \or
+ \TY@subwidth\doublerulesep
+ \@addtopreamble{\hskip \doublerulesep}\or
+ \@acol \or
+ \@classvii
+ \fi}
+\def\TY@tab{%
+ \setbox\z@\hbox\bgroup
+ \let\[$\let\]$%
+ \let\equation$\let\endequation$%
+ \col@sep\tabcolsep
+ \let\d@llarbegin\begingroup\let\d@llarend\endgroup
+ \let\@mkpream\TY@mkpream
+ \def\multicolumn##1##2##3{\multispan##1\relax}%
+ \CT@start\TY@tabarray}
+\def\TY@tabarray{\@ifnextchar[{\TY@array}{\@array[t]}}
+\def\TY@array[#1]{\@array[t]}
+\def\TY@width#1{%
+ \expandafter#1\csname TY@\the\TY@count\endcsname}
+\def\TY@subwidth#1{%
+ \TY@width\dimen@
+ \advance\dimen@-#1\relax
+ \TY@width\xdef{\the\dimen@}%
+ \global\advance\TY@linewidth-#1\relax}
+\def\endtabulary{%
+ \gdef\@halignto{}%
+ \let\TY@footnote\footnote%
+ \def\footnote{}% prevent footnotes from doing anything
+ \expandafter\TY@tab\the\toks@
+ \crcr\omit
+ {\xdef\TY@save@row{}%
+ \loop
+ \advance\TY@count\m@ne
+ \ifnum\TY@count>\z@
+ \xdef\TY@save@row{\TY@save@row&\omit}%
+ \repeat}\TY@save@row
+ \endarray\global\setbox1=\lastbox\setbox0=\vbox{\unvbox1
+ \unskip\global\setbox1=\lastbox}\egroup
+ \dimen@\TY@linewidth
+ \divide\dimen@\TY@count
+ \ifdim\dimen@<\tymin
+ \TY@warn{tymin too large (\the\tymin), resetting to \the\dimen@}%
+ \tymin\dimen@
+ \fi
+ \setbox\tw@=\hbox{\unhbox\@ne
+ \loop
+\@tempdima=\lastskip
+\ifdim\@tempdima>\z@
+Z \message{ecs=\the\@tempdima^^J}%
+ \global\advance\TY@linewidth-\@tempdima
+\fi
+ \unskip
+ \setbox\tw@=\lastbox
+ \ifhbox\tw@
+Z \message{Col \the\TY@count: Initial=\the\wd\tw@\space}%
+ \ifdim\wd\tw@>\tymax
+ \wd\tw@\tymax
+Z \message{> max\space}%
+Z \else
+Z \message{ \@spaces\space}%
+ \fi
+ \TY@width\dimen@
+Z \message{\the\dimen@\space}%
+ \advance\dimen@\wd\tw@
+Z \message{Final=\the\dimen@\space}%
+ \TY@width\xdef{\the\dimen@}%
+ \ifdim\dimen@<\tymin
+Z \message{< tymin}%
+ \global\advance\TY@linewidth-\dimen@
+ \expandafter\xdef\csname TY@F\the\TY@count\endcsname
+ {\the\dimen@}%
+ \else
+ \expandafter\ifx\csname TY@F\the\TY@count\endcsname\z@
+Z \message{***}%
+ \global\advance\TY@linewidth-\dimen@
+ \expandafter\xdef\csname TY@F\the\TY@count\endcsname
+ {\the\dimen@}%
+ \else
+Z \message{> tymin}%
+ \global\advance\TY@tablewidth\dimen@
+ \global\expandafter\let\csname TY@F\the\TY@count\endcsname
+ \maxdimen
+ \fi\fi
+ \advance\TY@count\m@ne
+ \repeat}%
+ \TY@checkmin
+ \TY@checkmin
+ \TY@checkmin
+ \TY@checkmin
+ \TY@count\z@
+ \let\TY@box\TY@box@v
+ \let\footnote\TY@footnote % restore footnotes
+ {\expandafter\TY@final\the\toks@\endTY@final}%
+ \count@\z@
+ \@tempswatrue
+ \@whilesw\if@tempswa\fi{%
+ \advance\count@\@ne
+ \expandafter\ifx\csname TY@SF\the\count@\endcsname\relax
+ \@tempswafalse
+ \else
+ \global\expandafter\let\csname TY@F\the\count@\expandafter\endcsname
+ \csname TY@SF\the\count@\endcsname
+ \global\expandafter\let\csname TY@\the\count@\expandafter\endcsname
+ \csname TY@S\the\count@\endcsname
+ \fi}%
+ \TY@linewidth\@ovxx
+ \TY@tablewidth\@ovyy
+ \ifnum0=`{\fi}}
+\def\TY@checkmin{%
+ \let\TY@checkmin\relax
+\ifdim\TY@tablewidth>\z@
+ \Gscale@div\TY@ratio\TY@linewidth\TY@tablewidth
+ \ifdim\TY@tablewidth <\linewidth
+ \def\TY@ratio{1}%
+ \fi
+\else
+ \TY@warn{No suitable columns!}%
+ \def\TY@ratio{1}%
+\fi
+\count@\z@
+Z \message{^^JLine Width: \the\TY@linewidth,
+Z Natural Width: \the\TY@tablewidth,
+Z Ratio: \TY@ratio^^J}%
+\@tempdima\z@
+\loop
+\ifnum\count@<\TY@count
+\advance\count@\@ne
+ \ifdim\csname TY@F\the\count@\endcsname>\tymin
+ \dimen@\csname TY@\the\count@\endcsname
+ \dimen@\TY@ratio\dimen@
+ \ifdim\dimen@<\tymin
+Z \message{Column \the\count@\space ->}%
+ \global\expandafter\let\csname TY@F\the\count@\endcsname\tymin
+ \global\advance\TY@linewidth-\tymin
+ \global\advance\TY@tablewidth-\csname TY@\the\count@\endcsname
+ \let\TY@checkmin\TY@@checkmin
+ \else
+ \expandafter\xdef\csname TY@F\the\count@\endcsname{\the\dimen@}%
+ \advance\@tempdima\csname TY@F\the\count@\endcsname
+ \fi
+ \fi
+Z \dimen@\csname TY@F\the\count@\endcsname\message{\the\dimen@, }%
+\repeat
+Z \message{^^JTotal:\the\@tempdima^^J}%
+}
+\let\TY@@checkmin\TY@checkmin
+\newdimen\TY@linewidth
+\def\tyformat{\everypar{{\nobreak\hskip\z@skip}}}
+\newdimen\tymin
+\tymin=10pt
+\newdimen\tymax
+\tymax=2\textwidth
+\def\@testpach{\@chclass
+ \ifnum \@lastchclass=6 \@ne \@chnum \@ne \else
+ \ifnum \@lastchclass=7 5 \else
+ \ifnum \@lastchclass=8 \tw@ \else
+ \ifnum \@lastchclass=9 \thr@@
+ \else \z@
+ \ifnum \@lastchclass = 10 \else
+ \edef\@nextchar{\expandafter\string\@nextchar}%
+ \@chnum
+ \if \@nextchar c\z@ \else
+ \if \@nextchar l\@ne \else
+ \if \@nextchar r\tw@ \else
+ \if \@nextchar C7 \else
+ \if \@nextchar L8 \else
+ \if \@nextchar R9 \else
+ \if \@nextchar J10 \else
+ \z@ \@chclass
+ \if\@nextchar |\@ne \else
+ \if \@nextchar !6 \else
+ \if \@nextchar @7 \else
+ \if \@nextchar <8 \else
+ \if \@nextchar >9 \else
+ 10
+ \@chnum
+ \if \@nextchar m\thr@@\else
+ \if \@nextchar p4 \else
+ \if \@nextchar b5 \else
+ \z@ \@chclass \z@ \@preamerr \z@ \fi \fi \fi \fi\fi \fi \fi\fi \fi
+ \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi \fi}
+\def\TY@classz{%
+ \@classx
+ \@tempcnta\count@
+ \ifx\TY@box\TY@box@v
+ \global\advance\TY@count\@ne
+ \fi
+ \let\centering c%
+ \let\raggedright\noindent
+ \let\raggedleft\indent
+ \let\arraybackslash\relax
+ \prepnext@tok
+ \ifnum\@chnum<4
+ \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@
+ \fi
+ \ifnum\@chnum=6
+ \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@
+ \fi
+ \@addtopreamble{%
+ \ifcase\@chnum
+ \hfil \d@llarbegin\insert@column\d@llarend \hfil \or
+ \kern\z@
+ \d@llarbegin \insert@column \d@llarend \hfil \or
+ \hfil\kern\z@ \d@llarbegin \insert@column \d@llarend \or
+ $\vcenter\@startpbox{\@nextchar}\insert@column \@endpbox $\or
+ \vtop \@startpbox{\@nextchar}\insert@column \@endpbox \or
+ \vbox \@startpbox{\@nextchar}\insert@column \@endpbox \or
+ \d@llarbegin \insert@column \d@llarend \or% dubious "s" case
+ \TY@box\centering\or
+ \TY@box\raggedright\or
+ \TY@box\raggedleft\or
+ \TY@box\relax
+ \fi}\prepnext@tok}
+\def\TY@box#1{%
+ \ifx\centering#1%
+ \hfil \d@llarbegin\insert@column\d@llarend \hfil \else
+ \ifx\raggedright#1%
+ \kern\z@%<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ \d@llarbegin \insert@column \d@llarend \hfil \else
+ \ifx\raggedleft#1%
+ \hfil\kern\z@ \d@llarbegin \insert@column \d@llarend \else
+ \ifx\relax#1%
+ \d@llarbegin \insert@column \d@llarend
+ \fi \fi \fi \fi}
+\def\TY@box@v#1{%
+ \vtop \@startpbox{\csname TY@F\the\TY@count\endcsname}%
+ #1\arraybackslash\tyformat
+ \insert@column\@endpbox}
+\newdimen\TY@tablewidth
+\def\Gscale@div#1#2#3{%
+ \setlength\dimen@{#3}%
+ \ifdim\dimen@=\z@
+ \PackageError{graphics}{Division by 0}\@eha
+ \dimen@#2%
+ \fi
+ \edef\@tempd{\the\dimen@}%
+ \setlength\dimen@{#2}%
+ \count@65536\relax
+ \ifdim\dimen@<\z@
+ \dimen@-\dimen@
+ \count@-\count@
+ \fi
+ \loop
+ \ifdim\dimen@<8192\p@
+ \dimen@\tw@\dimen@
+ \divide\count@\tw@
+ \repeat
+ \dimen@ii=\@tempd\relax
+ \divide\dimen@ii\count@
+ \divide\dimen@\dimen@ii
+ \edef#1{\strip@pt\dimen@}}
+\long\def\TY@get@body#1\end
+ {\toks@\expandafter{\the\toks@#1}\TY@find@end}
+\def\TY@find@end#1{%
+ \def\@tempa{#1}%
+ \ifx\@tempa\TY@\def\@tempa{\end{#1}}\expandafter\@tempa
+ \else\toks@\expandafter
+ {\the\toks@\end{#1}}\expandafter\TY@get@body\fi}
+\def\TY@warn{%
+ \PackageWarning{tabulary}}
+\catcode`\Z=11
+\AtBeginDocument{
+\@ifpackageloaded{colortbl}{%
+\expandafter\def\expandafter\@mkpream\expandafter#\expandafter1%
+ \expandafter{%
+ \expandafter\let\expandafter\CT@setup\expandafter\relax
+ \expandafter\let\expandafter\CT@color\expandafter\relax
+ \expandafter\let\expandafter\CT@do@color\expandafter\relax
+ \expandafter\let\expandafter\color\expandafter\relax
+ \expandafter\let\expandafter\CT@column@color\expandafter\relax
+ \expandafter\let\expandafter\CT@row@color\expandafter\relax
+ \@mkpream{#1}}
+\let\TY@@mkpream\@mkpream
+\def\TY@classz{%
+ \@classx
+ \@tempcnta\count@
+ \ifx\TY@box\TY@box@v
+ \global\advance\TY@count\@ne
+ \fi
+ \let\centering c%
+ \let\raggedright\noindent
+ \let\raggedleft\indent
+ \let\arraybackslash\relax
+ \prepnext@tok
+\expandafter\CT@extract\the\toks\@tempcnta\columncolor!\@nil
+ \ifnum\@chnum<4
+ \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@
+ \fi
+ \ifnum\@chnum=6
+ \global\expandafter\let\csname TY@F\the\TY@count\endcsname\z@
+ \fi
+ \@addtopreamble{%
+ \setbox\z@\hbox\bgroup\bgroup
+ \ifcase\@chnum
+ \hskip\stretch{.5}\kern\z@
+ \d@llarbegin\insert@column\d@llarend\hskip\stretch{.5}\or
+ \kern\z@%<<<<<<<<<<<<<<<<<<<<<<<<<<<
+ \d@llarbegin \insert@column \d@llarend \hfill \or
+ \hfill\kern\z@ \d@llarbegin \insert@column \d@llarend \or
+ $\vcenter\@startpbox{\@nextchar}\insert@column \@endpbox $\or
+ \vtop \@startpbox{\@nextchar}\insert@column \@endpbox \or
+ \vbox \@startpbox{\@nextchar}\insert@column \@endpbox \or
+ \d@llarbegin \insert@column \d@llarend \or% dubious s case
+ \TY@box\centering\or
+ \TY@box\raggedright\or
+ \TY@box\raggedleft\or
+ \TY@box\relax
+ \fi
+ \egroup\egroup
+\begingroup
+ \CT@setup
+ \CT@column@color
+ \CT@row@color
+ \CT@do@color
+\endgroup
+ \@tempdima\ht\z@
+ \advance\@tempdima\minrowclearance
+ \vrule\@height\@tempdima\@width\z@
+\unhbox\z@
+}\prepnext@tok}%
+ \def\TY@arrayrule{%
+ \TY@subwidth\arrayrulewidth
+ \@addtopreamble{{\CT@arc@\vline}}}%
+ \def\TY@classvi{\ifcase \@lastchclass
+ \@acol \or
+ \TY@subwidth\doublerulesep
+ \ifx\CT@drsc@\relax
+ \@addtopreamble{\hskip\doublerulesep}%
+ \else
+ \@addtopreamble{{\CT@drsc@\vrule\@width\doublerulesep}}%
+ \fi\or
+ \@acol \or
+ \@classvii
+ \fi}%
+}{%
+\let\CT@start\relax
+}
+}
+{\uccode`\*=`\ %
+\uppercase{\gdef\TX@verb{%
+ \leavevmode\null\TX@vwarn
+ {\ifnum0=`}\fi\ttfamily\let\\\ignorespaces
+ \@ifstar{\let~*\TX@vb}{\TX@vb}}}}
+\def\TX@vb#1{\def\@tempa##1#1{\toks@{##1}\edef\@tempa{\the\toks@}%
+ \expandafter\TX@v\meaning\@tempa\\ \\\ifnum0=`{\fi}}\@tempa!}
+\def\TX@v#1!{\afterassignment\TX@vfirst\let\@tempa= }
+\begingroup
+\catcode`\*=\catcode`\#
+\catcode`\#=12
+\gdef\TX@vfirst{%
+ \if\@tempa#%
+ \def\@tempb{\TX@v@#}%
+ \else
+ \let\@tempb\TX@v@
+ \if\@tempa\space~\else\@tempa\fi
+ \fi
+ \@tempb}
+\gdef\TX@v@*1 *2{%
+ \TX@v@hash*1##\relax\if*2\\\else~\expandafter\TX@v@\fi*2}
+\gdef\TX@v@hash*1##*2{*1\ifx*2\relax\else#\expandafter\TX@v@hash\fi*2}
+\endgroup
+\def\TX@vwarn{%
+ \@warning{\noexpand\verb may be unreliable inside tabularx/y}%
+ \global\let\TX@vwarn\@empty}
+\endinput
+%%
+%% End of file `tabulary.sty'.
diff --git a/doc/man/fsck.s3ql.1 b/doc/man/fsck.s3ql.1
new file mode 100644
index 0000000..e6c786d
--- /dev/null
+++ b/doc/man/fsck.s3ql.1
@@ -0,0 +1,137 @@
+.TH "FSCK.S3QL" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+fsck.s3ql \- Check an S3QL file system for errors
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+fsck.s3ql [options] <storage url>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBfsck.s3ql\fP command checks the file system in the location
+specified by \fIstorage url\fP for errors and attempts to repair any
+problems.
+.sp
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+.SS Amazon S3
+.sp
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \fBs3://<bucketname>\fP. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+.SS Local
+.sp
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\fBlocal://<path>\fP. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \fBlocal:///var/archive\fP.
+.SS SFTP
+.sp
+The storage URL for SFTP connections has the form
+.sp
+.nf
+.ft C
+sftp://<host>[:port]/<path>
+.ft P
+.fi
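+.sp
+For example, to check the file system stored in a hypothetical bucket
+named \fBmybucket\fP (the name is a placeholder), one might run:
+.sp
+.nf
+.ft C
+fsck.s3ql s3://mybucket
+.ft P
+.fi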
+.SH OPTIONS
+.sp
+The \fBfsck.s3ql\fP command accepts the following options.
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.BI \-\-homedir \ <path>
+.
+Directory for log files, cache and authentication info.
+(default: \fB~/.s3ql\fP)
+.TP
+.BI \-\-debug \ <module>
+.
+activate debugging output from <module>. Use \fBall\fP to get
+debug messages from all modules. This option can be
+specified multiple times.
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.TP
+.B \-\-ssl
+.
+Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext even
+for unencrypted file systems.
+.TP
+.B \-\-batch
+.
+If user input is required, exit without prompting.
+.TP
+.B \-\-force
+.
+Force checking even if file system is marked clean.
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH FILES
+.sp
+Authentication data for backends and bucket encryption passphrases are
+read from \fBauthinfo\fP in \fB~/.s3ql\fP or the directory
+specified with \fB\-\-homedir\fP. Log files are placed in the same
+directory.
+.SH EXIT STATUS
+.sp
+\fBfsck.s3ql\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/mkfs.s3ql.1 b/doc/man/mkfs.s3ql.1
new file mode 100644
index 0000000..dc57add
--- /dev/null
+++ b/doc/man/mkfs.s3ql.1
@@ -0,0 +1,150 @@
+.TH "MKFS.S3QL" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+mkfs.s3ql \- Create an S3QL file system
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+mkfs.s3ql [options] <storage url>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBmkfs.s3ql\fP command creates a new file system in the location
+specified by \fIstorage url\fP.
+.sp
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+.SS Amazon S3
+.sp
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \fBs3://<bucketname>\fP. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+.SS Local
+.sp
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\fBlocal://<path>\fP. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \fBlocal:///var/archive\fP.
+.SS SFTP
+.sp
+The storage URL for SFTP connections has the form
+.sp
+.nf
+.ft C
+sftp://<host>[:port]/<path>
+.ft P
+.fi
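+.sp
+For example, to create a new file system in a hypothetical bucket named
+\fBmybucket\fP (the name is a placeholder), one might run:
+.sp
+.nf
+.ft C
+mkfs.s3ql s3://mybucket
+.ft P
+.fi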
+.SH OPTIONS
+.sp
+The \fBmkfs.s3ql\fP command accepts the following options.
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.BI \-\-homedir \ <path>
+.
+Directory for log files, cache and authentication
+info. (default: \fB~/.s3ql\fP)
+.TP
+.BI \-\-debug \ <module>
+.
+activate debugging output from <module>. Use \fBall\fP to
+get debug messages from all modules. This option can
+be specified multiple times.
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.TP
+.B \-\-ssl
+.
+Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+.TP
+.BI \-\-s3\-location \ <name>
+.
+Storage location for new S3 buckets. Allowed values:
+\fBEU\fP, \fBus\-west\-1\fP, \fBap\-southeast\-1\fP, or \fBus\-standard\fP.
+(default: EU)
+.TP
+.BI \-L \ <name>
+.
+Filesystem label
+.TP
+.BI \-\-blocksize \ <size>
+.
+Maximum block size in KB (default: 10240)
+.TP
+.B \-\-plain
+.
+Create unencrypted file system.
+.TP
+.B \-\-force
+.
+Overwrite any existing data.
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH FILES
+.sp
+Authentication data for backends and bucket encryption passphrases are
+read from \fBauthinfo\fP in \fB~/.s3ql\fP or the directory
+specified with \fB\-\-homedir\fP. Log files are placed in the same
+directory.
+.SH EXIT STATUS
+.sp
+\fBmkfs.s3ql\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/mount.s3ql.1 b/doc/man/mount.s3ql.1
new file mode 100644
index 0000000..fa20a20
--- /dev/null
+++ b/doc/man/mount.s3ql.1
@@ -0,0 +1,195 @@
+.TH "MOUNT.S3QL" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+mount.s3ql \- Mount an S3QL file system
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+mount.s3ql [options] <storage url> <mount point>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBmount.s3ql\fP command mounts the S3QL file system stored in \fIstorage
+url\fP in the directory \fImount point\fP.
+.sp
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+.SS Amazon S3
+.sp
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \fBs3://<bucketname>\fP. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+.SS Local
+.sp
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\fBlocal://<path>\fP. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \fBlocal:///var/archive\fP.
+.SS SFTP
+.sp
+The storage URL for SFTP connections has the form
+.sp
+.nf
+.ft C
+sftp://<host>[:port]/<path>
+.ft P
+.fi
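+.sp
+For example, to mount the file system from a hypothetical bucket
+\fBmybucket\fP at \fB/mnt/s3ql\fP (both names are placeholders):
+.sp
+.nf
+.ft C
+mount.s3ql s3://mybucket /mnt/s3ql
+.ft P
+.fi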
+.SH OPTIONS
+.sp
+The \fBmount.s3ql\fP command accepts the following options.
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.BI \-\-homedir \ <path>
+.
+Directory for log files, cache and authentication
+info. (default: \fB~/.s3ql\fP)
+.TP
+.BI \-\-debug \ <module>
+.
+activate debugging output from <module>. Use \fBall\fP to
+get debug messages from all modules. This option can
+be specified multiple times.
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.TP
+.B \-\-ssl
+.
+Use SSL when connecting to remote servers. This option
+is not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+.TP
+.BI \-\-cachesize \ <size>
+.
+Cache size in KB (default: 102400 (100 MB)). Should be
+at least 10 times the blocksize of the filesystem,
+otherwise an object may be retrieved and written
+several times during a single write() or read()
+operation.
+.TP
+.BI \-\-max\-cache\-entries \ <num>
+.
+Maximum number of entries in cache (default: 768).
+Each cache entry requires one file descriptor, so if
+you increase this number you have to make sure that
+your process file descriptor limit (as set with
+\fBulimit \-n\fP) is high enough (at least the number of
+cache entries + 100; see the example after this
+option list).
+.TP
+.B \-\-allow\-other
+.
+Normally, only the user who called \fBmount.s3ql\fP can
+access the mount point. This user then also has full
+access to it, independent of individual file
+permissions. If the \fB\-\-allow\-other\fP option is
+specified, other users can access the mount point as
+well and individual file permissions are taken into
+account for all users.
+.TP
+.B \-\-allow\-root
+.
+Like \fB\-\-allow\-other\fP, but restrict access to the
+mounting user and the root user.
+.TP
+.B \-\-fg
+.
+Do not daemonize, stay in foreground
+.TP
+.B \-\-single
+.
+Run in single threaded mode. If you don\(aqt understand
+this, then you don\(aqt need it.
+.TP
+.B \-\-upstart
+.
+Stay in foreground and raise SIGSTOP once mountpoint
+is up.
+.TP
+.B \-\-profile
+.
+Create profiling information. If you don\(aqt understand
+this, then you don\(aqt need it.
+.TP
+.BI \-\-compress \ <name>
+.
+Compression algorithm to use when storing new data.
+Allowed values: \fBlzma\fP, \fBbzip2\fP, \fBzlib\fP, \fBnone\fP.
+(default: \fBlzma\fP)
+.TP
+.BI \-\-metadata\-upload\-interval \ <seconds>
+.
+Interval in seconds between complete metadata uploads.
+Set to 0 to disable. Default: 24h.
+.TP
+.BI \-\-compression\-threads \ <no>
+.
+Number of parallel compression and encryption threads
+to use (default: 1).
+.UNINDENT
+.UNINDENT
+.UNINDENT
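+.sp
+For example, to mount with a larger cache, the file descriptor limit
+should be raised first (the bucket name, mount point and values are
+placeholders):
+.sp
+.nf
+.ft C
+ulimit \-n 4196
+mount.s3ql \-\-max\-cache\-entries 4096 s3://mybucket /mnt/s3ql
+.ft P
+.fi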
+.SH FILES
+.sp
+Authentication data for backends and bucket encryption passphrases are
+read from \fBauthinfo\fP in \fB~/.s3ql\fP or the directory
+specified with \fB\-\-homedir\fP. Log files are placed in the same
+directory.
+.SH EXIT STATUS
+.sp
+\fBmount.s3ql\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/s3qladm.1 b/doc/man/s3qladm.1
new file mode 100644
index 0000000..24cb81a
--- /dev/null
+++ b/doc/man/s3qladm.1
@@ -0,0 +1,156 @@
+.TH "S3QLADM" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+s3qladm \- Manage S3QL buckets
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+s3qladm [options] <action> <storage url>
+.ft P
+.fi
+.sp
+where \fBaction\fP may be one of \fBpassphrase\fP,
+\fBupgrade\fP, \fBdelete\fP or \fBdownload\-metadata\fP.
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBs3qladm\fP command performs various operations on S3QL buckets.
+The file system contained in the bucket \fImust not be mounted\fP when
+using \fBs3qladm\fP, or things will go badly wrong.
+.sp
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+.SS Amazon S3
+.sp
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form \fBs3://<bucketname>\fP. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+.SS Local
+.sp
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+\fBlocal://<path>\fP. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. \fBlocal:///var/archive\fP.
+.SS SFTP
+.sp
+The storage URL for SFTP connections has the form
+.sp
+.nf
+.ft C
+sftp://<host>[:port]/<path>
+.ft P
+.fi
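+.sp
+For example, to change the encryption passphrase of a file system
+stored in a hypothetical bucket \fBmybucket\fP (the name is a
+placeholder):
+.sp
+.nf
+.ft C
+s3qladm passphrase s3://mybucket
+.ft P
+.fi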
+.SH OPTIONS
+.sp
+The \fBs3qladm\fP command accepts the following options.
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.BI \-\-debug \ <module>
+.
+activate debugging output from <module>. Use \fBall\fP to get
+debug messages from all modules. This option can be
+specified multiple times.
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.BI \-\-homedir \ <path>
+.
+Directory for log files, cache and authentication info.
+(default: \fB~/.s3ql\fP)
+.TP
+.B \-\-version
+.
+just print program version and exit
+.TP
+.B \-\-ssl
+.
+Use SSL when connecting to remote servers. This option is
+not enabled by default, because for encrypted file
+systems, all data is already encrypted anyway, and
+authentication data is never transmitted in plaintext
+even for unencrypted file systems.
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.sp
+Hint: run \fBs3qladm <action> \-\-help\fP to get help on the additional
+arguments that the different actions take.
+.SH ACTIONS
+.sp
+The following actions may be specified:
+.INDENT 0.0
+.TP
+.B passphrase
+.
+Changes the encryption passphrase of the bucket.
+.TP
+.B upgrade
+.
+Upgrade the file system contained in the bucket to the newest revision.
+.TP
+.B delete
+.
+Delete the bucket and all its contents.
+.TP
+.B download\-metadata
+.
+Interactively download backups of the file system metadata.
+.UNINDENT
+.SH FILES
+.sp
+Authentication data for backends and bucket encryption passphrases are
+read from \fBauthinfo\fP in \fB~/.s3ql\fP or the directory
+specified with \fB\-\-homedir\fP. Log files are placed in the same
+directory.
+.SH EXIT STATUS
+.sp
+\fBs3qladm\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/s3qlcp.1 b/doc/man/s3qlcp.1
new file mode 100644
index 0000000..b2bc8b2
--- /dev/null
+++ b/doc/man/s3qlcp.1
@@ -0,0 +1,146 @@
+.TH "S3QLCP" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+s3qlcp \- Copy-on-write replication on S3QL file systems
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+s3qlcp [options] <source\-dir> <dest\-dir>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBs3qlcp\fP command duplicates the directory tree \fBsource\-dir\fP
+into \fBdest\-dir\fP without physically copying the file contents.
+Both source and destination must lie inside the same S3QL file system.
+.sp
+The replication will not take any additional space. Only if one of
+the directories is modified later on will the modified data take
+additional storage space.
+.sp
+\fBs3qlcp\fP can only be called by the user that mounted the file system
+and (if the file system was mounted with \fB\-\-allow\-other\fP or \fB\-\-allow\-root\fP)
+the root user. This limitation might be removed in the future (see \fI\%issue 155\fP).
+.sp
+Note that:
+.INDENT 0.0
+.IP \(bu 2
+.
+After the replication, both source and target directory will still
+be completely ordinary directories. You can regard \fB<src>\fP as a
+snapshot of \fB<target>\fP or vice versa. However, the most common
+usage of \fBs3qlcp\fP is to regularly duplicate the same source
+directory, say \fBdocuments\fP, to different target directories. For a
+monthly replication, for example, the target directories would typically
+be named something like \fBdocuments_January\fP for the replication in
+January, \fBdocuments_February\fP for the one in February, etc. In this
+case it is clear that the target directories should be regarded as
+snapshots of the source directory (see the example after this list).
+.IP \(bu 2
+.
+Exactly the same effect could be achieved by an ordinary copy
+program like \fBcp \-a\fP. However, this procedure would be orders of
+magnitude slower, because \fBcp\fP would have to read every file
+completely (so that S3QL would have to fetch all the data over the
+network from the backend) before writing it to the destination folder.
+.IP \(bu 2
+.
+Before starting with the replication, S3QL has to flush the local
+cache. So if you just copied lots of new data into the file system
+that has not yet been uploaded, replication will take longer than
+usual.
+.UNINDENT
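+.sp
+For example, a monthly snapshot of a \fBdocuments\fP directory could be
+taken as follows (the mount point and directory names are
+placeholders):
+.sp
+.nf
+.ft C
+s3qlcp /mnt/s3ql/documents /mnt/s3ql/documents_january
+.ft P
+.fi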
+.SS Snapshotting vs Hardlinking
+.sp
+Snapshot support in S3QL is inspired by the hardlinking feature that
+is offered by programs like \fI\%rsync\fP or
+\fI\%storeBackup\fP.
+These programs can create a hardlink instead of copying a file if an
+identical file already exists in the backup. However, using hardlinks
+has two large disadvantages:
+.INDENT 0.0
+.IP \(bu 2
+.
+backups and restores always have to be made with a special program
+that takes care of the hardlinking. The backup must not be touched
+by any other programs (they may make changes that inadvertently
+affect other hardlinked files)
+.IP \(bu 2
+.
+special care needs to be taken to handle files which are already
+hardlinked (the restore program needs to know that the hardlink was
+not just introduced by the backup program to save space)
+.UNINDENT
+.sp
+S3QL snapshots do not have these problems, and they can be used with
+any backup program.
+.SH OPTIONS
+.sp
+The \fBs3qlcp\fP command accepts the following options:
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH EXIT STATUS
+.sp
+\fBs3qlcp\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/s3qlctrl.1 b/doc/man/s3qlctrl.1
new file mode 100644
index 0000000..f8c0d56
--- /dev/null
+++ b/doc/man/s3qlctrl.1
@@ -0,0 +1,133 @@
+.TH "S3QLCTRL" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+s3qlctrl \- Control a mounted S3QL file system
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+s3qlctrl [options] <action> <mountpoint> ...
+.ft P
+.fi
+.sp
+where \fBaction\fP may be one of \fBflushcache\fP,
+\fBupload\-meta\fP, \fBcachesize\fP or
+\fBlog\fP.
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBs3qlctrl\fP command performs various actions on the S3QL file system mounted
+in \fBmountpoint\fP.
+.sp
+The following actions may be specified:
+.INDENT 0.0
+.TP
+.B flushcache
+.
+Uploads all changed file data to the backend.
+.TP
+.B upload\-meta
+.
+Upload metadata to the backend. All file system operations will
+block while a snapshot of the metadata is prepared for upload.
+.TP
+.B cachesize
+.
+Changes the cache size of the file system (see also the example
+after this list). This action requires an additional argument that
+specifies the new cache size in kB, so the complete command line is:
+.sp
+.nf
+.ft C
+s3qlctrl [options] cachesize <mountpoint> <new\-cache\-size>
+.ft P
+.fi
+.TP
+.B log
+.
+Change the amount of information that is logged into the
+\fB~/.s3ql/mount.log\fP file. The complete syntax is:
+.sp
+.nf
+.ft C
+s3qlctrl [options] log <mountpoint> <level> [<module> [<module> ...]]
+.ft P
+.fi
+.sp
+Here \fBlevel\fP is the desired new log level and may be one of
+\fIdebug\fP, \fIinfo\fP or \fIwarn\fP. One or more \fBmodule\fP arguments
+may only be specified with the \fIdebug\fP level; they restrict the
+debug output to just the listed modules.
+.UNINDENT
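+.sp
+For example, to raise the cache size of a file system mounted at a
+hypothetical \fB/mnt/s3ql\fP to 200 MB (204800 kB):
+.sp
+.nf
+.ft C
+s3qlctrl cachesize /mnt/s3ql 204800
+.ft P
+.fi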
+.SH OPTIONS
+.sp
+The \fBs3qlctrl\fP command also accepts the following options, no matter
+what specific action is being invoked:
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.sp
+Hint: run \fBs3qlctrl <action> \-\-help\fP to get help on the additional
+arguments that the different actions take.
+.SH EXIT STATUS
+.sp
+\fBs3qlctrl\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/s3qllock.1 b/doc/man/s3qllock.1
new file mode 100644
index 0000000..5892a46
--- /dev/null
+++ b/doc/man/s3qllock.1
@@ -0,0 +1,118 @@
+.TH "S3QLLOCK" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+s3qllock \- Make trees on an S3QL file system immutable
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+s3qllock [options] <directory>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBs3qllock\fP command makes a directory tree in an S3QL file
+system immutable. Immutable trees can no longer be changed in any way
+whatsoever. You cannot add new files or directories, and you cannot
+change or delete existing files and directories.
+rid of an immutable tree is to use the \fBs3qlrm\fP command.
+.SH RATIONALE
+.sp
+Immutability is a feature designed for backups. Traditionally, backups
+have been made on external tape drives. Once a backup was made, the
+tape was removed and locked away on a shelf. This has the
+great advantage that the contents of the backup are now permanently
+fixed. Nothing (short of physical destruction) can change or delete
+files in the backup.
+.sp
+In contrast, when backing up into an online storage system like S3QL,
+all backups are available every time the file system is mounted.
+Nothing prevents a file in an old backup from being changed again
+later on. In the worst case, this may make your entire backup system
+worthless. Imagine that your system gets infected by a nasty virus
+that simply deletes all files it can find \-\- if the virus is active
+while the backup file system is mounted, the virus will destroy all
+your old backups as well!
+.sp
+Even if the possibility of a malicious virus or trojan horse is
+excluded, being able to change a backup after it has been made is
+generally not a good idea. A common S3QL use case is to keep the file
+system mounted at all times and periodically create backups with
+\fBrsync \-a\fP. This allows every user to recover her files from a
+backup without having to call the system administrator. However, this
+also allows every user to accidentally change or delete files \fIin\fP one
+of the old backups.
+.sp
+Making a backup immutable protects you against all these problems.
+Unless you happen to run into a virus that was specifically programmed
+to attack S3QL file systems, backups can be neither deleted nor
+changed after they have been made immutable.
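+.sp
+A typical workflow might thus look like the following (the paths are
+placeholders):
+.sp
+.nf
+.ft C
+rsync \-a ~/documents/ /mnt/s3ql/backup_may/
+s3qllock /mnt/s3ql/backup_may
+.ft P
+.fi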
+.SH OPTIONS
+.sp
+The \fBs3qllock\fP command accepts the following options:
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH EXIT STATUS
+.sp
+\fBs3qllock\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/s3qlrm.1 b/doc/man/s3qlrm.1
new file mode 100644
index 0000000..66af40f
--- /dev/null
+++ b/doc/man/s3qlrm.1
@@ -0,0 +1,90 @@
+.TH "S3QLRM" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+s3qlrm \- Fast tree removal on S3QL file systems
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+s3qlrm [options] <directory>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBs3qlrm\fP command recursively deletes files and directories on an
+S3QL file system. Although \fBs3qlrm\fP is faster than using e.g.
+\fBrm \-r\fP, the main reason for its existence is that it allows
+you to delete immutable trees (which can be created with
+\fBs3qllock\fP) as well.
+.sp
+Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.
+.SH OPTIONS
+.sp
+The \fBs3qlrm\fP command accepts the following options:
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH EXIT STATUS
+.sp
+\fBs3qlrm\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/s3qlstat.1 b/doc/man/s3qlstat.1
new file mode 100644
index 0000000..b9ce4f6
--- /dev/null
+++ b/doc/man/s3qlstat.1
@@ -0,0 +1,89 @@
+.TH "S3QLSTAT" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+s3qlstat \- Gather S3QL file system statistics
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+s3qlstat [options] <mountpoint>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBs3qlstat\fP command prints statistics about the S3QL file system mounted
+at \fBmountpoint\fP.
+.sp
+\fBs3qlstat\fP can only be called by the user that mounted the file system
+and (if the file system was mounted with \fB\-\-allow\-other\fP or
+\fB\-\-allow\-root\fP) the root user. This limitation might be
+removed in the future (see \fI\%issue 155\fP).
+.SH OPTIONS
+.sp
+The \fBs3qlstat\fP command accepts the following options:
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH EXIT STATUS
+.sp
+\fBs3qlstat\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/man/umount.s3ql.1 b/doc/man/umount.s3ql.1
new file mode 100644
index 0000000..b1ea3e8
--- /dev/null
+++ b/doc/man/umount.s3ql.1
@@ -0,0 +1,100 @@
+.TH "UMOUNT.S3QL" "1" "May 20, 2011" "1.0.1" "S3QL"
+.SH NAME
+umount.s3ql \- Unmount an S3QL file system
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.\" Man page generated from reStructeredText.
+.
+.SH SYNOPSIS
+.sp
+.nf
+.ft C
+umount.s3ql [options] <mount point>
+.ft P
+.fi
+.SH DESCRIPTION
+.sp
+S3QL is a file system for online data storage. Before using S3QL, make
+sure to consult the full documentation (rather than just the man pages
+which only briefly document the available userspace commands).
+.sp
+The \fBumount.s3ql\fP command unmounts the S3QL file system mounted in the
+directory \fImount point\fP and blocks until all data has been uploaded to
+the storage backend.
+.sp
+Only the user who mounted the file system with \fBmount.s3ql\fP
+is able to unmount it with \fBumount.s3ql\fP. If you are root and want to
+unmount an S3QL file system mounted by an ordinary user, you have to
+use the \fBfusermount \-u\fP or \fBumount\fP command instead.
+Note that these commands do not block until all data has been
+uploaded, so if you use them instead of \fBumount.s3ql\fP then
+you should manually wait for the \fBmount.s3ql\fP process to
+terminate before shutting down the system.
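+.sp
+For instance, root could detach a file system mounted by another user
+at a hypothetical \fB/mnt/s3ql\fP with:
+.sp
+.nf
+.ft C
+fusermount \-u /mnt/s3ql
+.ft P
+.fi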
+.SH OPTIONS
+.sp
+The \fBumount.s3ql\fP command accepts the following options.
+.INDENT 0.0
+.INDENT 3.5
+.INDENT 0.0
+.TP
+.B \-\-debug
+.
+activate debugging output
+.TP
+.B \-\-quiet
+.
+be really quiet
+.TP
+.B \-\-version
+.
+just print program version and exit
+.TP
+.B \-\-lazy, \-z
+.
+Lazy umount. Detaches the file system immediately, even if
+there are still open files. The data will be uploaded in the
+background once all open files have been closed.
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH EXIT STATUS
+.sp
+\fBumount.s3ql\fP returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+.SH SEE ALSO
+.sp
+The S3QL homepage is at \fI\%http://code.google.com/p/s3ql/\fP.
+.sp
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are \fB/usr/share/doc/s3ql\fP or
+\fB/usr/local/doc/s3ql\fP.
+.SH COPYRIGHT
+2008-2011, Nikolaus Rath
+.\" Generated by docutils manpage writer.
+.\"
+.
diff --git a/doc/manual.pdf b/doc/manual.pdf
new file mode 100644
index 0000000..0553294
--- /dev/null
+++ b/doc/manual.pdf
Binary files differ
diff --git a/rst/_static/sphinxdoc.css b/rst/_static/sphinxdoc.css
new file mode 100644
index 0000000..38ca95a
--- /dev/null
+++ b/rst/_static/sphinxdoc.css
@@ -0,0 +1,340 @@
+/**
+ * Sphinx stylesheet -- sphinxdoc theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ letter-spacing: -0.01em;
+ line-height: 150%;
+ text-align: center;
+ background-color: #BFD1D4;
+ color: black;
+ padding: 0;
+ border: 1px solid #aaa;
+
+ margin: 0px 80px 0px 80px;
+ min-width: 740px;
+}
+
+div.document {
+ background-color: white;
+ text-align: left;
+ background-image: url(contents.png);
+ background-repeat: repeat-x;
+}
+
+div.bodywrapper {
+ margin: 0 240px 0 0;
+ border-right: 1px solid #ccc;
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+ font-size: 1em;
+}
+
+div.related ul {
+ background-image: url(navigation.png);
+ height: 2em;
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+}
+
+div.related ul li {
+ margin: 0;
+ padding: 0;
+ height: 2em;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: #EE9816;
+}
+
+div.related ul li a:hover {
+ color: #3CA8E7;
+}
+
+div.sphinxsidebarwrapper {
+ padding: 0;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0.5em 15px 15px 0;
+ width: 210px;
+ float: right;
+ font-size: 1em;
+ text-align: left;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin: 1em 0 0.5em 0;
+ font-size: 1em;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border: 1px solid #86989B;
+ background-color: #AFC1C4;
+}
+
+div.sphinxsidebar h3 a {
+ color: white;
+}
+
+div.sphinxsidebar ul {
+ padding-left: 1.5em;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+}
+
+div.footer {
+ background-color: #E3EFF1;
+ color: #86989B;
+ padding: 3px 8px 3px 0;
+ clear: both;
+ font-size: 0.8em;
+ text-align: right;
+}
+
+div.footer a {
+ color: #86989B;
+ text-decoration: underline;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: #CA7900;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #2491CF;
+}
+
+/* div.body a {
+ text-decoration: underline;
+} */
+
+h1 {
+ margin: 0;
+ padding: 0.7em 0 0.3em 0;
+ font-size: 1.5em;
+ color: #11557C;
+}
+
+h2 {
+ margin: 1.3em 0 0.2em 0;
+ font-size: 1.35em;
+ padding: 0;
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.1em;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+ display: none;
+ margin: 0 0 0 0.3em;
+ padding: 0 0.2em 0 0.2em;
+ color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+ display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+ color: #777;
+ background-color: #eee;
+}
+
+a.headerlink {
+ color: #c60f0f!important;
+ font-size: 1em;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none!important;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'Deja Vu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.01em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border-bottom: 1px solid #ddd;
+ color: #333;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+a tt {
+ border: 0;
+ color: #CA7900;
+}
+
+a tt:hover {
+ color: #2491CF;
+}
+
+pre {
+ font-family: 'Deja Vu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 2px 7px;
+ border: 1px solid #ccc;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ background-color: #f7f7f7;
+ padding: 0;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin: 0;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border-bottom: 1px solid #86989B;
+ font-weight: bold;
+ background-color: #AFC1C4;
+}
+
+div.warning {
+ border: 1px solid #940000;
+}
+
+div.warning p.admonition-title {
+ background-color: #CF0000;
+ border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+div.versioninfo {
+ margin: 1em 0 0 0;
+ border: 1px solid #ccc;
+ background-color: #DDEAF0;
+ padding: 8px;
+ line-height: 1.3em;
+ font-size: 0.9em;
+}
+
+/* Niko's Styles */
+
+div.body li p {
+ margin-bottom: 0.8em;
+ margin-top: 0.8em;
+}
+
+table.option-list td,
+table.option-list th {
+ border: 0px;
+}
+
+strong.program {
+ font-weight: normal;
+ font-style: italic;
+}
diff --git a/rst/_templates/layout.html b/rst/_templates/layout.html
new file mode 100644
index 0000000..5ae78e1
--- /dev/null
+++ b/rst/_templates/layout.html
@@ -0,0 +1,72 @@
+{% extends "!layout.html" %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
+{#
+ - Do not mention Python stuff in the search box instructions
+ - Display complete TOC
+#}
+{%- macro sidebar() %}
+ {%- if not embedded %}{% if not theme_nosidebar|tobool %}
+ <div class="sphinxsidebar">
+ <div class="sphinxsidebarwrapper">
+ {%- block sidebarlogo %}
+ {%- if logo %}
+ <p class="logo"><a href="{{ pathto(master_doc) }}">
+ <img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
+ </a></p>
+ {%- endif %}
+ {%- endblock %}
+ {%- block sidebartoc %}
+ <h3><a href="{{ pathto(master_doc) }}">{{ _('Table Of Contents') }}</a></h3>
+ {{ toctree() }}
+ {%- endblock %}
+ {%- block sidebarrel %}
+ {#
+ {%- if prev %}
+ <h4>{{ _('Previous topic') }}</h4>
+ <p class="topless"><a href="{{ prev.link|e }}"
+ title="{{ _('previous chapter') }}">{{ prev.title }}</a></p>
+ {%- endif %}
+ {%- if next %}
+ <h4>{{ _('Next topic') }}</h4>
+ <p class="topless"><a href="{{ next.link|e }}"
+ title="{{ _('next chapter') }}">{{ next.title }}</a></p>
+ {%- endif %}
+ #}
+ {%- endblock %}
+ {%- block sidebarsourcelink %}
+ {%- if show_source and has_source and sourcename %}
+ <h3>{{ _('This Page') }}</h3>
+ <ul class="this-page-menu">
+ <li><a href="{{ pathto('_sources/' + sourcename, true)|e }}"
+ rel="nofollow">{{ _('Show Source') }}</a></li>
+ </ul>
+ {%- endif %}
+ {%- endblock %}
+ {%- if customsidebar %}
+ {% include customsidebar %}
+ {%- endif %}
+ {%- block sidebarsearch %}
+ {%- if pagename != "search" %}
+ <div id="searchbox" style="display: none">
+ <h3>{{ _('Quick search') }}</h3>
+ <form class="search" action="{{ pathto('search') }}" method="get">
+ <input type="text" name="q" size="18" />
+ <input type="submit" value="{{ _('Go') }}" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ {{ _('Enter search terms.') }}
+ </p>
+ </div>
+ <script type="text/javascript">$('#searchbox').show(0);</script>
+ {%- endif %}
+ {%- endblock %}
+ </div>
+ </div>
+ {%- endif %}{% endif %}
+{%- endmacro %}
diff --git a/rst/about.rst b/rst/about.rst
new file mode 100644
index 0000000..bef3684
--- /dev/null
+++ b/rst/about.rst
@@ -0,0 +1,91 @@
+.. -*- mode: rst -*-
+
+============
+ About S3QL
+============
+
+S3QL is a file system that stores all its data online. It supports
+`Amazon S3 <http://aws.amazon.com/s3>`_ as well as arbitrary
+SFTP servers and effectively provides you with a hard disk of dynamic,
+infinite capacity that can be accessed from any computer with internet
+access.
+
+S3QL provides a standard, full-featured UNIX file system that is
+conceptually indistinguishable from any local file system.
+Furthermore, S3QL has additional features like compression,
+encryption, data de-duplication, immutable trees and snapshotting
+which make it especially suitable for online backup and archival.
+
+S3QL is designed to favor simplicity and elegance over performance and
+feature-creep. Care has been taken to make the source code as
+readable and serviceable as possible. Solid error detection and error
+handling have been included from the very first line, and S3QL comes
+with extensive automated test cases for all its components.
+
+Features
+========
+
+
+* **Transparency.** Conceptually, S3QL is indistinguishable from a
+ local file system. For example, it supports hardlinks, symlinks,
+ ACLs and standard unix permissions, extended attributes and file
+ sizes up to 2 TB.
+
+* **Dynamic Size.** The size of an S3QL file system grows and shrinks
+ dynamically as required.
+
+* **Compression.** Before storage, all data may be compressed with the
+ LZMA, bzip2 or deflate (gzip) algorithm.
+
+* **Encryption.** After compression (but before upload), all data can
+ be AES encrypted with a 256-bit key. An additional SHA256 HMAC checksum
+ is used to protect the data against manipulation.
+
+* **Data De-duplication.** If several files have identical contents,
+ the redundant data will be stored only once. This works across all
+ files stored in the file system, and also if only some parts of the
+ files are identical while other parts differ.
+
+* **Immutable Trees.** Directory trees can be made immutable, so that
+ their contents can no longer be changed in any way whatsoever. This
+ can be used to ensure that backups can not be modified after they
+ have been made.
+
+* **Copy-on-Write/Snapshotting.** S3QL can replicate entire directory
+ trees without using any additional storage space. Only if one of the
+ copies is modified will the changed part of the data take up
+ additional storage space. This can be used to create
+ intelligent snapshots that preserve the state of a directory at
+ different points in time using a minimum amount of space.
+
+* **High Performance independent of network latency.** All operations
+ that do not write or read file contents (like creating directories
+ or moving, renaming, and changing permissions of files and
+ directories) are very fast because they are carried out without any
+ network transactions.
+
+ S3QL achieves this by saving the entire file and directory structure
+ in a database. This database is locally cached and the remote
+ copy updated asynchronously.
+
+* **Support for low bandwidth connections.** S3QL splits file contents
+ into smaller blocks and caches blocks locally. This minimizes both
+ the number of network transactions required for reading and writing
+ data, and the amount of data that has to be transferred when only
+ parts of a file are read or written.
+
+
+
+Development Status
+==================
+
+Two years of beta testing by about 93 users did not reveal any
+data-critical bugs, and S3QL was declared **stable** with the release
+of version 1.0 on May 13th, 2011. Note that this does not mean that
+S3QL is bug-free. S3QL still has several known and probably many more
+unknown bugs. However, there is a high probability that these bugs,
+while inconvenient, will not endanger any stored data.
+
+Please report any problems on the `mailing list
+<http://groups.google.com/group/s3ql>`_ or the `issue tracker
+<http://code.google.com/p/s3ql/issues/list>`_.
diff --git a/rst/adm.rst b/rst/adm.rst
new file mode 100644
index 0000000..3e50f64
--- /dev/null
+++ b/rst/adm.rst
@@ -0,0 +1,83 @@
+.. -*- mode: rst -*-
+
+
+Managing Buckets
+=====================
+
+
+The `s3qladm` command performs various operations on S3QL buckets.
+The file system contained in the bucket *must not be mounted* when
+using `s3qladm`, or things will go badly wrong.
+
+The syntax is ::
+
+ s3qladm [options] <action> <storage-url>
+
+where :var:`action` may be one of :program:`passphrase`,
+:program:`upgrade`, :program:`delete` or :program:`download-metadata`.
+
+The :program:`s3qladm` command accepts the following general options,
+no matter which specific action is being invoked:
+
+.. pipeinclude:: ../bin/s3qladm --help
+ :start-after: show this help message and exit
+
+
+Changing the Passphrase
+-----------------------
+
+To change the passphrase of a bucket, use the `s3qladm` command::
+
+ s3qladm passphrase <storage url>
+
+The passphrase can only be changed when the bucket is not mounted.
+
+Upgrading the file system
+-------------------------
+
+If you have installed a new version of S3QL, it may sometimes be
+necessary to upgrade the file system metadata as well. Note that in
+this case the file system can no longer be accessed with older
+versions of S3QL after the upgrade.
+
+During the upgrade you have to make sure that the command is not
+interrupted, and that no one else tries to mount, check or upgrade the
+file system at the same time.
+
+To upgrade a file system from the previous to the current revision,
+execute ::
+
+ s3qladm upgrade <storage url>
+
+
+Deleting a file system
+----------------------
+
+A file system can be deleted with::
+
+ s3qladm delete <storage url>
+
+This physically deletes all the data and file system structures.
+
+
+Restoring Metadata Backups
+--------------------------
+
+If the most-recent copy of the file system metadata has been damaged
+irreparably, it is possible to restore one of the automatically
+created backup copies.
+
+The command ::
+
+ s3qladm download-metadata <storage url>
+
+will give you a list of the available metadata backups and allow you
+to download them. This will create two new files in the current
+directory, ending in ``.db`` and ``.params``. To actually use the
+downloaded backup, you need to move these files into the ``~/.s3ql/``
+directory and run ``fsck.s3ql``.
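+
+For example, assuming the storage URL `s3://mybucket` (both the URL
+and the downloaded file names shown here are illustrative; the actual
+names include a date), the procedure might look like this::
+
+ s3qladm download-metadata s3://mybucket
+ mv s3ql_metadata.db s3ql_metadata.params ~/.s3ql/
+ fsck.s3ql s3://mybucket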
+
+.. WARNING::
+
+ You should probably not use this functionality without having asked
+ for help on the mailing list first (see :ref:`resources`).
diff --git a/rst/backends.rst b/rst/backends.rst
new file mode 100644
index 0000000..480ff90
--- /dev/null
+++ b/rst/backends.rst
@@ -0,0 +1,292 @@
+.. -*- mode: rst -*-
+
+==================
+ Storage Backends
+==================
+
+S3QL can use different protocols to store the file system data.
+Independent of the backend that you use, the place where your file
+system data is being stored is called a *bucket*. (This is mostly for
+historical reasons, since initially S3QL supported only the Amazon S3
+backend).
+
+
+On Backend Reliability
+======================
+
+S3QL has been designed for use with a storage backend where data loss
+is so infrequent that it can be completely neglected (e.g. the Amazon
+S3 backend). If you decide to use a less reliable backend, you should
+keep the following warning in mind and read this section carefully.
+
+.. WARNING::
+
+ S3QL is not able to compensate for any failures of the backend. In
+ particular, it is not able to reconstruct any data that has been lost
+ or corrupted by the backend. The persistence and durability of data
+ stored in an S3QL file system is limited and determined by the
+ backend alone.
+
+
+On the plus side, if a backend loses or corrupts some of the stored
+data, S3QL *will* detect the problem. Missing data will be detected
+when running `fsck.s3ql` or when attempting to access the data in the
+mounted file system. In the latter case you will get an IO error, and
+on unmounting S3QL will warn you that the file system is damaged and
+you need to run `fsck.s3ql`.
+
+`fsck.s3ql` will report all the affected files and move them into the
+`/lost+found` directory of the file system.
+
+You should be aware that, because of S3QL's data de-duplication
+feature, the consequences of a data loss in the backend can be
+significantly more severe than you may expect. More concretely, a data
+loss in the backend at time *x* may cause data that is written *after*
+time *x* to be lost as well. What may happen is this:
+
+#. You store an important file in the S3QL file system.
+#. The backend loses the data blocks of this file. As long as you
+ do not access the file or run `fsck.s3ql`, S3QL
+ is not aware that the data has been lost by the backend.
+#. You save an additional copy of the important file in a different
+ location on the same S3QL file system.
+#. S3QL detects that the contents of the new file are identical to the
+ data blocks that have been stored earlier. Since at this point S3QL
+ is not aware that these blocks have been lost by the backend, it
+ does not save another copy of the file contents in the backend but
+ relies on the (presumably) existing blocks instead.
+#. Therefore, even though you saved another copy, you still do not
+ have a backup of the important file (since both copies refer to the
+ same data blocks that have been lost by the backend).
+
+As one can see, this effect becomes less important the more often
+one runs `fsck.s3ql`, since `fsck.s3ql` will make S3QL aware of any
+blocks that the backend may have lost. Figuratively, this establishes
+a "checkpoint": data loss in the backend that occurred before running
+`fsck.s3ql` can not affect any file system operations performed after
+running `fsck.s3ql`.
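+
+Such regular checks can be automated, e.g. with a cron job. The
+following is only a sketch (the schedule and storage URL are
+illustrative), and the file system must not be mounted while the
+check runs::
+
+ # crontab entry: run fsck.s3ql every Sunday at 03:30
+ 30 3 * * sun fsck.s3ql s3://mybucket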
+
+
+Nevertheless (as said at the beginning), the recommended way to use
+S3QL is in combination with a sufficiently reliable storage backend.
+In that case none of the above will ever be a concern.
+
+
+The `authinfo` file
+===================
+
+Most backends first try to read the file `~/.s3ql/authinfo` to determine
+the username and password for connecting to the remote host. If this
+fails, both username and password are read from the terminal.
+
+The `authinfo` file has to contain entries of the form ::
+
+ backend <backend> machine <host> login <user> password <password>
+
+So to use the login `joe` with password `jibbadup` when using the FTP
+backend to connect to the host `backups.joesdomain.com`, you would
+specify ::
+
+ backend ftp machine backups.joesdomain.com login joe password jibbadup
+
+
+Consistency Guarantees
+======================
+
+The different backends provide different types of *consistency
+guarantees*. Informally, a consistency guarantee tells you how fast
+the backend will apply changes to the stored data.
+
+S3QL defines the following three levels:
+
+* **Read-after-Write Consistency.** This is the strongest consistency
+ guarantee. If a backend offers read-after-write consistency, it
+ guarantees that as soon as you have committed any changes to the
+ backend, subsequent requests will take into account these changes.
+
+* **Read-after-Create Consistency.** If a backend provides only
+ read-after-create consistency, only the creation of a new object is
+ guaranteed to be taken into account for subsequent requests. This
+ means that, for example, if you overwrite data in an existing
+ object, subsequent requests may still return the old data for a
+ certain period of time.
+
+* **Eventual consistency.** This is the lowest consistency level.
+ Basically, any changes that you make to the backend may not be
+ visible for a certain amount of time after the change has been made.
+ However, you are guaranteed that no change will be lost. All changes
+ will *eventually* become visible.
+
+
+
+As long as your backend provides read-after-write or read-after-create
+consistency, you do not have to worry about consistency guarantees at
+all. However, if you plan to use a backend with only eventual
+consistency, you have to be a bit careful in some situations.
+
+
+.. _eventual_consistency:
+
+Dealing with Eventual Consistency
+---------------------------------
+
+.. NOTE::
+
+ The following applies only to storage backends that do not provide
+ read-after-create or read-after-write consistency. Currently,
+ this is only the Amazon S3 backend *if used with the US-Standard
+ storage region*. If you use a different storage backend, or the S3
+ backend with a different storage region, this section does not apply
+ to you.
+
+While the file system is mounted, S3QL is able to automatically handle
+all issues related to the weak eventual consistency guarantee.
+However, some issues may arise during the mount process and when the
+file system is checked.
+
+Suppose that you mount the file system, store some new data, delete
+some old data and unmount it again. Now remember that eventual
+consistency means that there is no guarantee that these changes will
+be visible immediately. At least in theory it is therefore possible
+that if you mount the file system again, S3QL does not see any of the
+changes that you have made and presents you with an "old version" of
+the file system without them. Even worse, if you notice the problem
+and unmount the file system, S3QL will upload the old state (which
+S3QL necessarily has to consider as current) and thereby permanently
+overwrite the newer version (even though this change may not become
+immediately visible either).
+
+The same problem applies when checking the file system. If the backend
+provides S3QL with only partially updated data, S3QL has no way to
+find out if this is a real consistency problem that needs to be fixed or
+if it is only a temporary problem that will resolve itself
+automatically (because there are still changes that have not become
+visible yet).
+
+While this may seem to be a rather big problem, the likelihood of it
+occurring is rather low. In practice, most storage providers rarely
+need more than a few seconds to apply incoming changes, so to trigger
+this problem one would have to unmount and remount the file system in
+a very short time window. Many people therefore make sure that they
+wait a few minutes between successive mounts (or file system checks)
+and decide that the remaining risk is negligible.
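+
+In a script, such a waiting period is easy to add. The following is
+only a sketch; the delay, mount point and storage URL are arbitrary
+illustrations::
+
+ umount.s3ql /mnt/backup
+ sleep 600 # give the backend time to apply the changes
+ mount.s3ql s3://mybucket /mnt/backup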
+
+Nevertheless, the eventual consistency guarantee does not impose an
+upper limit on the time that it may take for a change to become
+visible. Therefore there is no "safe" waiting time that would
+completely eliminate this problem; a theoretical possibility always
+remains.
+
+
+
+The Amazon S3 Backend
+=====================
+
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form `s3://<bucketname>`. Bucket names must conform to the `S3
+Bucket Name Restrictions`_.
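+
+For example, a new file system in such a bucket could be created with
+(the bucket name is just an illustration)::
+
+ mkfs.s3ql s3://mybucket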
+
+The S3 backend offers exceptionally strong reliability guarantees. As
+of August 2010, Amazon guarantees a durability of 99.999999999% per
+year. In other words, if you store a thousand million objects then on
+average you would lose less than one object in a hundred years.
+
+The Amazon S3 backend provides read-after-create consistency for the
+EU, Asia-Pacific and US-West storage regions. *For the US-Standard
+storage region, Amazon S3 provides only eventual consistency* (please
+refer to :ref:`eventual_consistency` for information about
+what this entails).
+
+When connecting to Amazon S3, S3QL uses an unencrypted HTTP
+connection, so if you want your data to stay confidential, you have
+to create the S3QL file system with encryption (this is also the default).
+
+When reading the authentication information for the S3 backend from
+the `authinfo` file, the `host` field is ignored, i.e. the first entry
+with `s3` as a backend will be used. For example ::
+
+ backend s3 machine any login myAWSaccessKeyId password myAwsSecretAccessKey
+
+Note that the bucket names come from a global pool, so chances are
+that your favorite name has already been taken by another S3 user.
+Usually a longer bucket name containing some random numbers, like
+`19283712_yourname_s3ql`, will work better.
+
+If you do not already have one, you need to obtain an Amazon S3
+account from `Amazon AWS <http://aws.amazon.com/>`_. The account is
+free; you will pay only for the amount of storage that you actually
+use.
+
+Note that the login and password for accessing S3 are not the user id
+and password that you use to log into the Amazon Webpage, but the "AWS
+access key id" and "AWS secret access key" shown under `My
+Account/Access Identifiers
+<https://aws-portal.amazon.com/gp/aws/developer/account/index.html?ie=UTF8&action=access-key>`_.
+
+.. _`S3 Bucket Name Restrictions`: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
+
+.. NOTE::
+
+ S3QL also allows you to use `reduced redundancy storage
+ <http://aws.amazon.com/s3/#protecting>`_ by using ``s3rr://``
+ instead of ``s3://`` in the storage url. However, this is not
+ recommended. The reason is a combination of three factors:
+
+ * RRS has a relatively low reliability: on average you lose one
+ out of every ten-thousand objects a year. So you can expect to
+ occasionally lose some data.
+
+ * When `fsck.s3ql` asks Amazon S3 for a list of the stored objects,
+ this list includes even those objects that have been lost.
+ Therefore `fsck.s3ql` *can not detect lost objects* and lost data
+ will only become apparent when you try to actually read from a
+ file whose data has been lost. This is a (very unfortunate)
+ peculiarity of Amazon S3.
+
+ * Due to the data de-duplication feature of S3QL, unnoticed lost
+ objects may cause subsequent data loss later in time (see `On
+ Backend Reliability`_ for details).
+
+ In other words, you should really only store an S3QL file system
+ using RRS if you know exactly what you are getting into.
+
+
+
+
+The Local Backend
+=================
+
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+`local://<path>`. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. `local:///var/archive`.
+
+The local backend provides read-after-write consistency.
+
+The SFTP Backend
+================
+
+The SFTP backend uses the SFTP protocol, a file transfer protocol
+similar to FTP that runs over an encrypted SSH connection.
+It provides read-after-write consistency.
+
+Note that the SFTP backend is rather slow and has not been tested
+as extensively as the S3 and Local backends.
+
+The storage URL for SFTP connections has the form ::
+
+ sftp://<host>[:port]/<path>
+
+The SFTP backend will always ask you for a password if you haven't
+defined one in `~/.s3ql/authinfo`. However, public key authentication
+is tried first and the password will only be used if the public key
+authentication fails.
+
+The public and private keys will be read from the standard files in
+`~/.ssh/`. Note that S3QL will refuse to connect to a computer with an
+unknown host key; to add the key to your local keyring you have to
+establish a connection to that computer with the standard SSH command
+line programs first.
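+
+For example (host name and remote path are illustrative; the initial
+`ssh` call merely establishes a connection so that the host key can
+be accepted)::
+
+ ssh joe@backups.joesdomain.com true
+ mount.s3ql sftp://backups.joesdomain.com/home/joe/s3ql-data /mnt/s3ql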
+
+
+
diff --git a/rst/conf.py b/rst/conf.py
new file mode 100644
index 0000000..a36b2fb
--- /dev/null
+++ b/rst/conf.py
@@ -0,0 +1,244 @@
+# -*- coding: utf-8 -*-
+#
+# This file contains the Sphinx configuration to generate the
+# HTML and PDF documentation from the plain text (RST) source.
+#
+# This file does not contain any S3QL documentation itself.
+#
+
+import sys, os
+
+# Add a custom role for command line options that does not try to
+# reference anything.
+def add_literal_role(rolename):
+ from docutils.parsers.rst import roles
+ from docutils import nodes
+ nodeclass = nodes.literal
+ generic = roles.GenericRole(rolename, nodeclass)
+ role = roles.CustomRole(rolename, generic, {'classes': [rolename]})
+ roles.register_local_role(rolename, role)
+add_literal_role('cmdopt')
+add_literal_role('var') # A variable defined in a :samp: role
+
+# Add our own Pygments Lexer
+import pygments.lexers._mapping as pmap
+pmap.LEXERS['CommandLineLexer'] = ('cmdline_lexer', 'CommandLine',
+ ('CommandLine', 'commandline'), (), ())
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [ 'sphinx.ext.ifconfig', 'sphinx_pipeinclude' ]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# Warn about unresolved references
+nitpicky = True
+
+# General information about the project.
+project = u'S3QL'
+copyright = u'2008-2011, Nikolaus Rath'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+#version =
+# The full version, including alpha/beta/rc tags.
+#release =
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = [ 'include' ]
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+default_role = 'file'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'colorful'
+highlight_language = 'commandline'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'sphinxdoc'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+# html_theme_options = {'stickysidebar': 'true'}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+html_use_modindex = False
+
+# If false, no index is generated.
+html_use_index = False
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'S3QLdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'manual.tex', u'S3QL Documentation',
+ u'Nikolaus Rath', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('man/mkfs', 'mkfs.s3ql', u'Create an S3QL file system',
+ None, 1),
+ ('man/mount', 'mount.s3ql', u'Mount an S3QL file system',
+ None, 1),
+ ('man/umount', 'umount.s3ql', u'Unmount an S3QL file system',
+ None, 1),
+ ('man/fsck', 'fsck.s3ql', u'Check an S3QL file system for errors',
+ None, 1),
+ ('man/ctrl', 's3qlctrl', u'Control a mounted S3QL file system',
+ None, 1),
+ ('man/adm', 's3qladm', u'Manage S3QL buckets',
+ None, 1),
+ ('man/cp', 's3qlcp', u'Copy-on-write replication on S3QL file systems',
+ None, 1),
+ ('man/lock', 's3qllock', u'Make trees on an S3QL file system immutable',
+ None, 1),
+ ('man/rm', 's3qlrm', u'Fast tree removal on S3QL file systems',
+ None, 1),
+ ('man/stat', 's3qlstat', u'Gather S3QL file system statistics',
+ None, 1),
+ ('man/expire_backups', 'expire_backups', u'Intelligently expire old backups',
+ None, 1),
+ ('man/pcp', 'pcp', u'Recursive, parallel copy of directory trees',
+ None, 1),
+]
+
+
diff --git a/rst/contrib.rst b/rst/contrib.rst
new file mode 100644
index 0000000..3ee2323
--- /dev/null
+++ b/rst/contrib.rst
@@ -0,0 +1,87 @@
+.. -*- mode: rst -*-
+
+=====================
+Contributed Programs
+=====================
+
+S3QL comes with a few contributed programs that are not part of the
+core distribution (and are therefore not installed automatically by
+default), but which may nevertheless be useful. These programs are in
+the `contrib` directory of the source distribution or in
+`/usr/share/doc/s3ql/contrib` if you installed S3QL from a package.
+
+
+benchmark.py
+============
+
+This program measures your uplink bandwidth and compression speed and
+recommends a compression algorithm for optimal throughput.
+
+
+s3_copy.py
+==========
+
+This program physically duplicates an Amazon S3 bucket. It can be used to
+migrate buckets to a different storage region or storage class
+(standard or reduced redundancy).
+
+.. _pcp:
+
+pcp.py
+======
+
+``pcp.py`` is a wrapper program that starts several rsync processes to
+copy directory trees in parallel. This is important because
+transferring files in parallel significantly enhances performance when
+copying data from an S3QL file system (see :ref:`copy_performance` for
+details).
+
+To recursively copy the directory ``/mnt/home-backup`` into
+``/home/joe`` using 8 parallel processes and preserving permissions,
+you would execute ::
+
+ pcp.py -a --processes=8 /mnt/home-backup/ /home/joe
+
+
+s3ql_backup.sh
+==============
+
+This is an example script that demonstrates how to set up a simple but
+powerful backup solution using S3QL and `rsync
+<http://samba.org/rsync>`_.
+
+The `s3ql_backup.sh` script automates the following steps:
+
+#. Mount the file system
+#. Replicate the previous backup with :ref:`s3qlcp <s3qlcp>`
+#. Update the new copy with the data from the backup source using rsync
+#. Make the new backup immutable with :ref:`s3qllock <s3qllock>`
+#. Delete old backups that are no longer needed
+#. Unmount the file system
+
+The backups are stored in directories of the form
+`YYYY-MM-DD_HH:mm:ss` and the `expire_backups.py`_ command is used to
+delete old backups.
+
+
+expire_backups.py
+=================
+
+:program:`expire_backups.py` is a program to intelligently remove old
+backups that are no longer needed.
+
+.. include:: man/expire_backups.rst
+ :start-after: begin_main_content
+ :end-before: end_main_content
+
+For a full list of available options, run :program:`expire_backups.py
+--help`.
+
+
+s3ql.conf
+=========
+
+``s3ql.conf`` is an example upstart job definition file. It defines a
+job that automatically mounts an S3QL file system on system start, and
+properly unmounts it when the system is shut down.
+
diff --git a/rst/fsck.rst b/rst/fsck.rst
new file mode 100644
index 0000000..24ee2ed
--- /dev/null
+++ b/rst/fsck.rst
@@ -0,0 +1,16 @@
+.. -*- mode: rst -*-
+
+
+Checking for Errors
+===================
+
+If, for some reason, the file system has not been correctly unmounted,
+or if you suspect that there might be errors, you should run the
+`fsck.s3ql` utility. It has the following syntax::
+
+ fsck.s3ql [options] <storage url>
+
+This command accepts the following options:
+
+.. pipeinclude:: ../bin/fsck.s3ql --help
+ :start-after: show this help message and exit
diff --git a/rst/include/about.rst b/rst/include/about.rst
new file mode 100644
index 0000000..27488e9
--- /dev/null
+++ b/rst/include/about.rst
@@ -0,0 +1,11 @@
+.. -*- mode: rst -*-
+
+.. only:: man
+
+ S3QL is a file system for online data storage. Before using S3QL, make
+ sure to consult the full documentation (rather than just the man pages
+ which only briefly document the available userspace commands).
+
+
+
+
diff --git a/rst/include/backends.rst b/rst/include/backends.rst
new file mode 100644
index 0000000..5892edd
--- /dev/null
+++ b/rst/include/backends.rst
@@ -0,0 +1,28 @@
+.. -*- mode: rst -*-
+
+The form of the storage url depends on the backend that is used. The
+following backends are supported:
+
+Amazon S3
+---------
+
+To store your file system in an Amazon S3 bucket, use a storage URL of
+the form `s3://<bucketname>`. Bucket names must conform to the S3 Bucket
+Name Restrictions.
+
+
+Local
+------
+
+The local backend stores file system data in a directory on your
+computer. The storage URL for the local backend has the form
+`local://<path>`. Note that you have to write three consecutive
+slashes to specify an absolute path, e.g. `local:///var/archive`.
+
+SFTP
+----
+
+The storage URL for SFTP connections has the form ::
+
+ sftp://<host>[:port]/<path>
+
diff --git a/rst/include/postman.rst b/rst/include/postman.rst
new file mode 100644
index 0000000..853254b
--- /dev/null
+++ b/rst/include/postman.rst
@@ -0,0 +1,22 @@
+.. -*- mode: rst -*-
+
+
+Exit Status
+===========
+
+|command| returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+See Also
+========
+
+The S3QL homepage is at http://code.google.com/p/s3ql/.
+
+The full S3QL documentation should also be installed somewhere on your
+system; conventional locations are :file:`/usr/share/doc/s3ql` or
+:file:`/usr/local/doc/s3ql`.
+
+
+
+
diff --git a/rst/index.rst b/rst/index.rst
new file mode 100644
index 0000000..f3b5b72
--- /dev/null
+++ b/rst/index.rst
@@ -0,0 +1,23 @@
+.. -*- mode: rst -*-
+
+===================
+ S3QL User's Guide
+===================
+
+.. toctree::
+ :maxdepth: 2
+
+ about
+ installation
+ backends
+ mkfs
+ adm
+ mount
+ special
+ umount
+ fsck
+ contrib
+ tips
+ issues
+ man/index
+ resources
diff --git a/rst/installation.rst b/rst/installation.rst
new file mode 100644
index 0000000..b57325e
--- /dev/null
+++ b/rst/installation.rst
@@ -0,0 +1,101 @@
+.. -*- mode: rst -*-
+
+
+==============
+ Installation
+==============
+
+S3QL depends on several other programs and libraries that have to be
+installed first. The best method to satisfy these dependencies depends
+on your distribution. In some cases S3QL and all its dependencies can
+be installed with as few as three commands, while in other cases more work
+may be required.
+
+The `S3QL Wiki <http://code.google.com/p/s3ql/w/list>`_ contains
+installation instructions for quite a few different Linux
+distributions. You should only use the generic instructions in this
+manual if your distribution is not included in the `distribution-specific
+installation instructions
+<http://code.google.com/p/s3ql/w/list?q=label:Installation>`_ on the wiki.
+
+
+Dependencies
+============
+
+The following is a list of the programs and libraries required for
+running S3QL. Generally, you should first check if your distribution
+already provides a suitable package and only install from source if
+that is not the case.
+
+* Kernel version 2.6.9 or newer. Starting with kernel 2.6.26
+ you will get significantly better write performance, so you should
+ actually use *2.6.26 or newer whenever possible*.
+
+* The `FUSE Library <http://fuse.sourceforge.net/>`_ should already be
+ installed on your system. However, you have to make sure that you
+ have at least version 2.8.0.
+
+* The `PyCrypto++ Python Module
+ <http://pypi.python.org/pypi/pycryptopp>`_. To check if this module
+ is installed, try to execute `python -c 'import pycryptopp'`.
+
+* The `argparse Python Module
+ <http://pypi.python.org/pypi/argparse>`_. To check if this module is
+ installed, try to execute `python -c 'import argparse; print
+ argparse.__version__'`. If argparse is installed, this will print
+ the version number. You need version 1.1 or later.
+
+* The `APSW Python Module <http://code.google.com/p/apsw/>`_. To check
+ which (if any) version of APSW is installed, run the command ::
+
+ python -c 'import apsw; print apsw.apswversion(), apsw.sqlitelibversion()'
+
+ If APSW is installed, this should print two version numbers which
+ both have to be at least 3.7.0.
+
+* The `PyLibLZMA Python module
+ <http://pypi.python.org/pypi/pyliblzma>`_. To check if this module
+ is installed, execute `python -c 'import lzma; print
+ lzma.__version__'`. This should print a version number. You need at
+ least version 0.5.3.
+
+* The `Python LLFUSE module
+ <http://code.google.com/p/python-llfuse/>`_. To check if this module
+ is installed, execute `python -c 'import llfuse; print
+ llfuse.__version__'`. This should print a version number. You need at
+ least version 0.29.
+
+ Note that earlier S3QL versions shipped with a built-in version of
+ this module. If you are upgrading from such a version, make sure to
+ completely remove the old S3QL version first.
+
+* If you want to use the SFTP backend, then you also need the
+ `Paramiko Python Module <http://www.lag.net/paramiko/>`_. To check
+ if this module is installed, try to execute `python -c 'import
+ paramiko'`.
+
+
+.. _inst-s3ql:
+
+Installing S3QL
+===============
+
+To install S3QL itself, proceed as follows:
+
+1. Download S3QL from http://code.google.com/p/s3ql/downloads/list
+2. Unpack it into a folder of your choice
+3. Run `python setup.py test` to run a self-test. If this fails, ask
+ for help on the `mailing list
+ <http://groups.google.com/group/s3ql>`_ or report a bug in the
+ `issue tracker <http://code.google.com/p/s3ql/issues/list>`_.
+
+Now you have three options:
+
+* You can run the S3QL commands from the `bin/` directory.
+
+* You can install S3QL system-wide for all users. To do that, you
+ have to run `sudo python setup.py install`.
+
+* You can install S3QL into `~/.local` by executing `python
+ setup.py install --user`. In this case you should make sure that
+ `~/.local/bin` is in your `$PATH` environment variable.
diff --git a/rst/issues.rst b/rst/issues.rst
new file mode 100644
index 0000000..29b76ce
--- /dev/null
+++ b/rst/issues.rst
@@ -0,0 +1,89 @@
+.. -*- mode: rst -*-
+
+============
+Known Issues
+============
+
+* S3QL is rather slow when an application tries to write data in
+ unreasonably small chunks. If a 1 MB file is copied in chunks of 1
+ KB, this will take more than 10 times as long as when it's copied
+ with the (recommended) chunk size of 128 KB.
+
+ This is a limitation of the FUSE library (which does not yet support
+ write caching) which will hopefully be addressed in some future FUSE
+ version.
+
+ Most applications, including e.g. GNU `cp` and `rsync`, use
+ reasonably large buffers and are therefore not affected by this
+ problem and perform very efficiently on S3QL file systems.
+
+ However, if you encounter unexpectedly slow performance with a
+ specific program, this might be due to the program using very small
+ write buffers. Although this is not really a bug in the program,
+ it might be worth asking the program's authors for help.
+
+* S3QL always updates file and directory access times as if the ``relatime``
+ mount option had been specified: the access time ("atime") is only updated
+ if it is currently earlier than either the status change time
+ ("ctime") or modification time ("mtime").
+
+* S3QL directories always have an `st_nlink` value of 1. This may confuse
+ programs that rely on directories having `st_nlink` values of *(2 +
+ number of sub directories)*.
+
+ Note that this is not a bug in S3QL. Including sub directories in
+ the `st_nlink` value is a Unix convention, but by no means a
+ requirement. If an application blindly relies on this convention
+ being followed, then this is a bug in the application.
+
+ A prominent example is early versions of GNU find, which required
+ the `--noleaf` option to work correctly on S3QL file systems. This
+ bug has already been fixed in recent find versions.
+
+
+* In theory, S3QL is not fully compatible with NFS. Since S3QL does
+ not support *inode generation numbers*, NFS clients may (once again,
+ in theory) accidentally read or write the wrong file in the
+ following situation:
+
+ #. An S3QL file system is exported over NFS
+ #. NFS client 1 opens a file A
+ #. Another NFS client 2 (or the server itself) deletes file A (without
+ client 1 knowing about this)
+ #. A new file B is created by either of the clients or the server
+ #. NFS client 1 tries to read or write file A (which has actually already been deleted).
+
+ In this situation it is possible that NFS client 1 actually writes
+ or reads the newly created file B instead. The chances of this are 1
+ to (2^32 - *n*) where *n* is the total number of directory entries
+ in the S3QL file system (as displayed by `s3qlstat`).
+
+ Luckily enough, as long as you have fewer than about 2 thousand
+ million directory entries (2^31), the chances for this are totally
+ irrelevant and you don't have to worry about it.
+
+* The `umount` and `fusermount -u` commands will *not* block until all
+ data has been uploaded to the backend. (This is a FUSE limitation
+ that will hopefully be removed in the future, see `issue 159
+ <http://code.google.com/p/s3ql/issues/detail?id=159>`_). If you use
+ either command to unmount an S3QL file system, you have to take care
+ to explicitly wait for the `mount.s3ql` process to terminate before
+ you shut down or restart the system. Therefore it is generally not a
+ good idea to mount an S3QL file system in `/etc/fstab` (you should
+ use a dedicated init script instead).
+
+* S3QL relies on the backends not to run out of space. This is a given
+ for big storage providers like Amazon S3, but you may stumble upon
+ this if you store buckets on, e.g., a small SFTP server.
+
+ If there is no space left in the backend, attempts to write more
+ data into the S3QL file system will fail, and the file system will
+ be left in an inconsistent state that requires a file system check
+ (make sure to free up space in the backend before running the
+ check).
+
+ Unfortunately, there is no way to handle insufficient space in the
+ backend without leaving the file system inconsistent. Since
+ S3QL first writes data into the cache, it can no longer return an
+ error when it later turns out that the cache can not be committed to
+ the backend.
diff --git a/rst/man/adm.rst b/rst/man/adm.rst
new file mode 100644
index 0000000..c23865e
--- /dev/null
+++ b/rst/man/adm.rst
@@ -0,0 +1,66 @@
+.. -*- mode: rst -*-
+
+==============================
+The :program:`s3qladm` command
+==============================
+
+Synopsis
+========
+
+::
+
+ s3qladm [options] <action> <storage url>
+
+where :var:`action` may be one of :program:`passphrase`,
+:program:`upgrade`, :program:`delete` or :program:`download-metadata`.
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command performs various operations on S3QL buckets.
+The file system contained in the bucket *must not be mounted* when
+using |command|, or things will go badly wrong.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options.
+
+.. pipeinclude:: ../../bin/s3qladm --help
+ :start-after: show this help message and exit
+
+Actions
+=======
+
+The following actions may be specified:
+
+passphrase
+ Changes the encryption passphrase of the bucket.
+
+upgrade
+ Upgrade the file system contained in the bucket to the newest revision.
+
+delete
+ Delete the bucket and all its contents.
+
+download-metadata
+ Interactively download backups of the file system metadata.
+
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :program:`s3qladm`
diff --git a/rst/man/cp.rst b/rst/man/cp.rst
new file mode 100644
index 0000000..d0cbb41
--- /dev/null
+++ b/rst/man/cp.rst
@@ -0,0 +1,100 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlcp` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlcp [options] <source-dir> <dest-dir>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command duplicates the directory tree :var:`source-dir`
+into :var:`dest-dir` without physically copying the file contents.
+Both source and destination must lie inside the same S3QL file system.
+
+.. begin_main_content
+
+The replication will not take any additional space. Only if one of
+the directories is modified later on will the modified data take up
+additional storage space.
+
+`s3qlcp` can only be called by the user that mounted the file system
+and (if the file system was mounted with `--allow-other` or `--allow-root`)
+the root user. This limitation might be removed in the future (see `issue 155
+<http://code.google.com/p/s3ql/issues/detail?id=155>`_).
+
+Note that:
+
+* After the replication, both source and target directory will still
+ be completely ordinary directories. You can regard `<src>` as a
+ snapshot of `<target>` or vice versa. However, the most common
+ usage of `s3qlcp` is to regularly duplicate the same source
+ directory, say `documents`, to different target directories. For a
+ monthly replication, for example, the target directories would
+ typically be named something like `documents_January` for the
+ replication in January, `documents_February` for the replication in
+ February, etc. In this case it is clear that the target directories
+ should be regarded as snapshots of the source directory (a concrete
+ invocation is shown after this list).
+
+* Exactly the same effect could be achieved by an ordinary copy
+ program like `cp -a`. However, this procedure would be orders of
+ magnitude slower, because `cp` would have to read every file
+ completely (so that S3QL had to fetch all the data over the network
+ from the backend) before writing them into the destination folder.
+
+* Before starting with the replication, S3QL has to flush the local
+ cache. So if you just copied lots of new data into the file system
+ that has not yet been uploaded, replication will take longer than
+ usual.
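+
+For example, the monthly snapshot described above (directory names as
+in the example from the first item) could be created with::
+
+ s3qlcp documents documents_January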
+
+
+
+Snapshotting vs Hardlinking
+---------------------------
+
+Snapshot support in S3QL is inspired by the hardlinking feature that
+is offered by programs like `rsync <http://www.samba.org/rsync>`_ or
+`storeBackup <http://savannah.nongnu.org/projects/storebackup>`_.
+These programs can create a hardlink instead of copying a file if an
+identical file already exists in the backup. However, using hardlinks
+has two large disadvantages:
+
+* backups and restores always have to be made with a special program
+ that takes care of the hardlinking. The backup must not be touched
+ by any other programs (they may make changes that inadvertently
+ affect other hardlinked files)
+
+* special care needs to be taken to handle files which are already
+ hardlinked (the restore program needs to know that the hardlink was
+ not just introduced by the backup program to save space)
+
+S3QL snapshots do not have these problems, and they can be used with
+any backup program.
+
+
+.. end_main_content
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qlcp --help
+ :start-after: show this help message and exit
+
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`s3qlcp`
+
diff --git a/rst/man/ctrl.rst b/rst/man/ctrl.rst
new file mode 100644
index 0000000..4afa33b
--- /dev/null
+++ b/rst/man/ctrl.rst
@@ -0,0 +1,69 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlctrl` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlctrl [options] <action> <mountpoint> ...
+
+where :var:`action` may be one of :program:`flushcache`,
+:program:`upload-meta`, :program:`cachesize` or
+:program:`log`.
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command performs various actions on the S3QL file system mounted
+in :var:`mountpoint`.
+
+The following actions may be specified:
+
+flushcache
+ Uploads all changed file data to the backend.
+
+upload-meta
+ Uploads metadata to the backend. All file system operations will
+ block while a snapshot of the metadata is prepared for upload.
+
+cachesize
+ Changes the cache size of the file system. This action requires an
+ additional argument that specifies the new cache size in kB, so the
+ complete command line is::
+
+ s3qlctrl [options] cachesize <mountpoint> <new-cache-size>
+
+log
+ Changes the amount of information that is logged into
+ :file:`~/.s3ql/mount.log` file. The complete syntax is::
+
+ s3qlctrl [options] log <mountpoint> <level> [<module> [<module> ...]]
+
+ Here :var:`level` is the desired new log level and may be either of
+ *debug*, *info* or *warn*. One or more :var:`module` arguments may
+ only be specified with the *debug* level; they restrict the debug
+ output to just the listed modules.
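+
+As a concrete example, to limit the cache of the file system mounted
+at `/mnt/s3ql` to roughly 200 MB, you could run (mount point and
+value are illustrative; the size is given in kB)::
+
+ s3qlctrl cachesize /mnt/s3ql 204800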
+
+
+Options
+=======
+
+The |command| command also accepts the following options, no matter
+what specific action is being invoked:
+
+.. pipeinclude:: ../../bin/s3qlctrl --help
+ :start-after: show this help message and exit
+
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`s3qlctrl`
+
diff --git a/rst/man/expire_backups.rst b/rst/man/expire_backups.rst
new file mode 100644
index 0000000..6b2f538
--- /dev/null
+++ b/rst/man/expire_backups.rst
@@ -0,0 +1,106 @@
+.. -*- mode: rst -*-
+
+
+=======================================
+ The :program:`expire_backups` command
+=======================================
+
+Synopsis
+========
+
+::
+
+ expire_backups [options] <age> [<age> ...]
+
+
+Description
+===========
+
+The |command| command intelligently removes old backups that are no
+longer needed.
+
+.. begin_main_content
+
+To define what backups you want to keep for how long, you define a
+number of *age ranges*. :program:`expire_backups` ensures that you
+will have at least one backup in each age range at all times. It will
+keep exactly as many backups as are required for that and delete any
+backups that become redundant.
+
+Age ranges are specified by giving a list of range boundaries in terms
+of backup cycles. Every time you create a new backup, the existing
+backups age by one cycle.
+
+Example: when :program:`expire_backups` is called with the age range
+definition ``1 3 7 14 31``, it will guarantee that you always have the
+following backups available:
+
+#. A backup that is 0 to 1 cycles old (i.e, the most recent backup)
+#. A backup that is 1 to 3 cycles old
+#. A backup that is 3 to 7 cycles old
+#. A backup that is 7 to 14 cycles old
+#. A backup that is 14 to 31 cycles old
+
+.. NOTE::
+
+ If you do backups in fixed intervals, then one cycle will be
+ equivalent to the backup interval. The advantage of specifying the
+ age ranges in terms of backup cycles rather than days or weeks is
+ that it allows you to gracefully handle irregular backup intervals.
+ Imagine that for some reason you do not turn on your computer for
+ one month. Now all your backups are at least a month old, and if you
+ had specified the above backup strategy in terms of absolute ages,
+ they would all be deleted! Specifying age ranges in terms of backup
+ cycles avoids this sort of problem.
+
+:program:`expire_backups` usage is simple. It requires backups to have
+names of the form ``year-month-day_hour:minute:seconds``
+(``YYYY-MM-DD_HH:mm:ss``) and works on all backups in the current
+directory. So for the above backup strategy, the correct invocation
+would be::
+
+ expire_backups.py 1 3 7 14 31
+
+When storing your backups on an S3QL file system, you probably want to
+specify the ``--use-s3qlrm`` option as well. This tells
+:program:`expire_backups` to use the :ref:`s3qlrm <s3qlrm>` command to
+delete directories.
+
+:program:`expire_backups` uses a "state file" to keep track of which
+backups are how many cycles old (since this cannot be inferred from
+the dates contained in the directory names). The standard name for
+this state file is :file:`.expire_backups.dat`. If this file gets
+damaged or deleted, :program:`expire_backups` no longer knows the ages
+of the backups and refuses to work. In this case you can use the
+:cmdopt:`--reconstruct-state` option to try to reconstruct the state
+from the backup dates. However, the accuracy of this reconstruction
+depends strongly on how rigorous you have been with making backups (it
+is only completely correct if the time between subsequent backups has
+always been exactly the same), so it's generally a good idea not to
+tamper with the state file.
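+
+For example, if the state file has been lost, the following
+invocation (re-using the age ranges from above) asks
+:program:`expire_backups` to re-derive the backup ages from the
+directory names::
+
+ expire_backups.py --reconstruct-state 1 3 7 14 31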
+
+.. end_main_content
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../contrib/expire_backups.py --help
+ :start-after: show this help message and exit
+
+Exit Status
+===========
+
+|command| returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+See Also
+========
+
+|command| is shipped as part of S3QL, http://code.google.com/p/s3ql/.
+
+.. |command| replace:: :command:`expire_backups`
+
diff --git a/rst/man/fsck.rst b/rst/man/fsck.rst
new file mode 100644
index 0000000..ef6ed2d
--- /dev/null
+++ b/rst/man/fsck.rst
@@ -0,0 +1,44 @@
+.. -*- mode: rst -*-
+
+================================
+The :program:`fsck.s3ql` command
+================================
+
+Synopsis
+========
+
+::
+
+ fsck.s3ql [options] <storage url>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command checks the file system in the location
+specified by *storage url* for errors and attempts to repair any
+problems.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options.
+
+.. pipeinclude:: ../../bin/fsck.s3ql --help
+ :start-after: show this help message and exit
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`fsck.s3ql`
diff --git a/rst/man/index.rst b/rst/man/index.rst
new file mode 100644
index 0000000..39d1154
--- /dev/null
+++ b/rst/man/index.rst
@@ -0,0 +1,23 @@
+
+Manpages
+========
+
+The man pages are installed with S3QL on your system and can be viewed
+with the :command:`man` command. For reference, they are also included
+here in the User's Guide.
+
+.. toctree::
+ :maxdepth: 1
+
+ mkfs
+ adm
+ mount
+ stat
+ ctrl
+ cp
+ rm
+ lock
+ umount
+ fsck
+ pcp
+ expire_backups
diff --git a/rst/man/lock.rst b/rst/man/lock.rst
new file mode 100644
index 0000000..f17bf32
--- /dev/null
+++ b/rst/man/lock.rst
@@ -0,0 +1,78 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qllock` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qllock [options] <directory>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The :program:`s3qllock` command makes a directory tree in an S3QL file
+system immutable. Immutable trees can no longer be changed in any way
+whatsoever. You can not add new files or directories and you can not
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the :program:`s3qlrm` command.
+
+Rationale
+=========
+
+.. begin_main_content
+
+Immutability is a feature designed for backups. Traditionally, backups
+have been made on external tape drives. Once a backup was made, the
+tape drive was removed and locked somewhere in a shelf. This has the
+great advantage that the contents of the backup are now permanently
+fixed. Nothing (short of physical destruction) can change or delete
+files in the backup.
+
+In contrast, when backing up into an online storage system like S3QL,
+all backups are available every time the file system is mounted.
+Nothing prevents a file in an old backup from being changed again
+later on. In the worst case, this may make your entire backup system
+worthless. Imagine that your system gets infected by a nasty virus
+that simply deletes all files it can find -- if the virus is active
+while the backup file system is mounted, the virus will destroy all
+your old backups as well!
+
+Even if the possibility of a malicious virus or trojan horse is
+excluded, being able to change a backup after it has been made is
+generally not a good idea. A common S3QL use case is to keep the file
+system mounted at all times and periodically create backups with
+:program:`rsync -a`. This allows every user to recover her files from a
+backup without having to call the system administrator. However, this
+also allows every user to accidentally change or delete files *in* one
+of the old backups.
+
+Making a backup immutable protects you against all these problems.
+Unless you happen to run into a virus that was specifically programmed
+to attack S3QL file systems, backups can be neither deleted nor
+changed after they have been made immutable.
+
+
+.. end_main_content
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qllock --help
+ :start-after: show this help message and exit
+
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`s3qllock`
+
diff --git a/rst/man/mkfs.rst b/rst/man/mkfs.rst
new file mode 100644
index 0000000..c61270a
--- /dev/null
+++ b/rst/man/mkfs.rst
@@ -0,0 +1,43 @@
+.. -*- mode: rst -*-
+
+================================
+The :program:`mkfs.s3ql` command
+================================
+
+Synopsis
+========
+
+::
+
+ mkfs.s3ql [options] <storage url>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command creates a new file system in the location
+specified by *storage url*.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options.
+
+.. pipeinclude:: ../../bin/mkfs.s3ql --help
+ :start-after: show this help message and exit
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`mkfs.s3ql`
diff --git a/rst/man/mount.rst b/rst/man/mount.rst
new file mode 100644
index 0000000..3905c03
--- /dev/null
+++ b/rst/man/mount.rst
@@ -0,0 +1,48 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`mount.s3ql` command
+=================================
+
+Synopsis
+========
+
+::
+
+ mount.s3ql [options] <storage url> <mount point>
+
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command mounts the S3QL file system stored in *storage
+url* in the directory *mount point*.
+
+.. include:: ../include/backends.rst
+
+
+Options
+=======
+
+The |command| command accepts the following options.
+
+.. pipeinclude:: ../../bin/mount.s3ql --help
+ :start-after: show this help message and exit
+
+
+Files
+=====
+
+Authentication data for backends and bucket encryption passphrases are
+read from :file:`authinfo` in :file:`~/.s3ql` or the directory
+specified with :cmdopt:`--homedir`. Log files are placed in the same
+directory.
+
+.. include:: ../include/postman.rst
+
+
+.. |command| replace:: :command:`mount.s3ql`
+
diff --git a/rst/man/pcp.rst b/rst/man/pcp.rst
new file mode 100644
index 0000000..cd7a66c
--- /dev/null
+++ b/rst/man/pcp.rst
@@ -0,0 +1,46 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`pcp` command
+=================================
+
+Synopsis
+========
+
+::
+
+ pcp [options] <source> [<source> ...] <destination>
+
+
+Description
+===========
+
+The |command| command is a wrapper that starts several
+:program:`rsync` processes to copy directory trees in parallel. This
+allows much better copying performance on file systems that, like
+S3QL, have relatively high latency when retrieving individual files.
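+
+For example, to copy a backup tree off an S3QL mount point with
+several parallel processes (the paths are illustrative)::
+
+ pcp /mnt/s3ql/backup-2010-04-21/ /home/restore/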
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../contrib/pcp.py --help
+ :start-after: show this help message and exit
+
+Exit Status
+===========
+
+|command| returns exit code 0 if the operation succeeded and 1 if some
+error occurred.
+
+
+See Also
+========
+
+|command| is shipped as part of S3QL, http://code.google.com/p/s3ql/.
+
+.. |command| replace:: :command:`pcp`
+
diff --git a/rst/man/rm.rst b/rst/man/rm.rst
new file mode 100644
index 0000000..0832e27
--- /dev/null
+++ b/rst/man/rm.rst
@@ -0,0 +1,41 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlrm` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlrm [options] <directory>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command recursively deletes files and directories on an
+S3QL file system. Although |command| is faster than using e.g.
+:command:`rm -r`, the main reason for its existence is that it allows
+you to delete immutable trees (which can be created with
+:program:`s3qllock`) as well.
+
+Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qlrm --help
+ :start-after: show this help message and exit
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`s3qlrm`
+
diff --git a/rst/man/stat.rst b/rst/man/stat.rst
new file mode 100644
index 0000000..7578e19
--- /dev/null
+++ b/rst/man/stat.rst
@@ -0,0 +1,41 @@
+.. -*- mode: rst -*-
+
+
+=================================
+The :program:`s3qlstat` command
+=================================
+
+Synopsis
+========
+
+::
+
+ s3qlstat [options] <mountpoint>
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command prints statistics about the S3QL file system mounted
+at :var:`mountpoint`.
+
+|command| can only be called by the user that mounted the file system
+and (if the file system was mounted with :cmdopt:`--allow-other` or
+:cmdopt:`--allow-root`) the root user. This limitation might be
+removed in the future (see `issue 155
+<http://code.google.com/p/s3ql/issues/detail?id=155>`_).
+
+
+Options
+=======
+
+The |command| command accepts the following options:
+
+.. pipeinclude:: ../../bin/s3qlstat --help
+ :start-after: show this help message and exit
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`s3qlstat`
+
diff --git a/rst/man/umount.rst b/rst/man/umount.rst
new file mode 100644
index 0000000..e1ef7f0
--- /dev/null
+++ b/rst/man/umount.rst
@@ -0,0 +1,44 @@
+.. -*- mode: rst -*-
+
+==================================
+The :program:`umount.s3ql` command
+==================================
+
+Synopsis
+========
+
+::
+
+ umount.s3ql [options] <mount point>
+
+
+Description
+===========
+
+.. include:: ../include/about.rst
+
+The |command| command unmounts the S3QL file system mounted in the
+directory *mount point* and blocks until all data has been uploaded to
+the storage backend.
+
+Only the user who mounted the file system with :program:`mount.s3ql`
+is able to unmount it with |command|. If you are root and want to
+unmount an S3QL file system mounted by an ordinary user, you have to
+use the :program:`fusermount -u` or :command:`umount` command instead.
+Note that these commands do not block until all data has been
+uploaded, so if you use them instead of :program:`umount.s3ql` then
+you should manually wait for the :program:`mount.s3ql` process to
+terminate before shutting down the system.
+
+
+Options
+=======
+
+The |command| command accepts the following options.
+
+.. pipeinclude:: ../../bin/umount.s3ql --help
+ :start-after: show this help message and exit
+
+.. include:: ../include/postman.rst
+
+.. |command| replace:: :command:`umount.s3ql`
diff --git a/rst/mkfs.rst b/rst/mkfs.rst
new file mode 100644
index 0000000..0b9fa97
--- /dev/null
+++ b/rst/mkfs.rst
@@ -0,0 +1,20 @@
+.. -*- mode: rst -*-
+
+====================
+File System Creation
+====================
+
+An S3QL file system is created with the `mkfs.s3ql` command. It has the
+following syntax::
+
+ mkfs.s3ql [options] <storage url>
+
+This command accepts the following options:
+
+.. pipeinclude:: ../bin/mkfs.s3ql --help
+ :start-after: show this help message and exit
+
+Unless you have specified the `--plain` option, `mkfs.s3ql` will ask you
+to enter an encryption password. If you do not want to enter this
+password every time that you mount the file system, you can store it
+in the `~/.s3ql/authinfo` file, see :ref:`bucket_pw`.
diff --git a/rst/mount.rst b/rst/mount.rst
new file mode 100644
index 0000000..609c4a4
--- /dev/null
+++ b/rst/mount.rst
@@ -0,0 +1,175 @@
+.. -*- mode: rst -*-
+
+==========
+ Mounting
+==========
+
+
+An S3QL file system is mounted with the `mount.s3ql` command. It has
+the following syntax::
+
+ mount.s3ql [options] <storage url> <mountpoint>
+
+.. NOTE::
+
+ S3QL is not a network file system like `NFS
+ <http://en.wikipedia.org/wiki/Network_File_System_%28protocol%29>`_
+ or `CIFS <http://en.wikipedia.org/wiki/CIFS>`_. It can only be
+ mounted on one computer at a time.
+
+This command accepts the following options:
+
+.. pipeinclude:: ../bin/mount.s3ql --help
+ :start-after: show this help message and exit
+
+.. _bucket_pw:
+
+Storing Encryption Passwords
+============================
+
+If you are trying to mount an encrypted bucket, `mount.s3ql` will first
+try to read the password from the `.s3ql/authinfo` file (the same file
+that is used to read the backend authentication data) and prompt the
+user to enter the password only if this fails.
+
+The `authinfo` entries to specify bucket passwords are of the form ::
+
+ storage-url <storage-url> password <password>
+
+So to always use the password `topsecret` when mounting `s3://joes_bucket`,
+the entry would be ::
+
+ storage-url s3://joes_bucket password topsecret
+
+.. NOTE::
+
+ If you are using the local backend, the storage url will
+ always be converted to an absolute path. So if you are in the
+ `/home/john` directory and try to mount `local://bucket`, the matching
+ `authinfo` entry has to have a storage url of
+ `local:///home/john/bucket`.
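+
+ The matching `authinfo` entry would thus be (password illustrative)::
+
+  storage-url local:///home/john/bucket password topsecret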
+
+
+Compression Algorithms
+======================
+
+S3QL supports three compression algorithms, LZMA, Bzip2 and zlib (with
+LZMA being the default). The compression algorithm can be specified
+freely whenever the file system is mounted, since it affects only the
+compression of new data blocks.
+
+Roughly speaking, LZMA is slower but achieves better compression
+ratios than Bzip2, while Bzip2 in turn is slower but achieves better
+compression ratios than zlib.
+
+For maximum file system performance, the best algorithm therefore
+depends on your network connection speed: the compression algorithm
+should be fast enough to saturate your network connection.
+
+To find the optimal algorithm for your system, S3QL ships with a
+program called `benchmark.py` in the `contrib` directory. You should
+run this program on a file that has a size that is roughly equal to
+the block size of your file system and has similar contents. It will
+then determine the compression speeds for the different algorithms and
+the upload speeds for the specified backend and recommend the best
+algorithm that is fast enough to saturate your network connection.
+
+Obviously you should make sure that there is little other system load
+when you run `benchmark.py` (i.e., don't compile software or encode
+videos at the same time).
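+
+Assuming :program:`benchmark.py` takes a storage url and a test file
+(run ``contrib/benchmark.py --help`` to confirm the actual
+interface), a run might look like::
+
+ ./contrib/benchmark.py s3://joes_bucket /tmp/testfile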
+
+
+Parallel Compression
+====================
+
+If you are running S3QL on a system with multiple cores, you might
+want to set ``--compression-threads`` to a value bigger than one. This
+will instruct S3QL to compress and encrypt several blocks at the same
+time.
+
+If you want to do this in combination with the LZMA compression
+algorithm, you should keep an eye on memory usage though. Every
+LZMA compression thread requires about 200 MB of RAM.
+
+
+.. NOTE::
+
+ To determine the optimal compression algorithm for your network
+ connection when using multiple threads, you can pass the
+ ``--compression-threads`` option to `contrib/benchmark.py`.
+
+
+Notes about Caching
+===================
+
+S3QL maintains a local cache of the file system data to speed up
+access. The cache is block based, so it is possible that only parts of
+a file are in the cache.
+
+Maximum Number of Cache Entries
+-------------------------------
+
+The maximum size of the cache can be configured with the `--cachesize`
+option. In addition to that, the maximum number of objects in the
+cache is limited by the `--max-cache-entries` option, so it is
+possible that the cache does not grow up to the maximum cache size
+because the maximum number of cache elements has been reached. The
+reason for this limit is that each cache entry requires one open
+file descriptor, and Linux distributions usually limit the total
+number of file descriptors per process to about a thousand.
+
+If you specify a value for `--max-cache-entries`, you should therefore
+make sure to also configure your system to increase the maximum number
+of open file handles. This can be done temporarily with the `ulimit -n`
+command. The method to permanently change this limit system-wide
+depends on your distribution.
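+
+For example, to temporarily raise the limit in the current shell and
+then mount with a larger number of cache entries (the values are
+purely illustrative)::
+
+ ulimit -n 8192
+ mount.s3ql --max-cache-entries 4096 <storage url> <mountpoint>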
+
+
+
+Cache Flushing and Expiration
+-----------------------------
+
+S3QL flushes changed blocks in the cache to the backend whenever a block
+has not been accessed for at least 10 seconds. Note that when a block is
+flushed, it still remains in the cache.
+
+Cache expiration (i.e., removal of blocks from the cache) is only done
+when the maximum cache size is reached. S3QL always expires the least
+recently used blocks first.
+
+
+Automatic Mounting
+==================
+
+If you want to mount and unmount an S3QL file system automatically at
+system startup and shutdown, you should do so with one dedicated S3QL
+init script for each S3QL file system.
+
+If your system is using upstart, an appropriate job can be defined
+as follows (and should be placed in `/etc/init/`):
+
+.. literalinclude:: ../contrib/s3ql.conf
+ :linenos:
+ :lines: 5-
+
+.. NOTE::
+
+ In principle, it is also possible to automatically mount an S3QL
+ file system with an appropriate entry in `/etc/fstab`. However,
+ this is not recommended for several reasons:
+
+ * file systems mounted in `/etc/fstab` will be unmounted with the
+ `umount` command, so your system will not wait until all data has
+ been uploaded but will shut down (or restart) immediately (this is a
+ FUSE limitation, see `issue 159
+ <http://code.google.com/p/s3ql/issues/detail?id=159>`_).
+
+ * There is no way to tell the system that mounting S3QL requires a
+ Python interpreter to be available, so it may attempt to run
+ `mount.s3ql` before it has mounted the volume containing the
+ Python interpreter.
+
+ * There is no standard way to tell the system that the internet
+ connection has to be up before the S3QL file system can be
+ mounted.
+
diff --git a/rst/resources.rst b/rst/resources.rst
new file mode 100644
index 0000000..2c435bf
--- /dev/null
+++ b/rst/resources.rst
@@ -0,0 +1,22 @@
+.. -*- mode: rst -*-
+
+.. _resources:
+
+================================
+Further Resources / Getting Help
+================================
+
+If you have questions or problems with S3QL that you weren't able to
+resolve with this manual, you might want to consider the following other resources:
+
+* The `S3QL Wiki <http://code.google.com/p/s3ql/w/list>`_
+
+* The `S3QL FAQ <http://code.google.com/p/s3ql/wiki/FAQ>`_
+
+* The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You
+ can subscribe by sending a mail to
+ `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_.
+
+
+Please report any bugs you may encounter in the `Issue Tracker
+<http://code.google.com/p/s3ql/issues/list>`_.
diff --git a/rst/special.rst b/rst/special.rst
new file mode 100644
index 0000000..c5acade
--- /dev/null
+++ b/rst/special.rst
@@ -0,0 +1,114 @@
+.. -*- mode: rst -*-
+
+
+========================
+ Advanced S3QL Features
+========================
+
+.. _s3qlcp:
+
+Snapshotting and Copy-on-Write
+==============================
+
+The command `s3qlcp` can be used to duplicate a directory tree without
+physically copying the file contents. This is possible due to the data
+de-duplication feature of S3QL.
+
+The syntax of `s3qlcp` is::
+
+ s3qlcp [options] <src> <target>
+
+This will replicate the contents of the directory `<src>` in the
+directory `<target>`. `<src>` has to be an existing directory and
+`<target>` must not exist. Moreover, both directories have to be
+within the same S3QL file system.
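+
+For example, to snapshot the current backup directory (the paths are
+illustrative)::
+
+ s3qlcp /mnt/s3ql/backup-current /mnt/s3ql/backup-2010-04-21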
+
+.. include:: man/cp.rst
+ :start-after: begin_main_content
+ :end-before: end_main_content
+
+.. _s3qlstat:
+
+Getting Statistics
+==================
+
+You can get more information about a mounted S3QL file system with the
+`s3qlstat` command. It has the following syntax::
+
+ s3qlstat [options] <mountpoint>
+
+Probably the most interesting numbers are the total size of your data,
+the total size after de-duplication, and the final size after
+de-duplication and compression.
+
+`s3qlstat` can only be called by the user that mounted the file system
+and (if the file system was mounted with `--allow-other` or `--allow-root`)
+the root user. This limitation might be removed in the future (see `issue 155
+<http://code.google.com/p/s3ql/issues/detail?id=155>`_).
+
+For a full list of available options, run `s3qlstat --help`.
+
+.. _s3qllock:
+
+Immutable Trees
+===============
+
+The command :program:`s3qllock` can be used to make a directory tree
+immutable. Immutable trees can no longer be changed in any way
+whatsoever. You can not add new files or directories and you can not
+change or delete existing files and directories. The only way to get
+rid of an immutable tree is to use the :program:`s3qlrm` command (see
+below).
+
+For example, to make the directory tree beneath the directory
+``2010-04-21`` immutable, execute ::
+
+ s3qllock 2010-04-21
+
+.. include:: man/lock.rst
+ :start-after: begin_main_content
+ :end-before: end_main_content
+
+
+.. _s3qlrm:
+
+Fast Recursive Removal
+======================
+
+The ``s3qlrm`` command can be used to recursively delete files and
+directories on an S3QL file system. Although ``s3qlrm`` is faster than
+using e.g. ``rm -r``, the main reason for its existence is that it
+allows you to delete immutable trees as well. The syntax is rather
+simple::
+
+ s3qlrm <directory>
+
+Be warned that there is no additional confirmation. The directory will
+be removed entirely and immediately.
+
+.. _s3qlctrl:
+
+Runtime Configuration
+=====================
+
+
+The `s3qlctrl` command can be used to control a mounted S3QL file
+system. Its syntax is ::
+
+ s3qlctrl [options] <action> <mountpoint> ...
+
+`<mountpoint>` must be the location of a mounted S3QL file system.
+For a list of valid options, run `s3qlctrl --help`. `<action>`
+may be either of:
+
+ :flushcache:
+ Flush file system cache. The command blocks until the cache has
+ been flushed.
+ :log:
+ Change log level.
+ :cachesize:
+ Change file system cache size.
+ :upload-meta:
+ Trigger a metadata upload.
+
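+For example, to flush the cache of the file system mounted at
+`/mnt/s3ql` (the mount point is illustrative) and then trigger a
+metadata upload::
+
+ s3qlctrl flushcache /mnt/s3ql
+ s3qlctrl upload-meta /mnt/s3ql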
+
diff --git a/rst/tips.rst b/rst/tips.rst
new file mode 100644
index 0000000..b857f75
--- /dev/null
+++ b/rst/tips.rst
@@ -0,0 +1,81 @@
+.. -*- mode: rst -*-
+
+=============
+Tips & Tricks
+=============
+
+
+.. _copy_performance:
+
+
+Permanently mounted backup file system
+======================================
+
+If you use S3QL as a backup file system, it can be useful to mount the
+file system permanently (rather than just mounting it for a backup and
+unmounting it afterwards). Especially if your file system becomes
+large, this saves you the long mount and unmount times if you only
+want to restore a single file.
+
+If you decide to do so, you should make sure to
+
+* Use :ref:`s3qllock <s3qllock>` to ensure that backups are immutable
+ after they have been made.
+
+* Call :ref:`s3qlctrl upload-meta <s3qlctrl>` right after every
+ backup to make sure that the newest metadata is stored safely (if
+ you do backups often enough, this may also allow you to set the
+ :cmdopt:`--metadata-upload-interval` option of :program:`mount.s3ql`
+ to zero). See the example below.
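+
+For example, a backup script could end with (the mount point is
+illustrative)::
+
+ s3qlctrl upload-meta /mnt/s3ql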
+
+
+
+Improving copy performance
+==========================
+
+If you want to copy a lot of smaller files *from* an S3QL file system
+(e.g. for a system restore) you will probably notice that the
+performance is rather bad.
+
+The reason for this is intrinsic to the way S3QL works. Whenever you
+read a file, S3QL first has to retrieve this file over the network
+from the storage backend. This takes a minimum amount of time (the
+network latency), no matter how big or small the file is. So when you
+copy lots of small files, 99% of the time is actually spent waiting
+for network data.
+
+Theoretically, this problem is easy to solve: you just have to copy
+several files at the same time. In practice, however, almost all unix
+utilities (``cp``, ``rsync``, ``tar`` and friends) insist on copying
+data one file at a time. This makes a lot of sense when copying data
+on the local hard disk, but in case of S3QL this is really
+unfortunate.
+
+The best workaround that has been found so far is to copy files by
+starting several rsync processes at once and using exclusion rules to
+make sure that they work on different sets of files.
+
+For example, the following script will start 3 rsync instances. The
+first instance handles all filenames starting with a-f, the second the
+filenames from g-l and the third covers the rest. The ``+ */`` rule
+ensures that every instance looks into all directories. ::
+
+ #!/bin/bash
+
+ RSYNC_ARGS="-aHv /mnt/s3ql/ /home/restore/"
+
+ rsync -f "+ */" -f "-! [a-f]*" $RSYNC_ARGS &
+ rsync -f "+ */" -f "-! [g-l]*" $RSYNC_ARGS &
+ rsync -f "+ */" -f "- [a-l]*" $RSYNC_ARGS &
+
+ wait
+
+The optimum number of parallel processes depends on your network
+connection and the size of the files that you want to transfer.
+However, starting about 10 processes seems to be a good compromise
+that increases performance dramatically in almost all situations.
+
+S3QL comes with a script named ``pcp.py`` in the ``contrib`` directory
+that can be used to transfer files in parallel without having to write
+an explicit script first. See the description of :ref:`pcp` for
+details.
diff --git a/rst/umount.rst b/rst/umount.rst
new file mode 100644
index 0000000..dac248e
--- /dev/null
+++ b/rst/umount.rst
@@ -0,0 +1,31 @@
+.. -*- mode: rst -*-
+
+==========
+Unmounting
+==========
+
+To unmount an S3QL file system, use the command::
+
+ umount.s3ql [options] <mountpoint>
+
+This will block until all data has been committed to the storage
+backend.
+
+Only the user who mounted the file system with :command:`mount.s3ql`
+is able to unmount it again. If you are root and want to unmount an
+S3QL file system mounted by an ordinary user, you have to use the
+:command:`fusermount -u` or :command:`umount` command instead. Note
+that these commands do not block until all data has been uploaded, so
+if you use them instead of `umount.s3ql` then you should manually wait
+for the `mount.s3ql` process to terminate before shutting down the
+system.
+
+The :command:`umount.s3ql` command accepts the following options:
+
+.. pipeinclude:: ../bin/umount.s3ql --help
+ :start-after: show this help message and exit
+
+If, for some reason, the `umount.s3ql` command does not work, the file
+system can also be unmounted with `fusermount -u -z`. Note that this
+command will return immediately and the file system may continue to
+upload data in the background for a while longer.
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..861a9f5
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..285894d
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+'''
+setup.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import sys
+import os
+import subprocess
+import logging
+from glob import glob
+
+# Work around setuptools bug
+# http://bitbucket.org/tarek/distribute/issue/152/
+#pylint: disable=W0611
+import multiprocessing
+
+# Add S3QL sources
+basedir = os.path.abspath(os.path.dirname(sys.argv[0]))
+sys.path.insert(0, os.path.join(basedir, 'src'))
+import s3ql
+
+# Import distribute
+sys.path.insert(0, os.path.join(basedir, 'util'))
+from distribute_setup import use_setuptools
+use_setuptools(version='0.6.14', download_delay=5)
+import setuptools
+import setuptools.command.test as setuptools_test
+
+class build_docs(setuptools.Command):
+ description = 'Build Sphinx documentation'
+ user_options = [
+ ('fresh-env', 'E', 'discard saved environment'),
+ ('all-files', 'a', 'build all files'),
+ ]
+ boolean_options = ['fresh-env', 'all-files']
+
+ def initialize_options(self):
+ self.fresh_env = False
+ self.all_files = False
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ try:
+ from sphinx.application import Sphinx
+ from docutils.utils import SystemMessage
+ except ImportError:
+ raise SystemExit('This command requires Sphinx to be installed.')
+
+ dest_dir = os.path.join(basedir, 'doc')
+ src_dir = os.path.join(basedir, 'rst')
+
+ confoverrides = {}
+ confoverrides['version'] = s3ql.VERSION
+ confoverrides['release'] = s3ql.VERSION
+
+ for builder in ('html', 'latex', 'man'):
+ print('Running %s builder...' % builder)
+ self.mkpath(os.path.join(dest_dir, builder))
+ app = Sphinx(srcdir=src_dir, confdir=src_dir,
+ outdir=os.path.join(dest_dir, builder),
+ doctreedir=os.path.join(dest_dir, 'doctrees'),
+ buildername=builder, confoverrides=confoverrides,
+ freshenv=self.fresh_env)
+ self.fresh_env = False
+ self.all_files = False
+
+ try:
+ if self.all_files:
+ app.builder.build_all()
+ else:
+ app.builder.build_update()
+ except SystemMessage as err:
+ print('reST markup error:',
+ err.args[0].encode('ascii', 'backslashreplace'),
+ file=sys.stderr)
+
+ # These shouldn't be installed by default
+ for name in ('expire_backups.1', 'pcp.1'):
+ os.rename(os.path.join(dest_dir, 'man', name),
+ os.path.join(basedir, 'contrib', name))
+
+ print('Running pdflatex...')
+ for _ in range(3):
+ subprocess.check_call(['pdflatex', '-interaction',
+ 'batchmode', 'manual.tex'],
+ cwd=os.path.join(dest_dir, 'latex'),
+ stdout=open('/dev/null', 'wb'))
+ os.rename(os.path.join(dest_dir, 'latex', 'manual.pdf'),
+ os.path.join(dest_dir, 'manual.pdf'))
+
+
+def main():
+
+ with open(os.path.join(basedir, 'rst', 'about.rst'), 'r') as fh:
+ long_desc = fh.read()
+
+ setuptools.setup(
+ name='s3ql',
+ zip_safe=True,
+ version=s3ql.VERSION,
+ description='a full-featured file system for online data storage',
+ long_description=long_desc,
+ author='Nikolaus Rath',
+ author_email='Nikolaus@rath.org',
+ url='http://code.google.com/p/s3ql/',
+ download_url='http://code.google.com/p/s3ql/downloads/list',
+ license='LGPL',
+ classifiers=['Development Status :: 4 - Beta',
+ 'Environment :: No Input/Output (Daemon)',
+ 'Environment :: Console',
+ 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
+ 'Topic :: Internet',
+ 'Operating System :: POSIX',
+ 'Topic :: System :: Archiving'],
+ platforms=[ 'POSIX', 'UNIX', 'Linux' ],
+ keywords=['FUSE', 'backup', 'archival', 'compression', 'encryption',
+ 'deduplication', 'aws', 's3' ],
+ package_dir={'': 'src'},
+ packages=setuptools.find_packages('src'),
+ provides=['s3ql'],
+ data_files = [ ('share/man/man1',
+ [ os.path.join('doc/man/', x) for x
+ in glob(os.path.join(basedir, 'doc', 'man', '*.1')) ]) ],
+ entry_points={ 'console_scripts':
+ [
+ 'mkfs.s3ql = s3ql.cli.mkfs:main',
+ 'fsck.s3ql = s3ql.cli.fsck:main',
+ 'mount.s3ql = s3ql.cli.mount:main',
+ 'umount.s3ql = s3ql.cli.umount:main',
+ 's3qlcp = s3ql.cli.cp:main',
+ 's3qlstat = s3ql.cli.statfs:main',
+ 's3qladm = s3ql.cli.adm:main',
+ 's3qlctrl = s3ql.cli.ctrl:main',
+ 's3qllock = s3ql.cli.lock:main',
+ 's3qlrm = s3ql.cli.remove:main',
+ ]
+ },
+ install_requires=['apsw >= 3.7.0',
+ 'pycryptopp',
+ 'llfuse >= 0.31',
+ 'argparse >= 1.1',
+ 'pyliblzma >= 0.5.3' ],
+ tests_require=['apsw >= 3.7.0', 'unittest2',
+ 'pycryptopp',
+ 'llfuse >= 0.29',
+ 'argparse >= 1.1',
+ 'pyliblzma >= 0.5.3' ],
+ test_suite='tests',
+ cmdclass={'test': test,
+ 'upload_docs': upload_docs,
+ 'build_sphinx': build_docs },
+ command_options = { 'sdist': { 'formats': ('setup.py', 'bztar') } },
+ )
+
+
+class test(setuptools_test.test):
+ # Attributes defined outside init, required by setuptools.
+ # pylint: disable=W0201
+ description = "Run self-tests"
+ user_options = (setuptools_test.test.user_options +
+ [('debug=', None, 'Activate debugging for specified modules '
+ '(separated by commas, specify "all" for all modules)'),
+ ('awskey=', None, 'Specify AWS access key to use, secret key will be asked for. '
+ 'If this option is not specified, tests requiring access '
+ 'to Amazon Web Services will be skipped.')])
+
+
+ def initialize_options(self):
+ setuptools_test.test.initialize_options(self)
+ self.debug = None
+ self.awskey = None
+
+ def finalize_options(self):
+ setuptools_test.test.finalize_options(self)
+ self.test_loader = "ScanningLoader"
+ if self.debug:
+ self.debug = [ x.strip() for x in self.debug.split(',') ]
+
+
+ def run_tests(self):
+
+ # Add test modules
+ sys.path.insert(0, os.path.join(basedir, 'tests'))
+ import unittest2 as unittest
+ import _common
+ from s3ql.common import (setup_excepthook, add_file_logging, add_stdout_logging,
+ LoggerFilter)
+ from getpass import getpass
+
+ # Initialize logging if not yet initialized
+ root_logger = logging.getLogger()
+ if not root_logger.handlers:
+ add_stdout_logging(quiet=True)
+ add_file_logging(os.path.join(basedir, 'setup.log'))
+ setup_excepthook()
+ if self.debug:
+ root_logger.setLevel(logging.DEBUG)
+ if 'all' not in self.debug:
+ root_logger.addFilter(LoggerFilter(self.debug, logging.INFO))
+ else:
+ root_logger.setLevel(logging.INFO)
+ else:
+ root_logger.debug("Logging already initialized.")
+
+ # Init AWS
+ if self.awskey:
+ if sys.stdin.isatty():
+ pw = getpass("Enter AWS password: ")
+ else:
+ pw = sys.stdin.readline().rstrip()
+ _common.aws_credentials = (self.awskey, pw)
+
+ # Define our own test loader to order modules alphabetically
+ from pkg_resources import resource_listdir, resource_exists
+ class ScanningLoader(unittest.TestLoader):
+ # Yes, this is a nasty hack
+ # pylint: disable=W0232,W0221,W0622
+ def loadTestsFromModule(self, module):
+ """Return a suite of all tests cases contained in the given module"""
+ tests = []
+ if module.__name__!='setuptools.tests.doctest': # ugh
+ tests.append(unittest.TestLoader.loadTestsFromModule(self,module))
+ if hasattr(module, "additional_tests"):
+ tests.append(module.additional_tests())
+ if hasattr(module, '__path__'):
+ for file in sorted(resource_listdir(module.__name__, '')):
+ if file.endswith('.py') and file!='__init__.py':
+ submodule = module.__name__+'.'+file[:-3]
+ else:
+ if resource_exists(
+ module.__name__, file+'/__init__.py'
+ ):
+ submodule = module.__name__+'.'+file
+ else:
+ continue
+ tests.append(self.loadTestsFromName(submodule))
+ if len(tests)!=1:
+ return self.suiteClass(tests)
+ else:
+ return tests[0] # don't create a nested suite for only one return
+
+ unittest.main(
+ None, None, [unittest.__file__]+self.test_args,
+ testLoader = ScanningLoader())
+
+
+class upload_docs(setuptools.Command):
+ user_options = []
+ boolean_options = []
+ description = "Upload documentation"
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ subprocess.check_call(['rsync', '-aHv', '--del', os.path.join(basedir, 'doc', 'html') + '/',
+ 'ebox.rath.org:/var/www/s3ql-docs/'])
+ subprocess.check_call(['rsync', '-aHv', '--del', os.path.join(basedir, 'doc', 'manual.pdf'),
+ 'ebox.rath.org:/var/www/s3ql-docs/'])
+
+if __name__ == '__main__':
+ main()
diff --git a/src/s3ql.egg-info/PKG-INFO b/src/s3ql.egg-info/PKG-INFO
new file mode 100644
index 0000000..015c3a4
--- /dev/null
+++ b/src/s3ql.egg-info/PKG-INFO
@@ -0,0 +1,113 @@
+Metadata-Version: 1.1
+Name: s3ql
+Version: 1.0.1
+Summary: a full-featured file system for online data storage
+Home-page: http://code.google.com/p/s3ql/
+Author: Nikolaus Rath
+Author-email: Nikolaus@rath.org
+License: LGPL
+Download-URL: http://code.google.com/p/s3ql/downloads/list
+Description: .. -*- mode: rst -*-
+
+ ============
+ About S3QL
+ ============
+
+ S3QL is a file system that stores all its data online. It supports
+ `Amazon S3 <http://aws.amazon.com/s3>`_ as well as arbitrary
+ SFTP servers and effectively provides you with a hard disk of dynamic,
+ infinite capacity that can be accessed from any computer with internet
+ access.
+
+ S3QL provides a standard, full-featured UNIX file system that is
+ conceptually indistinguishable from any local file system.
+ Furthermore, S3QL has additional features like compression,
+ encryption, data de-duplication, immutable trees and snapshotting
+ which make it especially suitable for online backup and archival.
+
+ S3QL is designed to favor simplicity and elegance over performance and
+ feature-creep. Care has been taken to make the source code as
+ readable and serviceable as possible. Solid error detection and error
+ handling have been included from the very first line, and S3QL comes
+ with extensive automated test cases for all its components.
+
+ Features
+ ========
+
+
+ * **Transparency.** Conceptually, S3QL is indistinguishable from a
+ local file system. For example, it supports hardlinks, symlinks,
+ ACLs and standard unix permissions, extended attributes and file
+ sizes up to 2 TB.
+
+ * **Dynamic Size.** The size of an S3QL file system grows and shrinks
+ dynamically as required.
+
+ * **Compression.** Before storage, all data may be compressed with the
+ LZMA, bzip2 or deflate (gzip) algorithm.
+
+ * **Encryption.** After compression (but before upload), all data can
+ be AES encrypted with a 256 bit key. An additional SHA256 HMAC checksum
+ is used to protect the data against manipulation.
+
+ * **Data De-duplication.** If several files have identical contents,
+ the redundant data will be stored only once. This works across all
+ files stored in the file system, and also if only some parts of the
+ files are identical while other parts differ.
+
+ * **Immutable Trees.** Directory trees can be made immutable, so that
+ their contents can no longer be changed in any way whatsoever. This
+ can be used to ensure that backups can not be modified after they
+ have been made.
+
+ * **Copy-on-Write/Snapshotting.** S3QL can replicate entire directory
+ trees without using any additional storage space. Only if one of the
+ copies is modified, the part of the data that has been modified will
+ take up additional storage space. This can be used to create
+ intelligent snapshots that preserve the state of a directory at
+ different points in time using a minimum amount of space.
+
+ * **High Performance independent of network latency.** All operations
+ that do not write or read file contents (like creating directories
+ or moving, renaming, and changing permissions of files and
+ directories) are very fast because they are carried out without any
+ network transactions.
+
+ S3QL achieves this by saving the entire file and directory structure
+ in a database. This database is locally cached and the remote
+ copy updated asynchronously.
+
+ * **Support for low bandwidth connections.** S3QL splits file contents
+ into smaller blocks and caches blocks locally. This minimizes both
+ the number of network transactions required for reading and writing
+ data, and the amount of data that has to be transferred when only
+ parts of a file are read or written.
+
+
+
+ Development Status
+ ==================
+
+ After two years of beta-testing by about 93 users revealed no
+ data-critical bugs, S3QL was declared **stable** with the release of
+ version 1.0 on May 13th, 2011. Note that this does not mean that S3QL
+ is bug-free. S3QL still has several known, and probably many more
+ unknown bugs. However, there is a high probability that these bugs
+ will, although inconvenient, not endanger any stored data.
+
+ Please report any problems on the `mailing list
+ <http://groups.google.com/group/s3ql>`_ or the `issue tracker
+ <http://code.google.com/p/s3ql/issues/list>`_.
+
+Keywords: FUSE,backup,archival,compression,encryption,deduplication,aws,s3
+Platform: POSIX
+Platform: UNIX
+Platform: Linux
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: No Input/Output (Daemon)
+Classifier: Environment :: Console
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Topic :: Internet
+Classifier: Operating System :: POSIX
+Classifier: Topic :: System :: Archiving
+Provides: s3ql
diff --git a/src/s3ql.egg-info/SOURCES.txt b/src/s3ql.egg-info/SOURCES.txt
new file mode 100644
index 0000000..828f8ca
--- /dev/null
+++ b/src/s3ql.egg-info/SOURCES.txt
@@ -0,0 +1,229 @@
+Changes.txt
+INSTALL.txt
+setup.py
+bin/fsck.s3ql
+bin/mkfs.s3ql
+bin/mount.s3ql
+bin/s3qladm
+bin/s3qlcp
+bin/s3qlctrl
+bin/s3qllock
+bin/s3qlrm
+bin/s3qlstat
+bin/umount.s3ql
+contrib/benchmark.py
+contrib/expire_backups.1
+contrib/expire_backups.py
+contrib/make_dummy.py
+contrib/pcp.1
+contrib/pcp.py
+contrib/s3ql.conf
+contrib/s3ql_backup.sh
+doc/manual.pdf
+doc/html/.buildinfo
+doc/html/about.html
+doc/html/adm.html
+doc/html/backends.html
+doc/html/contrib.html
+doc/html/fsck.html
+doc/html/index.html
+doc/html/installation.html
+doc/html/issues.html
+doc/html/mkfs.html
+doc/html/mount.html
+doc/html/objects.inv
+doc/html/resources.html
+doc/html/search.html
+doc/html/searchindex.js
+doc/html/special.html
+doc/html/tips.html
+doc/html/umount.html
+doc/html/_sources/about.txt
+doc/html/_sources/adm.txt
+doc/html/_sources/backends.txt
+doc/html/_sources/contrib.txt
+doc/html/_sources/fsck.txt
+doc/html/_sources/index.txt
+doc/html/_sources/installation.txt
+doc/html/_sources/issues.txt
+doc/html/_sources/mkfs.txt
+doc/html/_sources/mount.txt
+doc/html/_sources/resources.txt
+doc/html/_sources/special.txt
+doc/html/_sources/tips.txt
+doc/html/_sources/umount.txt
+doc/html/_sources/man/adm.txt
+doc/html/_sources/man/cp.txt
+doc/html/_sources/man/ctrl.txt
+doc/html/_sources/man/expire_backups.txt
+doc/html/_sources/man/fsck.txt
+doc/html/_sources/man/index.txt
+doc/html/_sources/man/lock.txt
+doc/html/_sources/man/mkfs.txt
+doc/html/_sources/man/mount.txt
+doc/html/_sources/man/pcp.txt
+doc/html/_sources/man/rm.txt
+doc/html/_sources/man/stat.txt
+doc/html/_sources/man/umount.txt
+doc/html/_static/ajax-loader.gif
+doc/html/_static/basic.css
+doc/html/_static/comment-bright.png
+doc/html/_static/comment-close.png
+doc/html/_static/comment.png
+doc/html/_static/contents.png
+doc/html/_static/doctools.js
+doc/html/_static/down-pressed.png
+doc/html/_static/down.png
+doc/html/_static/file.png
+doc/html/_static/jquery.js
+doc/html/_static/minus.png
+doc/html/_static/navigation.png
+doc/html/_static/plus.png
+doc/html/_static/pygments.css
+doc/html/_static/searchtools.js
+doc/html/_static/sphinxdoc.css
+doc/html/_static/underscore.js
+doc/html/_static/up-pressed.png
+doc/html/_static/up.png
+doc/html/_static/websupport.js
+doc/html/man/adm.html
+doc/html/man/cp.html
+doc/html/man/ctrl.html
+doc/html/man/expire_backups.html
+doc/html/man/fsck.html
+doc/html/man/index.html
+doc/html/man/lock.html
+doc/html/man/mkfs.html
+doc/html/man/mount.html
+doc/html/man/pcp.html
+doc/html/man/rm.html
+doc/html/man/stat.html
+doc/html/man/umount.html
+doc/latex/Makefile
+doc/latex/fncychap.sty
+doc/latex/manual.aux
+doc/latex/manual.idx
+doc/latex/manual.log
+doc/latex/manual.out
+doc/latex/manual.tex
+doc/latex/manual.toc
+doc/latex/python.ist
+doc/latex/sphinx.sty
+doc/latex/sphinxhowto.cls
+doc/latex/sphinxmanual.cls
+doc/latex/tabulary.sty
+doc/man/fsck.s3ql.1
+doc/man/mkfs.s3ql.1
+doc/man/mount.s3ql.1
+doc/man/s3qladm.1
+doc/man/s3qlcp.1
+doc/man/s3qlctrl.1
+doc/man/s3qllock.1
+doc/man/s3qlrm.1
+doc/man/s3qlstat.1
+doc/man/umount.s3ql.1
+rst/about.rst
+rst/adm.rst
+rst/backends.rst
+rst/conf.py
+rst/contrib.rst
+rst/fsck.rst
+rst/index.rst
+rst/installation.rst
+rst/issues.rst
+rst/mkfs.rst
+rst/mount.rst
+rst/resources.rst
+rst/special.rst
+rst/tips.rst
+rst/umount.rst
+rst/_static/sphinxdoc.css
+rst/_templates/layout.html
+rst/include/about.rst
+rst/include/backends.rst
+rst/include/postman.rst
+rst/man/adm.rst
+rst/man/cp.rst
+rst/man/ctrl.rst
+rst/man/expire_backups.rst
+rst/man/fsck.rst
+rst/man/index.rst
+rst/man/lock.rst
+rst/man/mkfs.rst
+rst/man/mount.rst
+rst/man/pcp.rst
+rst/man/rm.rst
+rst/man/stat.rst
+rst/man/umount.rst
+src/s3ql/__init__.py
+src/s3ql/block_cache.py
+src/s3ql/common.py
+src/s3ql/daemonize.py
+src/s3ql/database.py
+src/s3ql/fs.py
+src/s3ql/fsck.py
+src/s3ql/inode_cache.py
+src/s3ql/multi_lock.py
+src/s3ql/ordered_dict.py
+src/s3ql/parse_args.py
+src/s3ql/thread_group.py
+src/s3ql/upload_manager.py
+src/s3ql.egg-info/PKG-INFO
+src/s3ql.egg-info/SOURCES.txt
+src/s3ql.egg-info/dependency_links.txt
+src/s3ql.egg-info/entry_points.txt
+src/s3ql.egg-info/requires.txt
+src/s3ql.egg-info/top_level.txt
+src/s3ql.egg-info/zip-safe
+src/s3ql/backends/__init__.py
+src/s3ql/backends/common.py
+src/s3ql/backends/ftp.py
+src/s3ql/backends/ftplib.py
+src/s3ql/backends/local.py
+src/s3ql/backends/s3.py
+src/s3ql/backends/sftp.py
+src/s3ql/backends/boto/__init__.py
+src/s3ql/backends/boto/connection.py
+src/s3ql/backends/boto/exception.py
+src/s3ql/backends/boto/handler.py
+src/s3ql/backends/boto/resultset.py
+src/s3ql/backends/boto/storage_uri.py
+src/s3ql/backends/boto/utils.py
+src/s3ql/backends/boto/pyami/__init__.py
+src/s3ql/backends/boto/pyami/config.py
+src/s3ql/backends/boto/s3/__init__.py
+src/s3ql/backends/boto/s3/acl.py
+src/s3ql/backends/boto/s3/bucket.py
+src/s3ql/backends/boto/s3/bucketlistresultset.py
+src/s3ql/backends/boto/s3/connection.py
+src/s3ql/backends/boto/s3/key.py
+src/s3ql/backends/boto/s3/prefix.py
+src/s3ql/backends/boto/s3/user.py
+src/s3ql/cli/__init__.py
+src/s3ql/cli/adm.py
+src/s3ql/cli/cp.py
+src/s3ql/cli/ctrl.py
+src/s3ql/cli/fsck.py
+src/s3ql/cli/lock.py
+src/s3ql/cli/mkfs.py
+src/s3ql/cli/mount.py
+src/s3ql/cli/remove.py
+src/s3ql/cli/statfs.py
+src/s3ql/cli/umount.py
+tests/__init__.py
+tests/_common.py
+tests/data.tar.bz2
+tests/t1_backends.py
+tests/t1_multi_lock.py
+tests/t1_ordered_dict.py
+tests/t2_block_cache.py
+tests/t3_fs_api.py
+tests/t3_fsck.py
+tests/t3_inode_cache.py
+tests/t4_adm.py
+tests/t4_fuse.py
+tests/t5_cli.py
+tests/t5_cp.py
+util/cmdline_lexer.py
+util/distribute_setup.py
+util/sphinx_pipeinclude.py \ No newline at end of file
diff --git a/src/s3ql.egg-info/dependency_links.txt b/src/s3ql.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/src/s3ql.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/src/s3ql.egg-info/entry_points.txt b/src/s3ql.egg-info/entry_points.txt
new file mode 100644
index 0000000..1d80f48
--- /dev/null
+++ b/src/s3ql.egg-info/entry_points.txt
@@ -0,0 +1,12 @@
+[console_scripts]
+fsck.s3ql = s3ql.cli.fsck:main
+s3qllock = s3ql.cli.lock:main
+s3qladm = s3ql.cli.adm:main
+s3qlctrl = s3ql.cli.ctrl:main
+umount.s3ql = s3ql.cli.umount:main
+s3qlcp = s3ql.cli.cp:main
+s3qlrm = s3ql.cli.remove:main
+mount.s3ql = s3ql.cli.mount:main
+s3qlstat = s3ql.cli.statfs:main
+mkfs.s3ql = s3ql.cli.mkfs:main
+
diff --git a/src/s3ql.egg-info/requires.txt b/src/s3ql.egg-info/requires.txt
new file mode 100644
index 0000000..be48a76
--- /dev/null
+++ b/src/s3ql.egg-info/requires.txt
@@ -0,0 +1,5 @@
+apsw >= 3.7.0
+pycryptopp
+llfuse >= 0.31
+argparse >= 1.1
+pyliblzma >= 0.5.3 \ No newline at end of file
diff --git a/src/s3ql.egg-info/top_level.txt b/src/s3ql.egg-info/top_level.txt
new file mode 100644
index 0000000..9d43dc9
--- /dev/null
+++ b/src/s3ql.egg-info/top_level.txt
@@ -0,0 +1 @@
+s3ql
diff --git a/src/s3ql.egg-info/zip-safe b/src/s3ql.egg-info/zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/src/s3ql.egg-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/src/s3ql/__init__.py b/src/s3ql/__init__.py
new file mode 100644
index 0000000..8efcd98
--- /dev/null
+++ b/src/s3ql/__init__.py
@@ -0,0 +1,16 @@
+'''
+__init__.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+__all__ = [ 'backends', 'cli', 'parse_args', 'block_cache', "common", 'daemonize',
+ 'database', 'fs', 'fsck', 'multi_lock', 'ordered_dict', 'thread_group',
+ 'upload_manager', 'VERSION', 'CURRENT_FS_REV' ]
+
+VERSION = '1.0.1'
+CURRENT_FS_REV = 11
diff --git a/src/s3ql/backends/__init__.py b/src/s3ql/backends/__init__.py
new file mode 100644
index 0000000..eb8a8da
--- /dev/null
+++ b/src/s3ql/backends/__init__.py
@@ -0,0 +1,11 @@
+'''
+__init__.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+__all__ = [ 'common', 'ftp', 'ftplib', 'local', 's3', 'sftp', 'boto' ]
diff --git a/src/s3ql/backends/boto/__init__.py b/src/s3ql/backends/boto/__init__.py
new file mode 100644
index 0000000..244450b
--- /dev/null
+++ b/src/s3ql/backends/boto/__init__.py
@@ -0,0 +1,358 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+from .pyami.config import Config, BotoConfigLocations
+from .storage_uri import BucketStorageUri, FileStorageUri
+import os, re, sys
+import logging
+import logging.config
+from .exception import InvalidUriError
+
+Version = '1.9b'
+UserAgent = 'Boto/%s (%s)' % (Version, sys.platform)
+config = Config()
+
+def init_logging():
+ pass
+
+class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+log = logging.getLogger('boto')
+log.addHandler(NullHandler())
+
+# convenience function to set logging to a particular file
+def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
+ global log
+ if not format_string:
+ format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
+ logger = logging.getLogger(name)
+ logger.setLevel(level)
+ fh = logging.FileHandler(filepath)
+ fh.setLevel(level)
+ formatter = logging.Formatter(format_string)
+ fh.setFormatter(formatter)
+ logger.addHandler(fh)
+ log = logger
+
+def set_stream_logger(name, level=logging.DEBUG, format_string=None):
+ global log
+ if not format_string:
+ format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
+ logger = logging.getLogger(name)
+ logger.setLevel(level)
+ fh = logging.StreamHandler()
+ fh.setLevel(level)
+ formatter = logging.Formatter(format_string)
+ fh.setFormatter(formatter)
+ logger.addHandler(fh)
+ log = logger
+
+def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.sqs.connection.SQSConnection`
+ :return: A connection to Amazon's SQS
+ """
+ from boto.sqs.connection import SQSConnection
+ return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.s3.connection.S3Connection`
+ :return: A connection to Amazon's S3
+ """
+ from boto.s3.connection import S3Connection
+ return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
+ """
+ @type gs_access_key_id: string
+ @param gs_access_key_id: Your Google Storage Access Key ID
+
+ @type gs_secret_access_key: string
+ @param gs_secret_access_key: Your Google Storage Secret Access Key
+
+ @rtype: L{GSConnection<boto.gs.connection.GSConnection>}
+ @return: A connection to Google's Storage service
+ """
+ from boto.gs.connection import GSConnection
+ return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
+
+def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.connection.EC2Connection`
+ :return: A connection to Amazon's EC2
+ """
+ from boto.ec2.connection import EC2Connection
+ return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.elb.ELBConnection`
+ :return: A connection to Amazon's Load Balancing Service
+ """
+ from boto.ec2.elb import ELBConnection
+ return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
+ :return: A connection to Amazon's Auto Scaling Service
+ """
+ from boto.ec2.autoscale import AutoScaleConnection
+ return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
+ :return: A connection to Amazon's EC2 Monitoring service
+ """
+ from boto.ec2.cloudwatch import CloudWatchConnection
+ return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.sdb.connection.SDBConnection`
+ :return: A connection to Amazon's SDB
+ """
+ from boto.sdb.connection import SDBConnection
+ return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.fps.connection.FPSConnection`
+ :return: A connection to FPS
+ """
+ from boto.fps.connection import FPSConnection
+ return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.cloudfront.CloudFrontConnection`
+ :return: A connection to Amazon's CloudFront service
+ """
+ from boto.cloudfront import CloudFrontConnection
+ return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.vpc.VPCConnection`
+ :return: A connection to VPC
+ """
+ from boto.vpc import VPCConnection
+ return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.rds.RDSConnection`
+ :return: A connection to RDS
+ """
+ from boto.rds import RDSConnection
+ return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.emr.EmrConnection`
+ :return: A connection to Amazon's Elastic MapReduce service
+ """
+ from boto.emr import EmrConnection
+ return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.sns.SNSConnection`
+ :return: A connection to Amazon's SNS
+ """
+ from boto.sns import SNSConnection
+ return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def check_extensions(module_name, module_path):
+ """
+ This function checks for extensions to boto modules. It should be called
+ from the __init__.py file of every boto module. See
+ http://code.google.com/p/boto/wiki/ExtendModules for details.
+ """
+ option_name = '%s_extend' % module_name
+ version = config.get('Boto', option_name, None)
+ if version:
+ dirname = module_path[0]
+ path = os.path.join(dirname, version)
+ if os.path.isdir(path):
+ log.info('extending module %s with: %s' % (module_name, path))
+ module_path.insert(0, path)
+
+_aws_cache = {}
+
+def _get_aws_conn(service):
+ global _aws_cache
+ conn = _aws_cache.get(service)
+ if not conn:
+ meth = getattr(sys.modules[__name__], 'connect_'+service)
+ conn = meth()
+ _aws_cache[service] = conn
+ return conn
+
+def lookup(service, name):
+ global _aws_cache
+ conn = _get_aws_conn(service)
+ obj = _aws_cache.get('.'.join((service,name)), None)
+ if not obj:
+ obj = conn.lookup(name)
+ _aws_cache['.'.join((service,name))] = obj
+ return obj
+
+def storage_uri(uri_str, default_provider='file', debug=False):
+ """Instantiate a StorageUri from a URI string.
+
+ :type uri_str: string
+ :param uri_str: URI naming bucket + optional object.
+ :type default_provider: string
+ :param default_provider: default provider for provider-less URIs.
+
+ :rtype: :class:`boto.StorageUri` subclass
+ :return: StorageUri subclass for given URI.
+
+ uri_str must be one of the following formats:
+ gs://bucket/name
+ s3://bucket/name
+ gs://bucket
+ s3://bucket
+ filename
+ The last example uses the default provider ('file', unless overridden)
+ """
+
+ # Manually parse URI components instead of using urlparse.urlparse because
+ # what we're calling URIs don't really fit the standard syntax for URIs
+ # (the latter includes an optional host/net location part).
+ end_provider_idx = uri_str.find('://')
+ if end_provider_idx == -1:
+ provider = default_provider.lower()
+ path = uri_str
+ else:
+ provider = uri_str[0:end_provider_idx].lower()
+ path = uri_str[end_provider_idx + 3:]
+
+ if provider not in ['file', 's3', 'gs']:
+ raise InvalidUriError('Unrecognized provider "%s"' % provider)
+ if provider == 'file':
+ # For file URIs we have no bucket name, and use the complete path
+ # (minus 'file://') as the object name.
+ return FileStorageUri(path, debug)
+ else:
+ path_parts = path.split('/', 1)
+ bucket_name = path_parts[0]
+ # Ensure the bucket name is valid, to avoid possibly confusing other
+ # parts of the code. (For example if we didn't catch bucket names
+ # containing ':', when a user tried to connect to the server with that
+ # name they might get a confusing error about non-integer port numbers.)
+ if (bucket_name and
+ not re.match('^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$', bucket_name)):
+ raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
+ object_name = ''
+ if len(path_parts) > 1:
+ object_name = path_parts[1]
+ return BucketStorageUri(provider, bucket_name, object_name, debug)
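
For illustration, a minimal sketch of how storage_uri() dispatches between
providers, assuming the vendored package is importable as
s3ql.backends.boto and that InvalidUriError is re-exported at package level
(both are assumptions about the surrounding wiring):

    from s3ql.backends.boto import storage_uri, InvalidUriError

    for uri in ('s3://mybucket/some/key', 'gs://mybucket', '/tmp/somefile'):
        try:
            # provider-less strings fall back to default_provider ('file')
            u = storage_uri(uri)
            print '%s -> %r' % (uri, u)
        except InvalidUriError, e:
            print '%s rejected: %s' % (uri, e.message)
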
diff --git a/src/s3ql/backends/boto/connection.py b/src/s3ql/backends/boto/connection.py
new file mode 100644
index 0000000..7c225c7
--- /dev/null
+++ b/src/s3ql/backends/boto/connection.py
@@ -0,0 +1,683 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2008 rPath, Inc.
+# Copyright (c) 2009 The Echo Nest Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# Parts of this code were copied or derived from sample code supplied by AWS.
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+"""
+Handles basic connections to AWS
+"""
+
+import base64
+import hmac
+import httplib
+import socket, errno
+import re
+import sys
+import time
+import urllib, urlparse
+import os
+import xml.sax
+import Queue
+from .. import boto
+from .exception import BotoClientError, BotoServerError
+from .resultset import ResultSet
+from . import utils
+from . import config, UserAgent, handler
+
+#
+# the following is necessary because of the incompatibilities
+# between Python 2.4, 2.5, and 2.6 as well as the fact that some
+# people running 2.4 have installed hashlib as a separate module
+# this fix was provided by boto user mccormix.
+# see: http://code.google.com/p/boto/issues/detail?id=172
+# for more details.
+#
+try:
+ from hashlib import sha1 as sha
+ from hashlib import sha256 as sha256
+
+ if sys.version[:3] == "2.4":
+ # we are using an hmac that expects a .new() method.
+ class Faker:
+ def __init__(self, which):
+ self.which = which
+ self.digest_size = self.which().digest_size
+
+ def new(self, *args, **kwargs):
+ return self.which(*args, **kwargs)
+
+ sha = Faker(sha)
+ sha256 = Faker(sha256)
+
+except ImportError:
+ import sha
+ sha256 = None
+
+PORTS_BY_SECURITY = { True: 443, False: 80 }
+
+class ConnectionPool:
+ def __init__(self, hosts, connections_per_host):
+ self._hosts = boto.utils.LRUCache(hosts)
+ self.connections_per_host = connections_per_host
+
+ def __getitem__(self, key):
+ if key not in self._hosts:
+ self._hosts[key] = Queue.Queue(self.connections_per_host)
+ return self._hosts[key]
+
+ def __repr__(self):
+ return 'ConnectionPool:%s' % ','.join(self._hosts._dict.keys())
+
+class ProviderCredentials(object):
+
+ ProviderCredentialMap = {
+ 'aws' : ('aws_access_key_id', 'aws_secret_access_key'),
+ 'google' : ('gs_access_key_id', 'gs_secret_access_key'),
+ }
+
+ def __init__(self, provider, access_key=None, secret_key=None):
+ self.provider = provider
+ self.access_key = None
+ self.secret_key = None
+ access_key_name, secret_key_name = self.ProviderCredentialMap[provider]
+ if access_key:
+ self.access_key = access_key
+ elif os.environ.has_key(access_key_name.upper()):
+ self.access_key = os.environ[access_key_name.upper()]
+ elif config.has_option('Credentials', access_key_name):
+ self.access_key = config.get('Credentials', access_key_name)
+
+ if secret_key:
+ self.secret_key = secret_key
+ elif os.environ.has_key(secret_key_name.upper()):
+ self.secret_key = os.environ[secret_key_name.upper()]
+ elif config.has_option('Credentials', secret_key_name):
+ self.secret_key = config.get('Credentials', secret_key_name)
+
+class AWSAuthConnection(object):
+
+ def __init__(self, host, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, debug=0,
+ https_connection_factory=None, path='/', provider='aws'):
+ """
+ :type host: string
+ :param host: The host to make the connection to
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: AWS Access Key ID (provided by Amazon)
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Secret Access Key (provided by Amazon)
+
+ :type is_secure: boolean
+ :param is_secure: Whether the connection is over SSL
+
+ :type https_connection_factory: list or tuple
+ :param https_connection_factory: A pair of an HTTP connection
+ factory and the exceptions to catch.
+ The factory should have a similar
+ interface to L{httplib.HTTPSConnection}.
+
+ :type proxy: string
+ :param proxy: Address/hostname of a proxy server to be used
+
+ :type proxy_port: int
+ :param proxy_port: The port to use when connecting over a proxy
+
+ :type proxy_user: string
+ :param proxy_user: The username to connect with on the proxy
+
+ :type proxy_pass: string
+ :param proxy_pass: The password to use when connecting over a proxy.
+
+ :type port: integer
+ :param port: The port to use to connect
+ """
+
+ self.num_retries = 5
+ self.is_secure = is_secure
+ self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
+ # define exceptions from httplib that we want to catch and retry
+ self.http_exceptions = (httplib.HTTPException, socket.error, socket.gaierror)
+ # define values in socket exceptions we don't want to catch
+ self.socket_exception_values = (errno.EINTR,)
+ if https_connection_factory is not None:
+ self.https_connection_factory = https_connection_factory[0]
+ self.http_exceptions += https_connection_factory[1]
+ else:
+ self.https_connection_factory = None
+ if (is_secure):
+ self.protocol = 'https'
+ else:
+ self.protocol = 'http'
+ self.host = host
+ self.path = path
+ if debug:
+ self.debug = debug
+ else:
+ self.debug = config.getint('Boto', 'debug', debug)
+ if port:
+ self.port = port
+ else:
+ self.port = PORTS_BY_SECURITY[is_secure]
+
+ self.provider_credentials = ProviderCredentials(provider,
+ aws_access_key_id,
+ aws_secret_access_key)
+
+ # initialize an HMAC for signatures, make copies with each request
+ self.hmac = hmac.new(self.aws_secret_access_key, digestmod=sha)
+ if sha256:
+ self.hmac_256 = hmac.new(self.aws_secret_access_key, digestmod=sha256)
+ else:
+ self.hmac_256 = None
+
+ # cache up to 20 connections per host, up to 20 hosts
+ self._pool = ConnectionPool(20, 20)
+ self._connection = (self.server_name(), self.is_secure)
+ self._last_rs = None
+
+ def __repr__(self):
+ return '%s:%s' % (self.__class__.__name__, self.host)
+
+ def _cached_name(self, host, is_secure):
+ if host is None:
+ host = self.server_name()
+ cached_name = is_secure and 'https://' or 'http://'
+ cached_name += host
+ return cached_name
+
+ def connection(self):
+ return self.get_http_connection(*self._connection)
+ connection = property(connection)
+
+ def aws_access_key_id(self):
+ return self.provider_credentials.access_key
+ aws_access_key_id = property(aws_access_key_id)
+ gs_access_key_id = aws_access_key_id
+ access_key = aws_access_key_id
+
+ def aws_secret_access_key(self):
+ return self.provider_credentials.secret_key
+ aws_secret_access_key = property(aws_secret_access_key)
+ gs_secret_access_key = aws_secret_access_key
+ secret_key = aws_secret_access_key
+
+ def get_path(self, path='/'):
+ pos = path.find('?')
+ if pos >= 0:
+ params = path[pos:]
+ path = path[:pos]
+ else:
+ params = None
+ if path[-1] == '/':
+ need_trailing = True
+ else:
+ need_trailing = False
+ path_elements = self.path.split('/')
+ path_elements.extend(path.split('/'))
+ path_elements = [p for p in path_elements if p]
+ path = '/' + '/'.join(path_elements)
+ if path[-1] != '/' and need_trailing:
+ path += '/'
+ if params:
+ path = path + params
+ return path
+
+ def server_name(self, port=None):
+ if not port:
+ port = self.port
+ if port == 80:
+ signature_host = self.host
+ else:
+ # This unfortunate little hack can be attributed to
+ # a difference in the 2.6 version of httplib. In old
+ # versions, it would append ":443" to the hostname sent
+ # in the Host header and so we needed to make sure we
+ # did the same when calculating the V2 signature. In 2.6
+ # it no longer does that. Hence, this kludge.
+ if sys.version[:3] == "2.6" and port == 443:
+ signature_host = self.host
+ else:
+ signature_host = '%s:%d' % (self.host, port)
+ return signature_host
+
+ def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
+ self.proxy = proxy
+ self.proxy_port = proxy_port
+ self.proxy_user = proxy_user
+ self.proxy_pass = proxy_pass
+ if os.environ.has_key('http_proxy') and not self.proxy:
+ pattern = re.compile(
+ '(?:http://)?' \
+ '(?:(?P<user>\w+):(?P<pass>.*)@)?' \
+ '(?P<host>[\w\-\.]+)' \
+ '(?::(?P<port>\d+))?'
+ )
+ match = pattern.match(os.environ['http_proxy'])
+ if match:
+ self.proxy = match.group('host')
+ self.proxy_port = match.group('port')
+ self.proxy_user = match.group('user')
+ self.proxy_pass = match.group('pass')
+ else:
+ if not self.proxy:
+ self.proxy = config.get_value('Boto', 'proxy', None)
+ if not self.proxy_port:
+ self.proxy_port = config.get_value('Boto', 'proxy_port', None)
+ if not self.proxy_user:
+ self.proxy_user = config.get_value('Boto', 'proxy_user', None)
+ if not self.proxy_pass:
+ self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
+
+ if not self.proxy_port and self.proxy:
+ print "http_proxy environment variable does not specify " \
+ "a port, using default"
+ self.proxy_port = self.port
+ self.use_proxy = (self.proxy is not None)
+
+ def get_http_connection(self, host, is_secure):
+ queue = self._pool[self._cached_name(host, is_secure)]
+ try:
+ return queue.get_nowait()
+ except Queue.Empty:
+ return self.new_http_connection(host, is_secure)
+
+ def new_http_connection(self, host, is_secure):
+ if self.use_proxy:
+ host = '%s:%d' % (self.proxy, int(self.proxy_port))
+ if host is None:
+ host = self.server_name()
+ boto.log.debug('establishing HTTP connection')
+ if is_secure:
+ if self.use_proxy:
+ connection = self.proxy_ssl()
+ elif self.https_connection_factory:
+ connection = self.https_connection_factory(host)
+ else:
+ connection = httplib.HTTPSConnection(host)
+ else:
+ connection = httplib.HTTPConnection(host)
+ if self.debug > 1:
+ connection.set_debuglevel(self.debug)
+ # self.connection must be maintained for backwards-compatibility
+ # however, it must be dynamically pulled from the connection pool
+ # set a private variable which will enable that
+ if host.split(':')[0] == self.host and is_secure == self.is_secure:
+ self._connection = (host, is_secure)
+ return connection
+
+ def put_http_connection(self, host, is_secure, connection):
+ try:
+ self._pool[self._cached_name(host, is_secure)].put_nowait(connection)
+ except Queue.Full:
+ # gracefully fail in case of pool overflow
+ connection.close()
+
+ def proxy_ssl(self):
+ host = '%s:%d' % (self.host, self.port)
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect((self.proxy, int(self.proxy_port)))
+ sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
+ sock.sendall("User-Agent: %s\r\n" % UserAgent)
+ if self.proxy_user and self.proxy_pass:
+ for k, v in self.get_proxy_auth_header().items():
+ sock.sendall("%s: %s\r\n" % (k, v))
+ sock.sendall("\r\n")
+ resp = httplib.HTTPResponse(sock, strict=True)
+ resp.begin()
+
+ if resp.status != 200:
+ # Fake a socket error, using a code that makes it obvious it was not
+ # generated by the socket library
+ raise socket.error(-71,
+ "Error talking to HTTP proxy %s:%s: %s (%s)" %
+ (self.proxy, self.proxy_port, resp.status, resp.reason))
+
+ # We can safely close the response, it duped the original socket
+ resp.close()
+
+ h = httplib.HTTPConnection(host)
+
+ # Wrap the socket in an SSL socket
+ if hasattr(httplib, 'ssl'):
+ sslSock = httplib.ssl.SSLSocket(sock)
+ else: # Old Python, no ssl module
+ sslSock = socket.ssl(sock, None, None)
+ sslSock = httplib.FakeSocket(sock, sslSock)
+ # This is a bit unclean
+ h.sock = sslSock
+ return h
+
+ def prefix_proxy_to_path(self, path, host=None):
+ path = self.protocol + '://' + (host or self.server_name()) + path
+ return path
+
+ def get_proxy_auth_header(self):
+ auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
+ return {'Proxy-Authorization': 'Basic %s' % auth}
+
+ def _mexe(self, method, path, data, headers, host=None, sender=None):
+ """
+ mexe - Multi-execute: run the request inside a loop, retrying several
+ times so that transient network errors are simply ridden out.
+ Also handles redirects.
+
+ This code was inspired by the S3Utils classes posted to the boto-users
+ Google group by Larry Bates. Thanks!
+ """
+ boto.log.debug('Method: %s' % method)
+ boto.log.debug('Path: %s' % path)
+ boto.log.debug('Data: %s' % data)
+ boto.log.debug('Headers: %s' % headers)
+ boto.log.debug('Host: %s' % host)
+ response = None
+ body = None
+ e = None
+ num_retries = config.getint('Boto', 'num_retries', self.num_retries)
+ i = 0
+ connection = self.get_http_connection(host, self.is_secure)
+ while i <= num_retries:
+ try:
+ if callable(sender):
+ response = sender(connection, method, path, data, headers)
+ else:
+ connection.request(method, path, data, headers)
+ response = connection.getresponse()
+ location = response.getheader('location')
+ # -- gross hack --
+ # httplib gets confused with chunked responses to HEAD requests
+ # so I have to fake it out
+ if method == 'HEAD' and getattr(response, 'chunked', False):
+ response.chunked = 0
+ if response.status == 500 or response.status == 503:
+ boto.log.debug('received %d response, retrying in %d seconds' % (response.status, 2 ** i))
+ body = response.read()
+ elif response.status == 408:
+ body = response.read()
+ print '-------------------------'
+ print ' 4 0 8 '
+ print 'path=%s' % path
+ print body
+ print '-------------------------'
+ elif response.status < 300 or response.status >= 400 or \
+ not location:
+ self.put_http_connection(host, self.is_secure, connection)
+ return response
+ else:
+ scheme, host, path, params, query, fragment = \
+ urlparse.urlparse(location)
+ if query:
+ path += '?' + query
+ boto.log.debug('Redirecting: %s' % scheme + '://' + host + path)
+ connection = self.get_http_connection(host,
+ scheme == 'https')
+ continue
+ except KeyboardInterrupt:
+ sys.exit('Keyboard Interrupt')
+ except httplib.BadStatusLine as e:
+ boto.log.warn('Bad status line: %r, retrying..', e.line)
+ connection = self.new_http_connection(host, self.is_secure)
+ except self.http_exceptions, e:
+ boto.log.warn('encountered http exception, reconnecting',
+ exc_info=True)
+ connection = self.new_http_connection(host, self.is_secure)
+ time.sleep(2 ** i)
+ i += 1
+ # If we made it here, we have exhausted our retries and still haven't
+ # succeeded. So, if we have a response object, use it to raise an exception.
+ # Otherwise, raise the exception that must have already happened.
+ if response:
+ raise BotoServerError(response.status, response.reason, body)
+ elif e:
+ raise e
+ else:
+ raise BotoClientError('Please report this exception as a Boto Issue!')
+
+ def make_request(self, method, path, headers=None, data='', host=None,
+ auth_path=None, sender=None):
+ path = self.get_path(path)
+ if headers is None:
+ headers = {}
+ else:
+ headers = headers.copy()
+ headers['User-Agent'] = UserAgent
+ if not headers.has_key('Content-Length'):
+ headers['Content-Length'] = str(len(data))
+ if self.use_proxy:
+ path = self.prefix_proxy_to_path(path, host)
+ if self.proxy_user and self.proxy_pass and not self.is_secure:
+ # If is_secure, we don't have to set the proxy authentication
+ # header here, we did that in the CONNECT to the proxy.
+ headers.update(self.get_proxy_auth_header())
+ request_string = auth_path or path
+ for key in headers:
+ val = headers[key]
+ if isinstance(val, unicode):
+ headers[key] = urllib.quote_plus(val.encode('utf-8'))
+ self.add_aws_auth_header(headers, method, request_string)
+ return self._mexe(method, path, data, headers, host, sender)
+
+ def add_aws_auth_header(self, headers, method, path):
+ path = self.get_path(path)
+ if not headers.has_key('Date'):
+ headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
+ time.gmtime())
+
+ c_string = boto.utils.canonical_string(method, path, headers)
+ boto.log.debug('Canonical: %s' % c_string)
+ hmac = self.hmac.copy()
+ hmac.update(c_string)
+ b64_hmac = base64.encodestring(hmac.digest()).strip()
+ headers['Authorization'] = "AWS %s:%s" % (self.aws_access_key_id, b64_hmac)
+
+ def close(self):
+ """(Optional) Close any open HTTP connections. This is non-destructive,
+ and making a new request will open a connection again."""
+
+ boto.log.debug('closing all HTTP connections')
+ # 'connection' is a read-only property on this class, so the previous
+ # 'self.connection = None' compatibility assignment always raised
+ # AttributeError. Dropping the pool instead lets idle pooled sockets
+ # be closed by the garbage collector.
+ self._pool = ConnectionPool(20, 20)
+
+
+class AWSQueryConnection(AWSAuthConnection):
+
+ APIVersion = ''
+ SignatureVersion = '1'
+ ResponseError = BotoServerError
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, host=None, debug=0,
+ https_connection_factory=None, path='/'):
+ AWSAuthConnection.__init__(self, host, aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+ debug, https_connection_factory, path)
+
+ def get_utf8_value(self, value):
+ if not isinstance(value, str) and not isinstance(value, unicode):
+ value = str(value)
+ if isinstance(value, unicode):
+ return value.encode('utf-8')
+ else:
+ return value
+
+ def calc_signature_0(self, params):
+ boto.log.debug('using calc_signature_0')
+ hmac = self.hmac.copy()
+ s = params['Action'] + params['Timestamp']
+ hmac.update(s)
+ keys = params.keys()
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
+ pairs = []
+ for key in keys:
+ val = self.get_utf8_value(params[key])
+ pairs.append(key + '=' + urllib.quote(val))
+ qs = '&'.join(pairs)
+ return (qs, base64.b64encode(hmac.digest()))
+
+ def calc_signature_1(self, params):
+ boto.log.debug('using calc_signature_1')
+ hmac = self.hmac.copy()
+ keys = params.keys()
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
+ pairs = []
+ for key in keys:
+ hmac.update(key)
+ val = self.get_utf8_value(params[key])
+ hmac.update(val)
+ pairs.append(key + '=' + urllib.quote(val))
+ qs = '&'.join(pairs)
+ return (qs, base64.b64encode(hmac.digest()))
+
+ def calc_signature_2(self, params, verb, path):
+ boto.log.debug('using calc_signature_2')
+ string_to_sign = '%s\n%s\n%s\n' % (verb, self.server_name().lower(), path)
+ if self.hmac_256:
+ hmac = self.hmac_256.copy()
+ params['SignatureMethod'] = 'HmacSHA256'
+ else:
+ hmac = self.hmac.copy()
+ params['SignatureMethod'] = 'HmacSHA1'
+ keys = params.keys()
+ keys.sort()
+ pairs = []
+ for key in keys:
+ val = self.get_utf8_value(params[key])
+ pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~'))
+ qs = '&'.join(pairs)
+ boto.log.debug('query string: %s' % qs)
+ string_to_sign += qs
+ boto.log.debug('string_to_sign: %s' % string_to_sign)
+ hmac.update(string_to_sign)
+ b64 = base64.b64encode(hmac.digest())
+ boto.log.debug('len(b64)=%d' % len(b64))
+ boto.log.debug('base64 encoded digest: %s' % b64)
+ return (qs, b64)
+
+ def get_signature(self, params, verb, path):
+ if self.SignatureVersion == '0':
+ t = self.calc_signature_0(params)
+ elif self.SignatureVersion == '1':
+ t = self.calc_signature_1(params)
+ elif self.SignatureVersion == '2':
+ t = self.calc_signature_2(params, verb, path)
+ else:
+ raise BotoClientError('Unknown Signature Version: %s' % self.SignatureVersion)
+ return t
+
+ def make_request(self, action, params=None, path='/', verb='GET'):
+ headers = {}
+ if params is None:
+ params = {}
+ params['Action'] = action
+ params['Version'] = self.APIVersion
+ params['AWSAccessKeyId'] = self.aws_access_key_id
+ params['SignatureVersion'] = self.SignatureVersion
+ params['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
+ qs, signature = self.get_signature(params, verb, self.get_path(path))
+ if verb == 'POST':
+ headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
+ request_body = qs + '&Signature=' + urllib.quote(signature)
+ qs = path
+ else:
+ request_body = ''
+ qs = path + '?' + qs + '&Signature=' + urllib.quote(signature)
+ return AWSAuthConnection.make_request(self, verb, qs,
+ data=request_body,
+ headers=headers)
+
+ def build_list_params(self, params, items, label):
+ if isinstance(items, str):
+ items = [items]
+ for i in range(1, len(items) + 1):
+ params['%s.%d' % (label, i)] = items[i - 1]
+
+ # generics
+
+ def get_list(self, action, params, markers, path='/', parent=None, verb='GET'):
+ if not parent:
+ parent = self
+ response = self.make_request(action, params, path, verb)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ rs = ResultSet(markers)
+ h = handler.XmlHandler(rs, parent)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def get_object(self, action, params, cls, path='/', parent=None, verb='GET'):
+ if not parent:
+ parent = self
+ response = self.make_request(action, params, path, verb)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ obj = cls(parent)
+ h = handler.XmlHandler(obj, parent)
+ xml.sax.parseString(body, h)
+ return obj
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def get_status(self, action, params, path='/', parent=None, verb='GET'):
+ if not parent:
+ parent = self
+ response = self.make_request(action, params, path, verb)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, parent)
+ xml.sax.parseString(body, h)
+ return rs.status
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
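
Concrete services subclass AWSQueryConnection, pin APIVersion and
SignatureVersion, and then lean on the generic get_list/get_object/get_status
helpers for the request/parse cycle. A minimal sketch of such a subclass;
the service name, host, action and version string below are invented purely
for illustration:

    from s3ql.backends.boto.connection import AWSQueryConnection

    class ExampleServiceConnection(AWSQueryConnection):
        APIVersion = '2009-01-01'     # invented version string
        SignatureVersion = '2'        # sign with HmacSHA256 when available

        def __init__(self, aws_access_key_id=None, aws_secret_access_key=None):
            AWSQueryConnection.__init__(self, aws_access_key_id,
                                        aws_secret_access_key,
                                        host='example.amazonaws.com')

        def describe_things(self):
            # get_status() issues the signed query request and reduces the
            # XML response to the boolean 'status' of a ResultSet
            return self.get_status('DescribeThings', {})
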
diff --git a/src/s3ql/backends/boto/exception.py b/src/s3ql/backends/boto/exception.py
new file mode 100644
index 0000000..609998f
--- /dev/null
+++ b/src/s3ql/backends/boto/exception.py
@@ -0,0 +1,305 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+"""
+Exception classes - Subclassing allows you to check for specific errors
+"""
+import base64
+import xml.sax
+from . import handler
+from .resultset import ResultSet
+
+
+class BotoClientError(StandardError):
+ """
+ General Boto Client error (error accessing AWS)
+ """
+
+ def __init__(self, reason):
+ StandardError.__init__(self)
+ self.reason = reason
+
+ def __repr__(self):
+ return 'S3Error: %s' % self.reason
+
+ def __str__(self):
+ return 'S3Error: %s' % self.reason
+
+class SDBPersistenceError(StandardError):
+
+ pass
+
+class S3PermissionsError(BotoClientError):
+ """
+ Permissions error when accessing a bucket or key on S3.
+ """
+ pass
+
+class BotoServerError(StandardError):
+
+ def __init__(self, status, reason, body=None):
+ StandardError.__init__(self)
+ self.status = status
+ self.reason = reason
+ self.body = body or ''
+ self.request_id = None
+ self.error_code = None
+ self.error_message = None
+ self.box_usage = None
+
+ # Attempt to parse the error response. If body isn't present,
+ # then just ignore the error response.
+ if self.body:
+ try:
+ h = handler.XmlHandler(self, self)
+ xml.sax.parseString(self.body, h)
+ except xml.sax.SAXParseException, pe:
+ # Go ahead and clean up anything that may have
+ # managed to get into the error data so we
+ # don't get partial garbage.
+ print "Warning: failed to parse error message from AWS: %s" % pe
+ self._cleanupParsedProperties()
+
+ def __getattr__(self, name):
+ if name == 'message':
+ return self.error_message
+ if name == 'code':
+ return self.error_code
+ raise AttributeError
+
+ def __repr__(self):
+ return '%s: %s %s\n%s' % (self.__class__.__name__,
+ self.status, self.reason, self.body)
+
+ def __str__(self):
+ return '%s: %s %s\n%s' % (self.__class__.__name__,
+ self.status, self.reason, self.body)
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name in ('RequestId', 'RequestID'):
+ self.request_id = value
+ elif name == 'Code':
+ self.error_code = value
+ elif name == 'Message':
+ self.error_message = value
+ elif name == 'BoxUsage':
+ self.box_usage = value
+ return None
+
+ def _cleanupParsedProperties(self):
+ self.request_id = None
+ self.error_code = None
+ self.error_message = None
+ self.box_usage = None
+
+class ConsoleOutput:
+
+ def __init__(self, parent=None):
+ self.parent = parent
+ self.instance_id = None
+ self.timestamp = None
+ self.comment = None
+ self.output = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'instanceId':
+ self.instance_id = value
+ elif name == 'output':
+ self.output = base64.b64decode(value)
+ else:
+ setattr(self, name, value)
+
+class S3CreateError(BotoServerError):
+ """
+ Error creating a bucket or key on S3.
+ """
+ def __init__(self, status, reason, body=None):
+ self.bucket = None
+ BotoServerError.__init__(self, status, reason, body)
+
+ def endElement(self, name, value, connection):
+ if name == 'BucketName':
+ self.bucket = value
+ else:
+ return BotoServerError.endElement(self, name, value, connection)
+
+class S3CopyError(BotoServerError):
+ """
+ Error copying a key on S3.
+ """
+ pass
+
+class SQSError(BotoServerError):
+ """
+ General Error on Simple Queue Service.
+ """
+ def __init__(self, status, reason, body=None):
+ self.detail = None
+ self.type = None
+ BotoServerError.__init__(self, status, reason, body)
+
+ def startElement(self, name, attrs, connection):
+ return BotoServerError.startElement(self, name, attrs, connection)
+
+ def endElement(self, name, value, connection):
+ if name == 'Detail':
+ self.detail = value
+ elif name == 'Type':
+ self.type = value
+ else:
+ return BotoServerError.endElement(self, name, value, connection)
+
+ def _cleanupParsedProperties(self):
+ BotoServerError._cleanupParsedProperties(self)
+ for p in ('detail', 'type'):
+ setattr(self, p, None)
+
+class SQSDecodeError(BotoClientError):
+ """
+ Error when decoding an SQS message.
+ """
+ def __init__(self, reason, message):
+ BotoClientError.__init__(self, reason)
+ self.message = message
+
+ def __repr__(self):
+ return 'SQSDecodeError: %s' % self.reason
+
+ def __str__(self):
+ return 'SQSDecodeError: %s' % self.reason
+
+class S3ResponseError(BotoServerError):
+ """
+ Error in response from S3.
+ """
+ def __init__(self, status, reason, body=None):
+ self.resource = None
+ BotoServerError.__init__(self, status, reason, body)
+
+ def startElement(self, name, attrs, connection):
+ return BotoServerError.startElement(self, name, attrs, connection)
+
+ def endElement(self, name, value, connection):
+ if name == 'Resource':
+ self.resource = value
+ else:
+ return BotoServerError.endElement(self, name, value, connection)
+
+ def _cleanupParsedProperties(self):
+ BotoServerError._cleanupParsedProperties(self)
+ # trailing comma needed: ('resource') is just a string, and iterating
+ # over it would set single-character attributes instead
+ for p in ('resource',):
+ setattr(self, p, None)
+
+class EC2ResponseError(BotoServerError):
+ """
+ Error in response from EC2.
+ """
+
+ def __init__(self, status, reason, body=None):
+ self.errors = None
+ self._errorResultSet = []
+ BotoServerError.__init__(self, status, reason, body)
+ self.errors = [ (e.error_code, e.error_message) \
+ for e in self._errorResultSet ]
+ if len(self.errors):
+ self.error_code, self.error_message = self.errors[0]
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Errors':
+ self._errorResultSet = ResultSet([('Error', _EC2Error)])
+ return self._errorResultSet
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'RequestID':
+ self.request_id = value
+ else:
+ return None # don't call subclass here
+
+ def _cleanupParsedProperties(self):
+ BotoServerError._cleanupParsedProperties(self)
+ self._errorResultSet = []
+ for p in ('errors',):
+ setattr(self, p, None)
+
+class EmrResponseError(BotoServerError):
+ """
+ Error in response from EMR
+ """
+ pass
+
+class _EC2Error:
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.error_code = None
+ self.error_message = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Code':
+ self.error_code = value
+ elif name == 'Message':
+ self.error_message = value
+ else:
+ return None
+
+class SDBResponseError(BotoServerError):
+ """
+ Error in response from SDB.
+ """
+ pass
+
+class AWSConnectionError(BotoClientError):
+ """
+ General error connecting to Amazon Web Services.
+ """
+ pass
+
+class S3DataError(BotoClientError):
+ """
+ Error receiving data from S3.
+ """
+ pass
+
+class FPSResponseError(BotoServerError):
+ pass
+
+
+class InvalidUriError(Exception):
+ """Exception raised when URI is invalid."""
+
+ def __init__(self, message):
+ Exception.__init__(self)
+ self.message = message
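
Since BotoServerError parses the XML error body during construction, the
resulting attributes can be exercised without any network traffic. A
self-contained sketch; the error body below is a hand-written stand-in for a
real S3 error response:

    from s3ql.backends.boto.exception import S3ResponseError

    body = ('<Error><Code>NoSuchBucket</Code>'
            '<Message>The specified bucket does not exist</Message>'
            '<Resource>/no-such-bucket</Resource>'
            '<RequestId>ABC123</RequestId></Error>')
    e = S3ResponseError(404, 'Not Found', body)
    print e.error_code       # -> NoSuchBucket
    print e.error_message    # -> The specified bucket does not exist
    print e.resource         # -> /no-such-bucket
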
diff --git a/src/s3ql/backends/boto/handler.py b/src/s3ql/backends/boto/handler.py
new file mode 100644
index 0000000..528fbae
--- /dev/null
+++ b/src/s3ql/backends/boto/handler.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+import xml.sax
+
+class XmlHandler(xml.sax.ContentHandler):
+
+ def __init__(self, root_node, connection):
+ self.connection = connection
+ self.nodes = [('root', root_node)]
+ self.current_text = ''
+
+ def startElement(self, name, attrs):
+ self.current_text = ''
+ new_node = self.nodes[-1][1].startElement(name, attrs, self.connection)
+ if new_node is not None:
+ self.nodes.append((name, new_node))
+
+ def endElement(self, name):
+ self.nodes[-1][1].endElement(name, self.current_text, self.connection)
+ if self.nodes[-1][0] == name:
+ self.nodes.pop()
+ self.current_text = ''
+
+ def characters(self, content):
+ self.current_text += content
+
+
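
XmlHandler keeps a stack of (name, node) pairs and forwards every element to
the node on top, so any object implementing startElement/endElement can serve
as a parse target. A minimal sketch with a hand-rolled node class; the XML
payload and the Item class are invented for illustration:

    import xml.sax
    from s3ql.backends.boto.handler import XmlHandler

    class Item:
        def __init__(self):
            self.name = None
        def startElement(self, name, attrs, connection):
            return None          # nothing nested to push onto the stack
        def endElement(self, name, value, connection):
            if name == 'Name':
                self.name = value

    item = Item()
    xml.sax.parseString('<Item><Name>example</Name></Item>',
                        XmlHandler(item, None))
    print item.name              # -> example
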
diff --git a/src/s3ql/backends/boto/pyami/__init__.py b/src/s3ql/backends/boto/pyami/__init__.py
new file mode 100644
index 0000000..403f980
--- /dev/null
+++ b/src/s3ql/backends/boto/pyami/__init__.py
@@ -0,0 +1 @@
+# Dummy
diff --git a/src/s3ql/backends/boto/pyami/config.py b/src/s3ql/backends/boto/pyami/config.py
new file mode 100644
index 0000000..aa8d72c
--- /dev/null
+++ b/src/s3ql/backends/boto/pyami/config.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+import StringIO, os, re
+import ConfigParser
+
+BotoConfigPath = '/etc/boto.cfg'
+BotoConfigLocations = [BotoConfigPath]
+if 'HOME' in os.environ:
+ UserConfigPath = os.path.expanduser('~/.boto')
+ BotoConfigLocations.append(UserConfigPath)
+else:
+ UserConfigPath = None
+if 'BOTO_CONFIG' in os.environ:
+ BotoConfigLocations.append(os.path.expanduser(os.environ['BOTO_CONFIG']))
+
+class Config(ConfigParser.SafeConfigParser):
+
+ def __init__(self, path=None, fp=None, do_load=True):
+ ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami',
+ 'debug' : '0'})
+ if do_load:
+ if path:
+ self.load_from_path(path)
+ elif fp:
+ self.readfp(fp)
+ else:
+ self.read(BotoConfigLocations)
+ if "AWS_CREDENTIAL_FILE" in os.environ:
+ self.load_credential_file(os.path.expanduser(os.environ['AWS_CREDENTIAL_FILE']))
+
+ def load_credential_file(self, path):
+ """Load a credential file as is setup like the Java utilities"""
+ c_data = StringIO.StringIO()
+ c_data.write("[Credentials]\n")
+ for line in open(path, "r").readlines():
+ c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
+ c_data.seek(0)
+ self.readfp(c_data)
+
+ def load_from_path(self, path):
+ file = open(path)
+ for line in file.readlines():
+ match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
+ if match:
+ extended_file = match.group(1)
+ (dir, file) = os.path.split(path)
+ self.load_from_path(os.path.join(dir, extended_file))
+ self.read(path)
+
+ def save_option(self, path, section, option, value):
+ """
+ Write the specified Section.Option to the config file specified by path.
+ Replace any previous value. If the path doesn't exist, create it.
+ Also add the option to the in-memory config.
+ """
+ config = ConfigParser.SafeConfigParser()
+ config.read(path)
+ if not config.has_section(section):
+ config.add_section(section)
+ config.set(section, option, value)
+ fp = open(path, 'w')
+ config.write(fp)
+ fp.close()
+ if not self.has_section(section):
+ self.add_section(section)
+ self.set(section, option, value)
+
+ def save_user_option(self, section, option, value):
+ self.save_option(UserConfigPath, section, option, value)
+
+ def save_system_option(self, section, option, value):
+ self.save_option(BotoConfigPath, section, option, value)
+
+ def get_instance(self, name, default=None):
+ try:
+ val = self.get('Instance', name)
+ except:
+ val = default
+ return val
+
+ def get_user(self, name, default=None):
+ try:
+ val = self.get('User', name)
+ except:
+ val = default
+ return val
+
+ def getint_user(self, name, default=0):
+ try:
+ val = self.getint('User', name)
+ except:
+ val = default
+ return val
+
+ def get_value(self, section, name, default=None):
+ return self.get(section, name, default)
+
+ def get(self, section, name, default=None):
+ try:
+ val = ConfigParser.SafeConfigParser.get(self, section, name)
+ except:
+ val = default
+ return val
+
+ def getint(self, section, name, default=0):
+ try:
+ val = ConfigParser.SafeConfigParser.getint(self, section, name)
+ except:
+ val = int(default)
+ return val
+
+ def getfloat(self, section, name, default=0.0):
+ try:
+ val = ConfigParser.SafeConfigParser.getfloat(self, section, name)
+ except:
+ val = float(default)
+ return val
+
+ def getbool(self, section, name, default=False):
+ if self.has_option(section, name):
+ val = self.get(section, name)
+ if val.lower() == 'true':
+ val = True
+ else:
+ val = False
+ else:
+ val = default
+ return val
+
+ def setbool(self, section, name, value):
+ if value:
+ self.set(section, name, 'true')
+ else:
+ self.set(section, name, 'false')
+
+ def dump(self):
+ s = StringIO.StringIO()
+ self.write(s)
+ print s.getvalue()
+
+ def dump_safe(self, fp=None):
+ if not fp:
+ fp = StringIO.StringIO()
+ for section in self.sections():
+ fp.write('[%s]\n' % section)
+ for option in self.options(section):
+ if option == 'aws_secret_access_key':
+ fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
+ else:
+ fp.write('%s = %s\n' % (option, self.get(section, option)))
+
+ def dump_to_sdb(self, domain_name, item_name):
+ import simplejson
+ # imported here rather than at module level to avoid a circular import
+ # (the boto package itself loads this config module at start-up)
+ from ... import boto
+ sdb = boto.connect_sdb()
+ domain = sdb.lookup(domain_name)
+ if not domain:
+ domain = sdb.create_domain(domain_name)
+ item = domain.new_item(item_name)
+ item.active = False
+ for section in self.sections():
+ d = {}
+ for option in self.options(section):
+ d[option] = self.get(section, option)
+ item[section] = simplejson.dumps(d)
+ item.save()
+
+ def load_from_sdb(self, domain_name, item_name):
+ import simplejson
+ from ... import boto # see the comment in dump_to_sdb()
+ sdb = boto.connect_sdb()
+ domain = sdb.lookup(domain_name)
+ item = domain.get_item(item_name)
+ for section in item.keys():
+ if not self.has_section(section):
+ self.add_section(section)
+ d = simplejson.loads(item[section])
+ for attr_name in d.keys():
+ attr_value = d[attr_name]
+ if attr_value is None:
+ attr_value = 'None'
+ if isinstance(attr_value, bool):
+ self.setbool(section, attr_name, attr_value)
+ else:
+ self.set(section, attr_name, attr_value)
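
Config deliberately swallows missing sections and options, returning the
caller's default instead of raising; that is what makes the bare
config.get('Boto', ...) calls in connection.py safe. A short sketch of the
defaulting behaviour (note that a credential file named by
AWS_CREDENTIAL_FILE, if set in the environment, is merged in as well):

    import StringIO
    from s3ql.backends.boto.pyami.config import Config

    cfg = Config(fp=StringIO.StringIO('[Boto]\ndebug = 2\n'))
    print cfg.getint('Boto', 'debug')             # -> 2
    print cfg.get('Boto', 'proxy', 'none-set')    # missing -> default
    print cfg.getbool('Boto', 'is_secure', True)  # missing -> default
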
diff --git a/src/s3ql/backends/boto/resultset.py b/src/s3ql/backends/boto/resultset.py
new file mode 100644
index 0000000..234b140
--- /dev/null
+++ b/src/s3ql/backends/boto/resultset.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+class ResultSet(list):
+ """
+ The ResultSet is used to pass results back from the Amazon services
+ to the client. It has an ugly but workable mechanism for parsing
+ the XML results from AWS. Because I don't really want any dependencies
+ on external libraries, I'm using the standard SAX parser that comes
+ with Python. The good news is that it's quite fast and efficient but
+ it makes some things rather difficult.
+
+ You can pass in, as the marker_elem parameter, a list of tuples.
+ Each tuple contains a string as the first element which represents
+ the XML element that the resultset needs to be on the lookout for
+ and a Python class as the second element of the tuple. Each time the
+ specified element is found in the XML, a new instance of the class
+ will be created and popped onto the stack.
+
+ """
+
+ def __init__(self, marker_elem=None):
+ list.__init__(self)
+ if isinstance(marker_elem, list):
+ self.markers = marker_elem
+ else:
+ self.markers = []
+ self.marker = None
+ self.key_marker = None
+ self.next_key_marker = None
+ self.next_version_id_marker = None
+ self.version_id_marker = None
+ self.is_truncated = False
+ self.next_token = None
+ self.status = True
+
+ def startElement(self, name, attrs, connection):
+ for t in self.markers:
+ if name == t[0]:
+ obj = t[1](connection)
+ self.append(obj)
+ return obj
+ return None
+
+ def to_boolean(self, value, true_value='true'):
+ if value == true_value:
+ return True
+ else:
+ return False
+
+ def endElement(self, name, value, connection):
+ if name == 'IsTruncated':
+ self.is_truncated = self.to_boolean(value)
+ elif name == 'Marker':
+ self.marker = value
+ elif name == 'KeyMarker':
+ self.key_marker = value
+ elif name == 'VersionIdMarker':
+ self.version_id_marker = value
+ elif name == 'NextKeyMarker':
+ self.next_key_marker = value
+ elif name == 'NextVersionIdMarker':
+ self.next_version_id_marker = value
+ elif name == 'Prefix':
+ self.prefix = value
+ elif name == 'return':
+ self.status = self.to_boolean(value)
+ elif name == 'StatusCode':
+ self.status = self.to_boolean(value, 'Success')
+ elif name == 'ItemName':
+ self.append(value)
+ elif name == 'NextToken':
+ self.next_token = value
+ elif name == 'BoxUsage':
+ try:
+ connection.box_usage += float(value)
+ except:
+ pass
+ elif name == 'IsValid':
+ self.status = self.to_boolean(value, 'True')
+ else:
+ setattr(self, name, value)
+
+class BooleanResult(object):
+
+ def __init__(self, marker_elem=None):
+ self.status = True
+ self.request_id = None
+ self.box_usage = None
+
+ def __repr__(self):
+ if self.status:
+ return 'True'
+ else:
+ return 'False'
+
+ def __nonzero__(self):
+ return self.status
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def to_boolean(self, value, true_value='true'):
+ if value == true_value:
+ return True
+ else:
+ return False
+
+ def endElement(self, name, value, connection):
+ if name == 'return':
+ self.status = self.to_boolean(value)
+ elif name == 'StatusCode':
+ self.status = self.to_boolean(value, 'Success')
+ elif name == 'IsValid':
+ self.status = self.to_boolean(value, 'True')
+ elif name == 'RequestId':
+ self.request_id = value
+ elif name == 'requestId':
+ self.request_id = value
+ elif name == 'BoxUsage':
+ self.box_usage = value
+ else:
+ setattr(self, name, value)
+
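
The marker_elem list is what get_list() in connection.py feeds to ResultSet:
each (element-name, class) pair names an XML element to materialize. A sketch
wiring ResultSet to XmlHandler directly; the <Entry> payload and the Entry
class are invented for illustration:

    import xml.sax
    from s3ql.backends.boto.handler import XmlHandler
    from s3ql.backends.boto.resultset import ResultSet

    class Entry:
        def __init__(self, connection=None):
            self.value = None
        def startElement(self, name, attrs, connection):
            return None
        def endElement(self, name, value, connection):
            if name == 'Value':
                self.value = value

    rs = ResultSet([('Entry', Entry)])
    xml.sax.parseString('<Set><Entry><Value>a</Value></Entry>'
                        '<Entry><Value>b</Value></Entry></Set>',
                        XmlHandler(rs, None))
    print [e.value for e in rs]  # -> ['a', 'b']
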
diff --git a/src/s3ql/backends/boto/s3/__init__.py b/src/s3ql/backends/boto/s3/__init__.py
new file mode 100644
index 0000000..d13fba5
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/__init__.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+from ... import boto
+
+from .connection import S3Connection as Connection
+from .key import Key
+from .bucket import Bucket
+
+__all__ = ['Connection', 'Key', 'Bucket']
diff --git a/src/s3ql/backends/boto/s3/acl.py b/src/s3ql/backends/boto/s3/acl.py
new file mode 100644
index 0000000..27df47e
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/acl.py
@@ -0,0 +1,165 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+from .user import User
+import StringIO
+
+CannedACLStrings = ['private', 'public-read',
+ 'public-read-write', 'authenticated-read']
+
+
+class Policy:
+
+ def __init__(self, parent=None):
+ self.parent = parent
+ self.acl = None
+
+ def __repr__(self):
+ grants = []
+ for g in self.acl.grants:
+ if g.id == self.owner.id:
+ grants.append("%s (owner) = %s" % (g.display_name, g.permission))
+ else:
+ if g.type == 'CanonicalUser':
+ u = g.display_name
+ elif g.type == 'Group':
+ u = g.uri
+ else:
+ u = g.email
+ grants.append("%s = %s" % (u, g.permission))
+ return "<Policy: %s>" % ", ".join(grants)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Owner':
+ self.owner = User(self)
+ return self.owner
+ elif name == 'AccessControlList':
+ self.acl = ACL(self)
+ return self.acl
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Owner':
+ pass
+ elif name == 'AccessControlList':
+ pass
+ else:
+ setattr(self, name, value)
+
+ def to_xml(self):
+ s = '<AccessControlPolicy>'
+ s += self.owner.to_xml()
+ s += self.acl.to_xml()
+ s += '</AccessControlPolicy>'
+ return s
+
+class ACL:
+
+ def __init__(self, policy=None):
+ self.policy = policy
+ self.grants = []
+
+ def add_grant(self, grant):
+ self.grants.append(grant)
+
+ def add_email_grant(self, permission, email_address):
+ grant = Grant(permission=permission, type='AmazonCustomerByEmail',
+ email_address=email_address)
+ self.grants.append(grant)
+
+ def add_user_grant(self, permission, user_id):
+ grant = Grant(permission=permission, type='CanonicalUser', id=user_id)
+ self.grants.append(grant)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Grant':
+ self.grants.append(Grant(self))
+ return self.grants[-1]
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Grant':
+ pass
+ else:
+ setattr(self, name, value)
+
+ def to_xml(self):
+ s = '<AccessControlList>'
+ for grant in self.grants:
+ s += grant.to_xml()
+ s += '</AccessControlList>'
+ return s
+
+class Grant:
+
+ NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+
+ def __init__(self, permission=None, type=None, id=None,
+ display_name=None, uri=None, email_address=None):
+ self.permission = permission
+ self.id = id
+ self.display_name = display_name
+ self.uri = uri
+ self.email_address = email_address
+ self.type = type
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Grantee':
+ self.type = attrs['xsi:type']
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'ID':
+ self.id = value
+ elif name == 'DisplayName':
+ self.display_name = value
+ elif name == 'URI':
+ self.uri = value
+ elif name == 'EmailAddress':
+ self.email_address = value
+ elif name == 'Grantee':
+ pass
+ elif name == 'Permission':
+ self.permission = value
+ else:
+ setattr(self, name, value)
+
+ def to_xml(self):
+ s = '<Grant>'
+ s += '<Grantee %s xsi:type="%s">' % (self.NameSpace, self.type)
+ if self.type == 'CanonicalUser':
+ s += '<ID>%s</ID>' % self.id
+ s += '<DisplayName>%s</DisplayName>' % self.display_name
+ elif self.type == 'Group':
+ s += '<URI>%s</URI>' % self.uri
+ else:
+ s += '<EmailAddress>%s</EmailAddress>' % self.email_address
+ s += '</Grantee>'
+ s += '<Permission>%s</Permission>' % self.permission
+ s += '</Grant>'
+ return s
+
+
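
Policy, ACL and Grant round-trip between the parsed object form and the XML
document sent back in a PUT-acl request. A sketch of building a grant by
hand; the canonical user ID is a placeholder, and DisplayName serializes as
'None' because add_user_grant() never sets it:

    from s3ql.backends.boto.s3.acl import ACL

    acl = ACL()
    acl.add_user_grant('READ', '0123456789abcdef')  # placeholder ID
    print acl.to_xml()
    # -> <AccessControlList><Grant><Grantee xmlns:xsi=...
    #    xsi:type="CanonicalUser"><ID>0123456789abcdef</ID>
    #    <DisplayName>None</DisplayName></Grantee>
    #    <Permission>READ</Permission></Grant></AccessControlList>
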
diff --git a/src/s3ql/backends/boto/s3/bucket.py b/src/s3ql/backends/boto/s3/bucket.py
new file mode 100644
index 0000000..cb3872b
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/bucket.py
@@ -0,0 +1,749 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+from ... import boto
+from .. import handler
+from ..resultset import ResultSet
+from .acl import Policy, CannedACLStrings, ACL, Grant
+from .user import User
+from .key import Key
+from .prefix import Prefix
+from ..exception import S3ResponseError, S3PermissionsError, S3CopyError
+from .bucketlistresultset import BucketListResultSet, VersionedBucketListResultSet
+from .deletemarker import DeleteMarker # needed by get_all_versions; module assumed vendored like the rest of boto.s3
+from .. import utils
+import xml.sax
+import urllib
+import re
+
+S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
+
+class Bucket:
+
+ BucketLoggingBody = """<?xml version="1.0" encoding="UTF-8"?>
+ <BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <LoggingEnabled>
+ <TargetBucket>%s</TargetBucket>
+ <TargetPrefix>%s</TargetPrefix>
+ </LoggingEnabled>
+ </BucketLoggingStatus>"""
+
+ EmptyBucketLoggingBody = """<?xml version="1.0" encoding="UTF-8"?>
+ <BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ </BucketLoggingStatus>"""
+
+ LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
+
+ BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
+ <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Payer>%s</Payer>
+ </RequestPaymentConfiguration>"""
+
+ VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
+ <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Status>%s</Status>
+ <MfaDelete>%s</MfaDelete>
+ </VersioningConfiguration>"""
+
+ VersionRE = '<Status>([A-Za-z]+)</Status>'
+ MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
+
+ def __init__(self, connection=None, name=None, key_class=Key):
+ self.name = name
+ self.connection = connection
+ self.key_class = key_class
+
+ def __repr__(self):
+ return '<Bucket: %s>' % self.name
+
+ def __iter__(self):
+ return iter(BucketListResultSet(self))
+
+ def __contains__(self, key_name):
+ return not (self.get_key(key_name) is None)
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Name':
+ self.name = value
+ elif name == 'CreationDate':
+ self.creation_date = value
+ else:
+ setattr(self, name, value)
+
+ def set_key_class(self, key_class):
+ """
+ Set the Key class associated with this bucket. By default, this
+ would be the boto.s3.key.Key class but if you want to subclass that
+ for some reason this allows you to associate your new class with a
+ bucket so that when you call bucket.new_key() or when you get a listing
+ of keys in the bucket you will get instances of your key class
+ rather than the default.
+
+ :type key_class: class
+ :param key_class: A subclass of Key that can be more specific
+ """
+ self.key_class = key_class
+
+ def lookup(self, key_name, headers=None):
+ """
+ Deprecated: Please use get_key method.
+
+ :type key_name: string
+ :param key_name: The name of the key to retrieve
+
+ :rtype: :class:`boto.s3.key.Key`
+ :returns: A Key object from this bucket.
+ """
+ return self.get_key(key_name, headers=headers)
+
+ def get_key(self, key_name, headers=None, version_id=None):
+ """
+ Check to see if a particular key exists within the bucket. This
+ method uses a HEAD request to check for the existence of the key.
+ Returns: An instance of a Key object or None
+
+ :type key_name: string
+ :param key_name: The name of the key to retrieve
+
+ :rtype: :class:`boto.s3.key.Key`
+ :returns: A Key object from this bucket.
+ """
+ if version_id:
+ query_args = 'versionId=%s' % version_id
+ else:
+ query_args = None
+ response = self.connection.make_request('HEAD', self.name, key_name,
+ headers=headers,
+ query_args=query_args)
+ if response.status == 200:
+ response.read()
+ k = self.key_class(self)
+ k.metadata = boto.utils.get_aws_metadata(response.msg)
+ k.etag = response.getheader('etag')
+ k.content_type = response.getheader('content-type')
+ k.content_encoding = response.getheader('content-encoding')
+ k.last_modified = response.getheader('last-modified')
+ k.size = int(response.getheader('content-length'))
+ k.name = key_name
+ k.handle_version_headers(response)
+ return k
+ else:
+ if response.status == 404:
+ response.read()
+ return None
+ else:
+ raise S3ResponseError(response.status, response.reason, '')
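+
+ # Example (illustrative; assumes an authenticated S3Connection `conn`):
+ #
+ # bucket = conn.get_bucket('mybucket')
+ # k = bucket.get_key('path/to/object')
+ # if k is not None: # HEAD returned 200
+ # print k.size, k.etag, k.last_modified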
+
+ def list(self, prefix='', delimiter='', marker='', headers=None):
+ """
+ List key objects within a bucket. This returns an instance of a
+ BucketListResultSet that automatically handles all of the result
+ paging, etc. from S3. You just need to keep iterating until
+ there are no more results.
+ Called with no arguments, this will return an iterator object across
+ all keys within the bucket.
+
+ :type prefix: string
+ :param prefix: allows you to limit the listing to a particular
+ prefix. For example, if you call the method with
+ prefix='/foo/' then the iterator will only cycle
+ through the keys that begin with the string '/foo/'.
+
+ :type delimiter: string
+ :param delimiter: can be used in conjunction with the prefix
+ to allow you to organize and browse your keys
+ hierarchically. See:
+ http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
+ for more details.
+
+ :type marker: string
+ :param marker: The "marker" of where you are in the result set
+
+ :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
+ :return: an instance of a BucketListResultSet that handles paging, etc
+ """
+ return BucketListResultSet(self, prefix, delimiter, marker, headers)
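+
+ # Example (illustrative): iterate lazily over a prefix; the returned
+ # BucketListResultSet performs the paging behind the scenes:
+ #
+ # for k in bucket.list(prefix='logs/2010-', delimiter='/'):
+ # print k.name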
+
+ def list_versions(self, prefix='', delimiter='', key_marker='',
+ version_id_marker='', headers=None):
+ """
+ List version objects within a bucket. This returns an instance of a
+ VersionedBucketListResultSet that automatically handles all of the
+ result paging, etc. from S3. You just need to keep iterating until
+ there are no more results.
+ Called with no arguments, this will return an iterator object across
+ all versions within the bucket.
+
+ :type prefix: string
+ :param prefix: allows you to limit the listing to a particular
+ prefix. For example, if you call the method with
+ prefix='/foo/' then the iterator will only cycle
+ through the keys that begin with the string '/foo/'.
+
+ :type delimiter: string
+ :param delimiter: can be used in conjunction with the prefix
+ to allow you to organize and browse your keys
+ hierarchically. See:
+ http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
+ for more details.
+
+ :type key_marker: string
+ :param key_marker: The "marker" of where you are in the result set
+ with respect to keys.
+
+ :type version_id_marker: string
+ :param version_id_marker: The "marker" of where you are in the result
+ set with respect to version-id's.
+
+ :rtype: :class:`boto.s3.bucketlistresultset.VersionedBucketListResultSet`
+ :return: an instance of a VersionedBucketListResultSet that handles
+ paging, etc
+ """
+ return VersionedBucketListResultSet(self, prefix, delimiter, key_marker,
+ version_id_marker, headers)
+
+ def _get_all(self, element_map, initial_query_string='',
+ headers=None, **params):
+ l = []
+ for k,v in params.items():
+ k = k.replace('_', '-')
+ if k == 'maxkeys':
+ k = 'max-keys'
+ if isinstance(v, unicode):
+ v = v.encode('utf-8')
+ if v is not None and v != '':
+ l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
+ if len(l):
+ s = initial_query_string + '&' + '&'.join(l)
+ else:
+ s = initial_query_string
+ response = self.connection.make_request('GET', self.name,
+ headers=headers, query_args=s)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ rs = ResultSet(element_map)
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def get_all_keys(self, headers=None, **params):
+ """
+ A lower-level method for listing contents of a bucket.
+ This closely models the actual S3 API and requires you to manually
+ handle the paging of results. For a higher-level method
+ that handles the details of paging for you, you can use the list method.
+
+ :type max_keys: int
+ :param max_keys: The maximum number of keys to retrieve
+
+ :type prefix: string
+ :param prefix: The prefix of the keys you want to retrieve
+
+ :type marker: string
+ :param marker: The "marker" of where you are in the result set
+
+ :type delimiter: string
+ :param delimiter: If this optional, Unicode string parameter
+ is included with your request, then keys that
+ contain the same string between the prefix and
+ the first occurrence of the delimiter will be
+ rolled up into a single result element in the
+ CommonPrefixes collection. These rolled-up keys
+ are not returned elsewhere in the response.
+
+ :rtype: ResultSet
+ :return: The result from S3 listing the keys requested
+
+ """
+ return self._get_all([('Contents', self.key_class),
+ ('CommonPrefixes', Prefix)],
+ '', headers, **params)
+
+ def get_all_versions(self, headers=None, **params):
+ """
+ A lower-level, version-aware method for listing contents of a bucket.
+ This closely models the actual S3 API and requires you to manually
+ handle the paging of results. For a higher-level method
+ that handles the details of paging for you, you can use the list method.
+
+ :type max_keys: int
+ :param max_keys: The maximum number of keys to retrieve
+
+ :type prefix: string
+ :param prefix: The prefix of the keys you want to retrieve
+
+ :type key_marker: string
+ :param key_marker: The "marker" of where you are in the result set
+ with respect to keys.
+
+ :type version_id_marker: string
+ :param version_id_marker: The "marker" of where you are in the result
+ set with respect to version-id's.
+
+ :type delimiter: string
+ :param delimiter: If this optional, Unicode string parameter
+ is included with your request, then keys that
+ contain the same string between the prefix and
+ the first occurrence of the delimiter will be
+ rolled up into a single result element in the
+ CommonPrefixes collection. These rolled-up keys
+ are not returned elsewhere in the response.
+
+ :rtype: ResultSet
+ :return: The result from S3 listing the keys requested
+
+ """
+ return self._get_all([('Version', self.key_class),
+ ('CommonPrefixes', Prefix),
+ ('DeleteMarker', DeleteMarker)],
+ 'versions', headers, **params)
+
+ def new_key(self, key_name=None):
+ """
+ Creates a new key
+
+ :type key_name: string
+ :param key_name: The name of the key to create
+
+ :rtype: :class:`boto.s3.key.Key` or subclass
+ :returns: An instance of the newly created key object
+ """
+ return self.key_class(self, key_name)
+
+ def generate_url(self, expires_in, method='GET',
+ headers=None, force_http=False):
+ return self.connection.generate_url(expires_in, method, self.name,
+ headers=headers,
+ force_http=force_http)
+
+ def delete_key(self, key_name, headers=None,
+ version_id=None, mfa_token=None):
+ """
+ Deletes a key from the bucket. If a version_id is provided,
+ only that version of the key will be deleted.
+
+ :type key_name: string
+ :param key_name: The key name to delete
+
+ :type version_id: string
+ :param version_id: The version ID (optional)
+
+ :type mfa_token: tuple or list of strings
+ :param mfa_token: A tuple or list consisting of the serial number
+ from the MFA device and the current value of
+ the six-digit token associated with the device.
+ This value is required anytime you are
+ deleting versioned objects from a bucket
+ that has the MFADelete option on the bucket.
+ """
+ if version_id:
+ query_args = 'versionId=%s' % version_id
+ else:
+ query_args = None
+ if mfa_token:
+ if not headers:
+ headers = {}
+ headers['x-amz-mfa'] = ' '.join(mfa_token)
+ response = self.connection.make_request('DELETE', self.name, key_name,
+ headers=headers,
+ query_args=query_args)
+ body = response.read()
+ if response.status != 204:
+ raise S3ResponseError(response.status, response.reason, body)
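+
+ # Example (illustrative; the version id and MFA serial are made up):
+ #
+ # bucket.delete_key('secret.txt', version_id='3HL4kqtJlcpXrof3',
+ # mfa_token=('arn:aws:iam::123456789012:mfa/me',
+ # '123456'))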
+
+ def copy_key(self, new_key_name, src_bucket_name,
+ src_key_name, metadata=None, src_version_id=None,
+ storage_class='STANDARD', preserve_acl=False):
+ """
+ Create a new key in the bucket by copying another existing key.
+
+ :type new_key_name: string
+ :param new_key_name: The name of the new key
+
+ :type src_bucket_name: string
+ :param src_bucket_name: The name of the source bucket
+
+ :type src_key_name: string
+ :param src_key_name: The name of the source key
+
+ :type src_version_id: string
+ :param src_version_id: The version id for the key. This param
+ is optional. If not specified, the newest
+ version of the key will be copied.
+
+ :type metadata: dict
+ :param metadata: Metadata to be associated with new key.
+ If metadata is supplied, it will replace the
+ metadata of the source key being copied.
+ If no metadata is supplied, the source key's
+ metadata will be copied to the new key.
+
+ :type storage_class: string
+ :param storage_class: The storage class of the new key.
+ By default, the new key will use the
+ standard storage class. Possible values are:
+ STANDARD | REDUCED_REDUNDANCY
+
+ :type preserve_acl: bool
+ :param preserve_acl: If True, the ACL from the source key
+ will be copied to the destination
+ key. If False, the destination key
+ will have the default ACL.
+ Note that preserving the ACL in the
+ new key object will require two
+ additional API calls to S3, one to
+ retrieve the current ACL and one to
+ set that ACL on the new object. If
+ you don't care about the ACL, a value
+ of False will be significantly more
+ efficient.
+
+ :rtype: :class:`boto.s3.key.Key` or subclass
+ :returns: An instance of the newly created key object
+ """
+ if preserve_acl:
+ acl = self.get_xml_acl(src_key_name)
+ src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
+ if src_version_id:
+ src += '?version_id=%s' % src_version_id
+ headers = {'x-amz-copy-source' : src}
+ if storage_class != 'STANDARD':
+ headers['x-amz-storage-class'] = storage_class
+ if metadata:
+ headers['x-amz-metadata-directive'] = 'REPLACE'
+ headers = boto.utils.merge_meta(headers, metadata)
+ else:
+ headers['x-amz-metadata-directive'] = 'COPY'
+ response = self.connection.make_request('PUT', self.name, new_key_name,
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ key = self.new_key(new_key_name)
+ h = handler.XmlHandler(key, self)
+ xml.sax.parseString(body, h)
+ if hasattr(key, 'Error'):
+ raise S3CopyError(key.Code, key.Message, body)
+ key.handle_version_headers(response)
+ if preserve_acl:
+ self.set_xml_acl(acl, new_key_name)
+ return key
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
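+
+ # Example (illustrative): server-side copy from another bucket while
+ # keeping the source ACL (costs two extra API calls, see above):
+ #
+ # dst = conn.get_bucket('backup-bucket')
+ # dst.copy_key('copy.dat', 'source-bucket', 'orig.dat',
+ # preserve_acl=True)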
+
+ def set_canned_acl(self, acl_str, key_name='', headers=None,
+ version_id=None):
+ assert acl_str in CannedACLStrings
+
+ if headers:
+ headers['x-amz-acl'] = acl_str
+ else:
+ headers={'x-amz-acl': acl_str}
+
+ query_args='acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('PUT', self.name, key_name,
+ headers=headers, query_args=query_args)
+ body = response.read()
+ if response.status != 200:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def get_xml_acl(self, key_name='', headers=None, version_id=None):
+ query_args = 'acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('GET', self.name, key_name,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ if response.status != 200:
+ raise S3ResponseError(response.status, response.reason, body)
+ return body
+
+ def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None):
+ query_args = 'acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('PUT', self.name, key_name,
+ data=acl_str,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ if response.status != 200:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
+ if isinstance(acl_or_str, Policy):
+ self.set_xml_acl(acl_or_str.to_xml(), key_name,
+ headers, version_id)
+ else:
+ self.set_canned_acl(acl_or_str, key_name,
+ headers, version_id)
+
+ def get_acl(self, key_name='', headers=None, version_id=None):
+ query_args = 'acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('GET', self.name, key_name,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ policy = Policy(self)
+ h = handler.XmlHandler(policy, self)
+ xml.sax.parseString(body, h)
+ return policy
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def make_public(self, recursive=False, headers=None):
+ self.set_canned_acl('public-read', headers=headers)
+ if recursive:
+ for key in self:
+ self.set_canned_acl('public-read', key.name, headers=headers)
+
+ def add_email_grant(self, permission, email_address,
+ recursive=False, headers=None):
+ """
+ Convenience method that provides a quick way to add an email grant
+ to a bucket. This method retrieves the current ACL, creates a new
+ grant based on the parameters passed in, adds that grant to the ACL
+ and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+ :type email_address: string
+ :param email_address: The email address associated with the AWS
+ account you are granting the permission to.
+
+ :type recursive: boolean
+ :param recursive: A boolean value that controls whether the command
+ will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a
+ True value, the call will iterate through all keys
+ in the bucket and apply the same grant to each key.
+ CAUTION: If you have a lot of keys, this could take
+ a long time!
+ """
+ if permission not in S3Permissions:
+ raise S3PermissionsError('Unknown Permission: %s' % permission)
+ policy = self.get_acl(headers=headers)
+ policy.acl.add_email_grant(permission, email_address)
+ self.set_acl(policy, headers=headers)
+ if recursive:
+ for key in self:
+ key.add_email_grant(permission, email_address, headers=headers)
+
+ def add_user_grant(self, permission, user_id, recursive=False, headers=None):
+ """
+ Convenience method that provides a quick way to add a canonical user grant to a bucket.
+ This method retrieves the current ACL, creates a new grant based on the parameters
+ passed in, adds that grant to the ACL and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+ :type user_id: string
+ :param user_id: The canonical user id associated with the AWS account you are granting
+ the permission to.
+
+ :type recursive: boolean
+ :param recursive: A boolean value that controls whether the command
+ will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a
+ True value, the call will iterate through all keys
+ in the bucket and apply the same grant to each key.
+ CAUTION: If you have a lot of keys, this could take
+ a long time!
+ """
+ if permission not in S3Permissions:
+ raise S3PermissionsError('Unknown Permission: %s' % permission)
+ policy = self.get_acl(headers=headers)
+ policy.acl.add_user_grant(permission, user_id)
+ self.set_acl(policy, headers=headers)
+ if recursive:
+ for key in self:
+ key.add_user_grant(permission, user_id, headers=headers)
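+
+ # Example (illustrative; `canonical_id` is assumed): grant another AWS
+ # account read access to the bucket and to every existing key --
+ # recursive=True walks all keys, which can be slow on large buckets:
+ #
+ # bucket.add_user_grant('READ', canonical_id, recursive=True)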
+
+ def list_grants(self, headers=None):
+ policy = self.get_acl(headers=headers)
+ return policy.acl.grants
+
+ def get_location(self):
+ """
+ Returns the LocationConstraint for the bucket.
+
+ :rtype: str
+ :return: The LocationConstraint for the bucket or the empty string if
+ no constraint was specified when bucket was created.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='location')
+ body = response.read()
+ if response.status == 200:
+ rs = ResultSet(self)
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs.LocationConstraint
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def enable_logging(self, target_bucket, target_prefix='', headers=None):
+ if isinstance(target_bucket, Bucket):
+ target_bucket = target_bucket.name
+ body = self.BucketLoggingBody % (target_bucket, target_prefix)
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='logging', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def disable_logging(self, headers=None):
+ body = self.EmptyBucketLoggingBody
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='logging', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def get_logging_status(self, headers=None):
+ response = self.connection.make_request('GET', self.name,
+ query_args='logging', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return body
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def set_as_logging_target(self, headers=None):
+ policy = self.get_acl(headers=headers)
+ g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
+ g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
+ policy.acl.add_grant(g1)
+ policy.acl.add_grant(g2)
+ self.set_acl(policy, headers=headers)
+
+ def get_request_payment(self, headers=None):
+ response = self.connection.make_request('GET', self.name,
+ query_args='requestPayment', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return body
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def set_request_payment(self, payer='BucketOwner', headers=None):
+ body = self.BucketPaymentBody % payer
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='requestPayment', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def configure_versioning(self, versioning, mfa_delete=False,
+ mfa_token=None, headers=None):
+ """
+ Configure versioning for this bucket.
+ Note: This feature is currently in beta release and is available
+ only in the Northern California region.
+
+ :type versioning: bool
+ :param versioning: A boolean indicating whether versioning is
+ enabled (True) or disabled (False).
+
+ :type mfa_delete: bool
+ :param mfa_delete: A boolean indicating whether the Multi-Factor
+ Authentication Delete feature is enabled (True)
+ or disabled (False). If mfa_delete is enabled
+ then all Delete operations will require the
+ token from your MFA device to be passed in
+ the request.
+
+ :type mfa_token: tuple or list of strings
+ :param mfa_token: A tuple or list consisting of the serial number
+ from the MFA device and the current value of
+ the six-digit token associated with the device.
+ This value is required when you are changing
+ the status of the MfaDelete property of
+ the bucket.
+ """
+ if versioning:
+ ver = 'Enabled'
+ else:
+ ver = 'Suspended'
+ if mfa_delete:
+ mfa = 'Enabled'
+ else:
+ mfa = 'Disabled'
+ body = self.VersioningBody % (ver, mfa)
+ if mfa_token:
+ if not headers:
+ headers = {}
+ headers['x-amz-mfa'] = ' '.join(mfa_token)
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='versioning', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def get_versioning_status(self, headers=None):
+ """
+ Returns the current status of versioning on the bucket.
+
+ :rtype: dict
+ :returns: A dictionary containing a key named 'Versioning'
+ that can have a value of either Enabled, Disabled,
+ or Suspended. Also, if MFADelete has ever been enabled
+ on the bucket, the dictionary will contain a key
+ named 'MFADelete' which will have a value of either
+ Enabled or Suspended.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='versioning', headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ d = {}
+ ver = re.search(self.VersionRE, body)
+ if ver:
+ d['Versioning'] = ver.group(1)
+ mfa = re.search(self.MFADeleteRE, body)
+ if mfa:
+ d['MfaDelete'] = mfa.group(1)
+ return d
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
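+
+ # Example (illustrative):
+ #
+ # bucket.configure_versioning(True)
+ # print bucket.get_versioning_status() # {'Versioning': 'Enabled'}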
+
+ def delete(self, headers=None):
+ return self.connection.delete_bucket(self.name, headers=headers)
diff --git a/src/s3ql/backends/boto/s3/bucketlistresultset.py b/src/s3ql/backends/boto/s3/bucketlistresultset.py
new file mode 100644
index 0000000..b3ce4b6
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/bucketlistresultset.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):
+ """
+ A generator function for listing keys in a bucket.
+ """
+ more_results = True
+ k = None
+ while more_results:
+ rs = bucket.get_all_keys(prefix=prefix, marker=marker,
+ delimiter=delimiter, headers=headers)
+ for k in rs:
+ yield k
+ if k:
+ marker = k.name
+ more_results = rs.is_truncated
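+
+# Paging sketch (illustrative, not part of the upstream source): the
+# generator above re-issues get_all_keys() with the last key name as the
+# new marker until rs.is_truncated is False, i.e. one page at a time:
+#
+# rs = bucket.get_all_keys(prefix='logs/', marker='')
+# # consume rs; if rs.is_truncated, pass the last key name as marker
+# # to the next get_all_keys() call.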
+
+class BucketListResultSet:
+ """
+ A resultset for listing keys within a bucket. Uses the bucket_lister
+ generator function and implements the iterator interface. This
+ transparently handles the results paging from S3 so even if you have
+ many thousands of keys within the bucket you can iterate over all
+ keys in a reasonably efficient manner.
+ """
+
+ def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None):
+ self.bucket = bucket
+ self.prefix = prefix
+ self.delimiter = delimiter
+ self.marker = marker
+ self.headers = headers
+
+ def __iter__(self):
+ return bucket_lister(self.bucket, prefix=self.prefix,
+ delimiter=self.delimiter, marker=self.marker, headers=self.headers)
+
+def versioned_bucket_lister(bucket, prefix='', delimiter='',
+ key_marker='', version_id_marker='', headers=None):
+ """
+ A generator function for listing versions in a bucket.
+ """
+ more_results = True
+ k = None
+ while more_results:
+ rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
+ version_id_marker=version_id_marker,
+ delimiter=delimiter, headers=headers)
+ for k in rs:
+ yield k
+ key_marker = rs.next_key_marker
+ version_id_marker = rs.next_version_id_marker
+ more_results = rs.is_truncated
+
+class VersionedBucketListResultSet:
+ """
+ A resultset for listing versions within a bucket. Uses the bucket_lister
+ generator function and implements the iterator interface. This
+ transparently handles the results paging from S3 so even if you have
+ many thousands of keys within the bucket you can iterate over all
+ keys in a reasonably efficient manner.
+ """
+
+ def __init__(self, bucket=None, prefix='', delimiter='', key_marker='',
+ version_id_marker='', headers=None):
+ self.bucket = bucket
+ self.prefix = prefix
+ self.delimiter = delimiter
+ self.key_marker = key_marker
+ self.version_id_marker = version_id_marker
+ self.headers = headers
+
+ def __iter__(self):
+ return versioned_bucket_lister(self.bucket, prefix=self.prefix,
+ delimiter=self.delimiter,
+ key_marker=self.key_marker,
+ version_id_marker=self.version_id_marker,
+ headers=self.headers)
+
+
diff --git a/src/s3ql/backends/boto/s3/connection.py b/src/s3ql/backends/boto/s3/connection.py
new file mode 100644
index 0000000..2770a4d
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/connection.py
@@ -0,0 +1,360 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+import xml.sax
+import urllib, base64
+import time
+from ... import boto
+from .. import utils
+import types
+from ..connection import AWSAuthConnection
+from .. import handler
+from .bucket import Bucket
+from .key import Key
+from ..resultset import ResultSet
+from ..exception import S3ResponseError, S3CreateError, BotoClientError
+
+def assert_case_insensitive(f):
+ def wrapper(*args, **kwargs):
+ if len(args) == 3 and not (args[2].islower() or args[2].isalnum()):
+ raise BotoClientError("Bucket names cannot contain upper-case " \
+ "characters when using either the sub-domain or virtual " \
+ "hosting calling format.")
+ return f(*args, **kwargs)
+ return wrapper
+
+class _CallingFormat:
+
+ def build_url_base(self, connection, protocol, server, bucket, key=''):
+ url_base = '%s://' % protocol
+ url_base += self.build_host(server, bucket)
+ url_base += connection.get_path(self.build_path_base(bucket, key))
+ return url_base
+
+ def build_host(self, server, bucket):
+ if bucket == '':
+ return server
+ else:
+ return self.get_bucket_server(server, bucket)
+
+ def build_auth_path(self, bucket, key=''):
+ path = ''
+ if bucket != '':
+ path = '/' + bucket
+ return path + '/%s' % urllib.quote(key)
+
+ def build_path_base(self, bucket, key=''):
+ return '/%s' % urllib.quote(key)
+
+class SubdomainCallingFormat(_CallingFormat):
+
+ @assert_case_insensitive
+ def get_bucket_server(self, server, bucket):
+ return '%s.%s' % (bucket, server)
+
+class VHostCallingFormat(_CallingFormat):
+
+ @assert_case_insensitive
+ def get_bucket_server(self, server, bucket):
+ return bucket
+
+class OrdinaryCallingFormat(_CallingFormat):
+
+ def get_bucket_server(self, server, bucket):
+ return server
+
+ def build_path_base(self, bucket, key=''):
+ path_base = '/'
+ if bucket:
+ path_base += "%s/" % bucket
+ return path_base + urllib.quote(key)
+
+class Location:
+ DEFAULT = ''
+ EU = 'EU'
+ USWest = 'us-west-1'
+
+#boto.set_stream_logger('s3')
+
+class S3Connection(AWSAuthConnection):
+
+ DefaultHost = 's3.amazonaws.com'
+ QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None,
+ host=DefaultHost, debug=0, https_connection_factory=None,
+ calling_format=SubdomainCallingFormat(), path='/', provider='aws'):
+ self.calling_format = calling_format
+ AWSAuthConnection.__init__(self, host,
+ aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+ debug=debug, https_connection_factory=https_connection_factory,
+ path=path, provider=provider)
+
+ def __iter__(self):
+ for bucket in self.get_all_buckets():
+ yield bucket
+
+ def __contains__(self, bucket_name):
+ return not (self.lookup(bucket_name) is None)
+
+ def build_post_policy(self, expiration_time, conditions):
+ """
+ Taken from the AWS book Python examples and modified for use with boto.
+ """
+ if type(expiration_time) != time.struct_time:
+ raise BotoClientError('Policy document must include a valid expiration Time object')
+
+ # Convert conditions object mappings to condition statements
+
+ return '{"expiration": "%s",\n"conditions": [%s]}' % \
+ (time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
+
+
+ def build_post_form_args(self, bucket_name, key, expires_in = 6000,
+ acl = None, success_action_redirect = None, max_content_length = None,
+ http_method = "http", fields=None, conditions=None):
+ """
+ Taken from the AWS book Python examples and modified for use with boto.
+ This only returns the arguments required for the post form, not the
+ actual form. It does not return the file input field, which also needs
+ to be added.
+
+ :param bucket_name: Bucket to submit to
+ :type bucket_name: string
+
+ :param key: Key name, optionally add ${filename} to the end to attach the submitted filename
+ :type key: string
+
+ :param expires_in: Time (in seconds) before this expires, defaults to 6000
+ :type expires_in: integer
+
+ :param acl: ACL rule to use, if any
+ :type acl: :class:`boto.s3.acl.ACL`
+
+ :param success_action_redirect: URL to redirect to on success
+ :type success_action_redirect: string
+
+ :param max_content_length: Maximum size for this file
+ :type max_content_length: integer
+
+ :type http_method: string
+ :param http_method: HTTP Method to use, "http" or "https"
+
+
+ :rtype: dict
+ :return: A dictionary containing field names/values as well as a url to POST to
+
+ .. code-block:: python
+
+ {
+ "action": action_url_to_post_to,
+ "fields": [
+ {
+ "name": field_name,
+ "value": field_value
+ },
+ {
+ "name": field_name2,
+ "value": field_value2
+ }
+ ]
+ }
+
+ """
+ if fields == None:
+ fields = []
+ if conditions == None:
+ conditions = []
+ expiration = time.gmtime(int(time.time() + expires_in))
+
+ # Generate policy document
+ conditions.append('{"bucket": "%s"}' % bucket_name)
+ if key.endswith("${filename}"):
+ conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
+ else:
+ conditions.append('{"key": "%s"}' % key)
+ if acl:
+ conditions.append('{"acl": "%s"}' % acl)
+ fields.append({ "name": "acl", "value": acl})
+ if success_action_redirect:
+ conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
+ fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
+ if max_content_length:
+ conditions.append('["content-length-range", 0, %i]' % max_content_length)
+ fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})
+
+ policy = self.build_post_policy(expiration, conditions)
+
+ # Add the base64-encoded policy document as the 'policy' field
+ policy_b64 = base64.b64encode(policy)
+ fields.append({"name": "policy", "value": policy_b64})
+
+ # Add the AWS access key as the 'AWSAccessKeyId' field
+ fields.append({"name": "AWSAccessKeyId", "value": self.aws_access_key_id})
+
+ # Add the signature for the encoded policy document as the 'signature' field
+ hmac_copy = self.hmac.copy()
+ hmac_copy.update(policy_b64)
+ signature = base64.encodestring(hmac_copy.digest()).strip()
+ fields.append({"name": "signature", "value": signature})
+ fields.append({"name": "key", "value": key})
+
+ # The protocol is chosen by the caller via http_method ('http' or 'https').
+ url = '%s://%s.s3.amazonaws.com/' % (http_method, bucket_name)
+
+ return {"action": url, "fields": fields}
+
+
+ def generate_url(self, expires_in, method, bucket='', key='',
+ headers=None, query_auth=True, force_http=False):
+ if not headers:
+ headers = {}
+ expires = int(time.time() + expires_in)
+ auth_path = self.calling_format.build_auth_path(bucket, key)
+ auth_path = self.get_path(auth_path)
+ canonical_str = boto.utils.canonical_string(method, auth_path,
+ headers, expires)
+ hmac_copy = self.hmac.copy()
+ hmac_copy.update(canonical_str)
+ b64_hmac = base64.encodestring(hmac_copy.digest()).strip()
+ encoded_canonical = urllib.quote_plus(b64_hmac)
+ self.calling_format.build_path_base(bucket, key)
+ if query_auth:
+ query_part = '?' + self.QueryString % (encoded_canonical, expires,
+ self.aws_access_key_id)
+ if 'x-amz-security-token' in headers:
+ query_part += '&x-amz-security-token=%s' % urllib.quote(headers['x-amz-security-token']);
+ else:
+ query_part = ''
+ if force_http:
+ protocol = 'http'
+ port = 80
+ else:
+ protocol = self.protocol
+ port = self.port
+ return self.calling_format.build_url_base(self, protocol, self.server_name(port),
+ bucket, key) + query_part
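+
+ # Example (illustrative): a query-string-authenticated GET URL valid
+ # for one hour:
+ #
+ # url = conn.generate_url(3600, 'GET', 'mybucket', 'private.dat')
+ # # -> https://mybucket.s3.amazonaws.com/private.dat?Signature=...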
+
+ def get_all_buckets(self, headers=None):
+ response = self.make_request('GET')
+ body = response.read()
+ if response.status > 300:
+ raise S3ResponseError(response.status, response.reason, body)
+ rs = ResultSet([('Bucket', Bucket)])
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+
+ def get_canonical_user_id(self, headers=None):
+ """
+ Convenience method that returns the "CanonicalUserID" of the user whose credentials
+ are associated with the connection. The only way to get this value is to do a GET
+ request on the service which returns all buckets associated with the account. As part
+ of that response, the canonical userid is returned. This method simply does all of
+ that and then returns just the user id.
+
+ :rtype: string
+ :return: A string containing the canonical user id.
+ """
+ rs = self.get_all_buckets(headers=headers)
+ return rs.ID
+
+ def get_bucket(self, bucket_name, validate=True, headers=None):
+ bucket = Bucket(self, bucket_name)
+ if validate:
+ bucket.get_all_keys(headers, maxkeys=0)
+ return bucket
+
+ def lookup(self, bucket_name, validate=True, headers=None):
+ try:
+ bucket = self.get_bucket(bucket_name, validate, headers=headers)
+ except:
+ bucket = None
+ return bucket
+
+ def create_bucket(self, bucket_name, headers=None,
+ location=Location.DEFAULT, policy=None):
+ """
+ Creates a new bucket in the given location. By default it is created
+ in the US. You can pass Location.EU to create a European bucket.
+
+ :type bucket_name: string
+ :param bucket_name: The name of the new bucket
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the request to AWS.
+
+ :type location: :class:`boto.s3.connection.Location`
+ :param location: The location of the new bucket
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the new bucket in S3.
+
+ """
+ # Not sure what Exception Type from boto.exception to use.
+ if not bucket_name.islower():
+ raise Exception("Bucket names must be lower case.")
+
+ if policy:
+ if headers:
+ headers['x-amz-acl'] = policy
+ else:
+ headers = {'x-amz-acl' : policy}
+ if location == Location.DEFAULT:
+ data = ''
+ else:
+ data = '<CreateBucketConstraint><LocationConstraint>' + \
+ location + '</LocationConstraint></CreateBucketConstraint>'
+ response = self.make_request('PUT', bucket_name, headers=headers,
+ data=data)
+ body = response.read()
+ if response.status == 409:
+ raise S3CreateError(response.status, response.reason, body)
+ if response.status == 200:
+ return Bucket(self, bucket_name)
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
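+
+ # Example (illustrative):
+ #
+ # bucket = conn.create_bucket('my-eu-bucket', location=Location.EU,
+ # policy='public-read')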
+
+ def delete_bucket(self, bucket, headers=None):
+ response = self.make_request('DELETE', bucket, headers=headers)
+ body = response.read()
+ if response.status != 204:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ def make_request(self, method, bucket='', key='', headers=None, data='',
+ query_args=None, sender=None):
+ if isinstance(bucket, Bucket):
+ bucket = bucket.name
+ if isinstance(key, Key):
+ key = key.name
+ path = self.calling_format.build_path_base(bucket, key)
+ auth_path = self.calling_format.build_auth_path(bucket, key)
+ host = self.calling_format.build_host(self.server_name(), bucket)
+ if query_args:
+ path += '?' + query_args
+ auth_path += '?' + query_args
+ return AWSAuthConnection.make_request(self, method, path, headers,
+ data, host, auth_path, sender)
+
diff --git a/src/s3ql/backends/boto/s3/key.py b/src/s3ql/backends/boto/s3/key.py
new file mode 100644
index 0000000..5047e68
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/key.py
@@ -0,0 +1,901 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+import mimetypes
+import os
+import rfc822
+import StringIO
+import base64
+from .. import utils
+from ... import boto
+from ..exception import S3ResponseError, S3DataError, BotoClientError
+from .user import User
+from ...boto import UserAgent, config
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+
+class Key(object):
+
+ DefaultContentType = 'application/octet-stream'
+
+ BufferSize = 8192
+
+ def __init__(self, bucket=None, name=None):
+ self.bucket = bucket
+ self.name = name
+ self.metadata = {}
+ self.content_type = self.DefaultContentType
+ self.content_encoding = None
+ self.filename = None
+ self.etag = None
+ self.last_modified = None
+ self.owner = None
+ self.storage_class = 'STANDARD'
+ self.md5 = None
+ self.base64md5 = None
+ self.path = None
+ self.resp = None
+ self.mode = None
+ self.size = None
+ self.version_id = None
+ self.source_version_id = None
+ self.delete_marker = False
+
+ def __repr__(self):
+ if self.bucket:
+ return '<Key: %s,%s>' % (self.bucket.name, self.name)
+ else:
+ return '<Key: None,%s>' % self.name
+
+ def __getattr__(self, name):
+ if name == 'key':
+ return self.name
+ else:
+ raise AttributeError
+
+ def __setattr__(self, name, value):
+ if name == 'key':
+ self.__dict__['name'] = value
+ else:
+ self.__dict__[name] = value
+
+ def __iter__(self):
+ return self
+
+ def handle_version_headers(self, resp):
+ self.version_id = resp.getheader('x-amz-version-id', None)
+ self.source_version_id = resp.getheader('x-amz-copy-source-version-id', None)
+ if resp.getheader('x-amz-delete-marker', 'false') == 'true':
+ self.delete_marker = True
+ else:
+ self.delete_marker = False
+
+ def open_read(self, headers=None, query_args=None):
+ """
+ Open this key for reading
+
+ :type headers: dict
+ :param headers: Headers to pass in the web request
+
+ :type query_args: string
+ :param query_args: Arguments to pass in the query string (ie, 'torrent')
+ """
+ if self.resp == None:
+ self.mode = 'r'
+
+ self.resp = self.bucket.connection.make_request('GET',
+ self.bucket.name,
+ self.name, headers,
+ query_args=query_args)
+ if self.resp.status < 199 or self.resp.status > 299:
+ body = self.resp.read()
+ raise S3ResponseError(self.resp.status, self.resp.reason, body)
+ response_headers = self.resp.msg
+ self.metadata = boto.utils.get_aws_metadata(response_headers)
+ for name, value in response_headers.items():
+ if name.lower() == 'content-length':
+ self.size = int(value)
+ elif name.lower() == 'etag':
+ self.etag = value
+ elif name.lower() == 'content-type':
+ self.content_type = value
+ elif name.lower() == 'content-encoding':
+ self.content_encoding = value
+ elif name.lower() == 'last-modified':
+ self.last_modified = value
+ self.handle_version_headers(self.resp)
+
+ def open_write(self, headers=None):
+ """
+ Open this key for writing.
+ Not yet implemented
+
+ :type headers: dict
+ :param headers: Headers to pass in the write request
+ """
+ raise BotoClientError('Not Implemented')
+
+ def open(self, mode='r', headers=None, query_args=None):
+ if mode == 'r':
+ self.mode = 'r'
+ self.open_read(headers=headers, query_args=query_args)
+ elif mode == 'w':
+ self.mode = 'w'
+ self.open_write(headers=headers)
+ else:
+ raise BotoClientError('Invalid mode: %s' % mode)
+
+ closed = False
+ def close(self):
+ if self.resp:
+ self.resp.read()
+ self.resp = None
+ self.mode = None
+ self.closed = True
+
+ def next(self):
+ """
+ By providing a next method, the key object supports use as an iterator.
+ For example, you can now say:
+
+ for bytes in key:
+ write bytes to a file or whatever
+
+ All of the HTTP connection stuff is handled for you.
+ """
+ self.open_read()
+ data = self.resp.read(self.BufferSize)
+ if not data:
+ self.close()
+ raise StopIteration
+ return data
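+
+ # Example (illustrative): stream a key to a local file in
+ # BufferSize-byte chunks via the iterator protocol:
+ #
+ # out = open('local.dat', 'wb')
+ # for chunk in key:
+ # out.write(chunk)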
+
+ def read(self, size=0):
+ if size == 0:
+ size = self.BufferSize
+ self.open_read()
+ data = self.resp.read(size)
+ if not data:
+ self.close()
+ return data
+
+ def change_storage_class(self, new_storage_class, dst_bucket=None):
+ """
+ Change the storage class of an existing key.
+ Depending on whether a different destination bucket is supplied
+ or not, this will either move the item within the bucket, preserving
+ all metadata and ACL info while changing the storage class, or it
+ will copy the item to the provided destination bucket, also
+ preserving metadata and ACL info.
+
+ :type new_storage_class: string
+ :param new_storage_class: The new storage class for the Key.
+ Possible values are:
+ * STANDARD
+ * REDUCED_REDUNDANCY
+
+ :type dst_bucket: string
+ :param dst_bucket: The name of a destination bucket. If not
+ provided the current bucket of the key
+ will be used.
+
+ """
+ self.storage_class = new_storage_class
+ return self.copy(dst_bucket or self.bucket.name, self.name,
+ reduced_redundancy=(new_storage_class == 'REDUCED_REDUNDANCY'),
+ preserve_acl=True)
+
+ def copy(self, dst_bucket, dst_key, metadata=None,
+ reduced_redundancy=False, preserve_acl=False):
+ """
+ Copy this Key to another bucket.
+
+ :type dst_bucket: string
+ :param dst_bucket: The name of the destination bucket
+
+ :type dst_key: string
+ :param dst_key: The name of the destination key
+
+ :type metadata: dict
+ :param metadata: Metadata to be associated with new key.
+ If metadata is supplied, it will replace the
+ metadata of the source key being copied.
+ If no metadata is supplied, the source key's
+ metadata will be copied to the new key.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will force the storage
+ class of the new Key to be
+ REDUCED_REDUNDANCY regardless of the
+ storage class of the key being copied.
+ The Reduced Redundancy Storage (RRS)
+ feature of S3, provides lower
+ redundancy at lower storage cost.
+
+ :type preserve_acl: bool
+ :param preserve_acl: If True, the ACL from the source key
+ will be copied to the destination
+ key. If False, the destination key
+ will have the default ACL.
+ Note that preserving the ACL in the
+ new key object will require two
+ additional API calls to S3, one to
+ retrieve the current ACL and one to
+ set that ACL on the new object. If
+ you don't care about the ACL, a value
+ of False will be significantly more
+ efficient.
+
+ :rtype: :class:`boto.s3.key.Key` or subclass
+ :returns: An instance of the newly created key object
+ """
+ dst_bucket = self.bucket.connection.lookup(dst_bucket)
+ if reduced_redundancy:
+ storage_class = 'REDUCED_REDUNDANCY'
+ else:
+ storage_class = self.storage_class
+ return dst_bucket.copy_key(dst_key, self.bucket.name,
+ self.name, metadata,
+ storage_class=storage_class,
+ preserve_acl=preserve_acl)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Owner':
+ self.owner = User(self)
+ return self.owner
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Key':
+ self.name = value.encode('utf-8')
+ elif name == 'ETag':
+ self.etag = value
+ elif name == 'LastModified':
+ self.last_modified = value
+ elif name == 'Size':
+ self.size = int(value)
+ elif name == 'StorageClass':
+ self.storage_class = value
+ elif name == 'Owner':
+ pass
+ elif name == 'VersionId':
+ self.version_id = value
+ else:
+ setattr(self, name, value)
+
+ def exists(self):
+ """
+ Returns True if the key exists
+
+ :rtype: bool
+ :return: Whether the key exists on S3
+ """
+ return bool(self.bucket.lookup(self.name))
+
+ def delete(self):
+ """
+ Delete this key from S3
+ """
+ return self.bucket.delete_key(self.name)
+
+ def get_metadata(self, name):
+ return self.metadata.get(name)
+
+ def set_metadata(self, name, value):
+ self.metadata[name] = value
+
+ def update_metadata(self, d):
+ self.metadata.update(d)
+
+ # convenience methods for setting/getting ACL
+ def set_acl(self, acl_str, headers=None):
+ if self.bucket != None:
+ self.bucket.set_acl(acl_str, self.name, headers=headers)
+
+ def get_acl(self, headers=None):
+ if self.bucket != None:
+ return self.bucket.get_acl(self.name, headers=headers)
+
+ def get_xml_acl(self, headers=None):
+ if self.bucket != None:
+ return self.bucket.get_xml_acl(self.name, headers=headers)
+
+ def set_xml_acl(self, acl_str, headers=None):
+ if self.bucket != None:
+ return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
+
+ def set_canned_acl(self, acl_str, headers=None):
+ return self.bucket.set_canned_acl(acl_str, self.name, headers)
+
+ def make_public(self, headers=None):
+ return self.bucket.set_canned_acl('public-read', self.name, headers)
+
+ def generate_url(self, expires_in, method='GET', headers=None,
+ query_auth=True, force_http=False):
+ """
+ Generate a URL to access this key.
+
+ :type expires_in: int
+ :param expires_in: How long the url is valid for, in seconds
+
+ :type method: string
+ :param method: The method to use for retrieving the file (default is GET)
+
+ :type headers: dict
+ :param headers: Any headers to pass along in the request
+
+ :type query_auth: bool
+ :param query_auth:
+
+ :rtype: string
+ :return: The URL to access the key
+ """
+ return self.bucket.connection.generate_url(expires_in, method,
+ self.bucket.name, self.name,
+ headers, query_auth, force_http)
+
+ def send_file(self, fp, headers=None, cb=None, num_cb=10):
+ """
+ Upload a file to a key into a bucket on S3.
+
+ :type fp: file
+ :param fp: The file pointer to upload
+
+ :type headers: dict
+ :param headers: The headers to pass along with the PUT request
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted to S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb
+ parameter this parameter determines the granularity
+ of the callback by defining the maximum number of
+ times the callback will be called during the file
+ transfer. Providing a negative integer will cause
+ your callback to be called with each buffer read.
+
+ """
+ def sender(http_conn, method, path, data, headers):
+ http_conn.putrequest(method, path)
+ for key in headers:
+ http_conn.putheader(key, headers[key])
+ http_conn.endheaders()
+ fp.seek(0)
+ save_debug = self.bucket.connection.debug
+ self.bucket.connection.debug = 0
+ if cb:
+ if num_cb > 2:
+ cb_count = self.size / self.BufferSize / (num_cb - 2)
+ elif num_cb < 0:
+ cb_count = -1
+ else:
+ cb_count = 0
+ i = total_bytes = 0
+ cb(total_bytes, self.size)
+ l = fp.read(self.BufferSize)
+ while len(l) > 0:
+ http_conn.send(l)
+ if cb:
+ total_bytes += len(l)
+ i += 1
+ if i == cb_count or cb_count == -1:
+ cb(total_bytes, self.size)
+ i = 0
+ l = fp.read(self.BufferSize)
+ if cb:
+ cb(total_bytes, self.size)
+ response = http_conn.getresponse()
+ body = response.read()
+ fp.seek(0)
+ self.bucket.connection.debug = save_debug
+ if response.status == 500 or response.status == 503 or \
+ response.getheader('location'):
+ # we'll try again
+ return response
+ elif response.status >= 200 and response.status <= 299:
+ self.etag = response.getheader('etag')
+ if self.etag != '"%s"' % self.md5:
+ raise S3DataError('ETag from S3 did not match computed MD5')
+ return response
+ else:
+ raise S3ResponseError(response.status, response.reason, body)
+
+ if not headers:
+ headers = {}
+ else:
+ headers = headers.copy()
+ headers['User-Agent'] = UserAgent
+ headers['Content-MD5'] = self.base64md5
+ if self.storage_class != 'STANDARD':
+ headers['x-amz-storage-class'] = self.storage_class
+ if headers.has_key('Content-Type'):
+ self.content_type = headers['Content-Type']
+ elif self.path:
+ self.content_type = mimetypes.guess_type(self.path)[0]
+ if self.content_type == None:
+ self.content_type = self.DefaultContentType
+ headers['Content-Type'] = self.content_type
+ else:
+ headers['Content-Type'] = self.content_type
+ headers['Content-Length'] = str(self.size)
+ headers['Expect'] = '100-Continue'
+ headers = boto.utils.merge_meta(headers, self.metadata)
+ resp = self.bucket.connection.make_request('PUT', self.bucket.name,
+ self.name, headers,
+ sender=sender)
+ self.handle_version_headers(resp)
+
+ def compute_md5(self, fp):
+ """
+ :type fp: file
+ :param fp: File pointer to the file to MD5 hash. The file pointer will be
+ reset to the beginning of the file before the method returns.
+
+ :rtype: tuple
+ :return: A tuple containing the hex digest version of the MD5 hash
+ as the first element and the base64 encoded version of the
+ plain digest as the second element.
+ """
+ m = md5()
+ fp.seek(0)
+ s = fp.read(self.BufferSize)
+ while s:
+ m.update(s)
+ s = fp.read(self.BufferSize)
+ hex_md5 = m.hexdigest()
+ base64md5 = base64.encodestring(m.digest())
+ if base64md5[-1] == '\n':
+ base64md5 = base64md5[0:-1]
+ self.size = fp.tell()
+ fp.seek(0)
+ return (hex_md5, base64md5)
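+
+ # Example (illustrative): compute the checksum once and hand it to the
+ # upload so the file is not read twice:
+ #
+ # fp = open('big.bin', 'rb')
+ # digests = key.compute_md5(fp) # (hex_md5, base64md5)
+ # key.set_contents_from_file(fp, md5=digests)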
+
+ def set_contents_from_file(self, fp, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the contents of the file pointed to by 'fp' as the
+ contents.
+
+ :type fp: file
+ :param fp: the file whose contents to upload
+
+ :type headers: dict
+ :param headers: additional HTTP headers that will be sent with the PUT request.
+
+ :type replace: bool
+ :param replace: If this parameter is False, the method
+ will first check to see if an object exists in the
+ bucket with the same key. If it does, it won't
+ overwrite it. The default value is True which will
+ overwrite the object.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted to S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the new key in S3.
+
+ :type md5: tuple
+ :param md5: A tuple containing the hexdigest version of the MD5 checksum
+ of the file as the first element and the Base64-encoded version of the
+ plain checksum as the second element. This is the same format returned
+ by the compute_md5 method. If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice, so this param, if
+ present, will be used as the MD5 values of the file. Otherwise, the
+ checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be
+ REDUCED_REDUNDANCY. The Reduced Redundancy
+ Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ """
+ if headers is None:
+ headers = {}
+ if policy:
+ headers['x-amz-acl'] = policy
+ if reduced_redundancy:
+ self.storage_class = 'REDUCED_REDUNDANCY'
+ headers['x-amz-storage-class'] = self.storage_class
+ if hasattr(fp, 'name'):
+ self.path = fp.name
+ if self.bucket != None:
+ if not md5:
+ md5 = self.compute_md5(fp)
+ self.md5 = md5[0]
+ self.base64md5 = md5[1]
+ if self.name == None:
+ self.name = self.md5
+ if not replace:
+ k = self.bucket.lookup(self.name)
+ if k:
+ return
+ self.send_file(fp, headers, cb, num_cb)
+
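+ # Usage sketch (editor's illustration, not part of the original boto
+ # code): uploading a local file with a progress callback. The bucket
+ # and file names are hypothetical; credentials are assumed configured.
+ #
+ #     def progress(transmitted, total):
+ #         print '%d of %d bytes transmitted' % (transmitted, total)
+ #
+ #     key = bucket.new_key('backup.tar')
+ #     fp = open('backup.tar', 'rb')
+ #     key.set_contents_from_file(fp, cb=progress, num_cb=10)
+ #     fp.close()
+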
+ def set_contents_from_filename(self, filename, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the contents of the file named by 'filename'.
+ See set_contents_from_file method for details about the
+ parameters.
+
+ :type filename: string
+ :param filename: The name of the file that you want to put onto S3
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the request to AWS.
+
+ :type replace: bool
+ :param replace: If True, replaces the object in S3 if it already exists.
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to report
+ progress on the upload. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted to S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the new key in S3.
+
+ :type md5: tuple
+ :param md5: A tuple containing the hexdigest version of the MD5 checksum
+ of the file as the first element and the Base64-encoded version of the
+ plain checksum as the second element. This is the same format returned
+ by the compute_md5 method. If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice, so this param, if
+ present, will be used as the MD5 values of the file. Otherwise, the
+ checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be
+ REDUCED_REDUNDANCY. The Reduced Redundancy
+ Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ """
+ fp = open(filename, 'rb')
+ self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+ policy, md5, reduced_redundancy)
+ fp.close()
+
+ def set_contents_from_string(self, s, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the string 's' as the contents.
+ See set_contents_from_file method for details about the
+ parameters.
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the request to AWS.
+
+ :type replace: bool
+ :param replace: If True, replaces the object in S3 if it already exists.
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to report
+ progress on the upload. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted to S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the new key in S3.
+
+ :type md5: tuple
+ :param md5: A tuple containing the hexdigest version of the MD5 checksum
+ of the file as the first element and the Base64-encoded version of the
+ plain checksum as the second element. This is the same format returned
+ by the compute_md5 method. If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice, so this param, if
+ present, will be used as the MD5 values of the file. Otherwise, the
+ checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be
+ REDUCED_REDUNDANCY. The Reduced Redundancy
+ Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ """
+ fp = StringIO.StringIO(s)
+ r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+ policy, md5, reduced_redundancy)
+ fp.close()
+ return r
+
+ def get_file(self, fp, headers=None, cb=None, num_cb=10,
+ torrent=False, version_id=None):
+ """
+ Retrieves a file from an S3 Key
+
+ :type fp: file
+ :param fp: File pointer to put the data into
+
+ :type headers: dict
+ :param headers: headers to send when retrieving the file
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to report
+ progress on the download. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted from S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: Flag for whether to get a torrent for the file
+ """
+ if cb:
+ if num_cb > 2:
+ cb_count = self.size / self.BufferSize / (num_cb - 2)
+ else:
+ cb_count = 0
+ i = total_bytes = 0
+ cb(total_bytes, self.size)
+ save_debug = self.bucket.connection.debug
+ if self.bucket.connection.debug == 1:
+ self.bucket.connection.debug = 0
+
+ query_args = ''
+ if torrent:
+ query_args = 'torrent'
+ elif version_id:
+ query_args = 'versionId=%s' % version_id
+ self.open('r', headers, query_args=query_args)
+ for bytes in self:
+ fp.write(bytes)
+ if cb:
+ total_bytes += len(bytes)
+ i += 1
+ if i == cb_count:
+ cb(total_bytes, self.size)
+ i = 0
+ if cb:
+ cb(total_bytes, self.size)
+ self.close()
+ self.bucket.connection.debug = save_debug
+
+ def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
+ """
+ Get a torrent file (see get_file)
+
+ :type fp: file
+ :param fp: The file pointer of where to put the torrent
+
+ :type headers: dict
+ :param headers: Headers to be passed
+
+ :type cb: function
+ :param cb: Callback function to call on retrieved data
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ """
+ return self.get_file(fp, headers, cb, num_cb, torrent=True)
+
+ def get_contents_to_file(self, fp, headers=None,
+ cb=None, num_cb=10,
+ torrent=False,
+ version_id=None):
+ """
+ Retrieve an object from S3 using the name of the Key object as the
+ key in S3. Write the contents of the object to the file pointed
+ to by 'fp'.
+
+ :type fp: file
+ :param fp: File-like object to which the object's contents will be written
+
+ :type headers: dict
+ :param headers: additional HTTP headers that will be sent with the GET request.
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to report
+ progress on the download. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted from S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: If True, retrieves the object's torrent file instead of the object itself.
+
+ """
+ if self.bucket != None:
+ self.get_file(fp, headers, cb, num_cb, torrent=torrent,
+ version_id=version_id)
+
+ def get_contents_to_filename(self, filename, headers=None,
+ cb=None, num_cb=10,
+ torrent=False,
+ version_id=None):
+ """
+ Retrieve an object from S3 using the name of the Key object as the
+ key in S3. Store contents of the object to a file named by 'filename'.
+ See get_contents_to_file method for details about the
+ parameters.
+
+ :type filename: string
+ :param filename: The filename of where to put the file contents
+
+ :type headers: dict
+ :param headers: Any additional headers to send in the request
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to report
+ progress on the download. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted from S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: If True, retrieves the object's torrent file instead of the object itself.
+
+ """
+ fp = open(filename, 'wb')
+ self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
+ version_id=version_id)
+ fp.close()
+ # if last_modified date was sent from s3, try to set file's timestamp
+ if self.last_modified != None:
+ try:
+ modified_tuple = rfc822.parsedate_tz(self.last_modified)
+ modified_stamp = int(rfc822.mktime_tz(modified_tuple))
+ os.utime(fp.name, (modified_stamp, modified_stamp))
+ except Exception: pass
+
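+ # Usage sketch (editor's illustration, not part of the original boto
+ # code): downloading an object to a local file. Note that the method
+ # also tries to set the local file's mtime from the S3 last-modified
+ # date. The key and file names below are hypothetical.
+ #
+ #     key = bucket.get_key('backup.tar')
+ #     key.get_contents_to_filename('/tmp/backup.tar')
+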
+ def get_contents_as_string(self, headers=None,
+ cb=None, num_cb=10,
+ torrent=False,
+ version_id=None):
+ """
+ Retrieve an object from S3 using the name of the Key object as the
+ key in S3. Return the contents of the object as a string.
+ See get_contents_to_file method for details about the
+ parameters.
+
+ :type headers: dict
+ :param headers: Any additional headers to send in the request
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to report
+ progress on the download. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted from S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: If True, returns the contents of a torrent file as a string.
+
+ :rtype: string
+ :returns: The contents of the file as a string
+ """
+ fp = StringIO.StringIO()
+ self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
+ version_id=version_id)
+ return fp.getvalue()
+
+ def add_email_grant(self, permission, email_address, headers=None):
+ """
+ Convenience method that provides a quick way to add an email grant to a key.
+ This method retrieves the current ACL, creates a new grant based on the parameters
+ passed in, adds that grant to the ACL and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ READ|WRITE|READ_ACP|WRITE_ACP|FULL_CONTROL
+ See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingAuthAccess.html
+ for more details on permissions.
+
+ :type email_address: string
+ :param email_address: The email address associated with the AWS account you are granting
+ the permission to.
+ """
+ policy = self.get_acl(headers=headers)
+ policy.acl.add_email_grant(permission, email_address)
+ self.set_acl(policy, headers=headers)
+
+ def add_user_grant(self, permission, user_id):
+ """
+ Convenience method that provides a quick way to add a canonical user grant to a key.
+ This method retrieves the current ACL, creates a new grant based on the parameters
+ passed in, adds that grant to the ACL and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ READ|WRITE|READ_ACP|WRITE_ACP|FULL_CONTROL
+ See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingAuthAccess.html
+ for more details on permissions.
+
+ :type user_id: string
+ :param user_id: The canonical user id associated with the AWS account you are granting
+ the permission to.
+ """
+ policy = self.get_acl()
+ policy.acl.add_user_grant(permission, user_id)
+ self.set_acl(policy)
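+
+ # Usage sketch (editor's illustration, not part of the original boto
+ # code): granting access on a single key. Both identifiers below are
+ # hypothetical.
+ #
+ #     key.add_email_grant('READ', 'user@example.com')
+ #     key.add_user_grant('FULL_CONTROL', 'canonical-user-id-of-grantee')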
diff --git a/src/s3ql/backends/boto/s3/prefix.py b/src/s3ql/backends/boto/s3/prefix.py
new file mode 100644
index 0000000..25a234b
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/prefix.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+class Prefix:
+ def __init__(self, bucket=None, name=None):
+ self.bucket = bucket
+ self.name = name
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Prefix':
+ self.name = value
+ else:
+ setattr(self, name, value)
+
diff --git a/src/s3ql/backends/boto/s3/user.py b/src/s3ql/backends/boto/s3/user.py
new file mode 100644
index 0000000..5d454e6
--- /dev/null
+++ b/src/s3ql/backends/boto/s3/user.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+class User:
+ def __init__(self, parent=None, id='', display_name=''):
+ if parent:
+ parent.owner = self
+ self.type = None
+ self.id = id
+ self.display_name = display_name
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'DisplayName':
+ self.display_name = value
+ elif name == 'ID':
+ self.id = value
+ else:
+ setattr(self, name, value)
+
+ def to_xml(self, element_name='Owner'):
+ if self.type:
+ s = '<%s xsi:type="%s">' % (element_name, self.type)
+ else:
+ s = '<%s>' % element_name
+ s += '<ID>%s</ID>' % self.id
+ s += '<DisplayName>%s</DisplayName>' % self.display_name
+ s += '</%s>' % element_name
+ return s
diff --git a/src/s3ql/backends/boto/storage_uri.py b/src/s3ql/backends/boto/storage_uri.py
new file mode 100644
index 0000000..44ed189
--- /dev/null
+++ b/src/s3ql/backends/boto/storage_uri.py
@@ -0,0 +1,274 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+import os
+from .exception import BotoClientError
+from .exception import InvalidUriError
+
+class StorageUri(object):
+ """
+ Base class for representing storage provider-independent bucket and
+ object name with a shorthand URI-like syntax.
+
+ This is an abstract class: the constructor cannot be called (throws an
+ exception if you try).
+ """
+
+ connection = None
+
+ def __init__(self):
+ """Uncallable constructor on abstract base StorageUri class.
+ """
+ raise BotoClientError('Attempt to instantiate abstract StorageUri '
+ 'class')
+
+ def __str__(self):
+ """Returns string representation of URI."""
+ return self.uri
+
+ def equals(self, uri):
+ """Returns true if two URIs are equal."""
+ return self.uri == uri.uri
+
+ def connect(self, access_key_id=None, secret_access_key=None, **kwargs):
+ """
+ Opens a connection to the appropriate provider, depending on the
+ provider portion of the URI. Requires credentials defined in the boto
+ config file (see boto/pyami/config.py).
+ @rtype: L{AWSAuthConnection<boto.gs.connection.AWSAuthConnection>}
+ @return: A connection to the storage service provider of the given URI.
+ """
+
+ if not self.connection:
+ if self.provider == 's3':
+ from boto.s3.connection import S3Connection
+ self.connection = S3Connection(access_key_id,
+ secret_access_key, **kwargs)
+ elif self.provider == 'gs':
+ from boto.gs.connection import GSConnection
+ self.connection = GSConnection(access_key_id,
+ secret_access_key, **kwargs)
+ elif self.provider == 'file':
+ from boto.file.connection import FileConnection
+ self.connection = FileConnection(self)
+ else:
+ raise InvalidUriError('Unrecognized provider "%s"' %
+ self.provider)
+ self.connection.debug = self.debug
+ return self.connection
+
+ def delete_key(self, headers=None):
+ if not self.object_name:
+ raise InvalidUriError('delete_key on object-less URI (%s)' %
+ self.uri)
+ bucket = self.get_bucket()
+ return bucket.delete_key(self.object_name, headers)
+
+ def get_all_keys(self, headers=None, **params):
+ bucket = self.get_bucket(headers)
+ return bucket.get_all_keys(headers, params)
+
+ def get_bucket(self, validate=True, headers=None):
+ if self.bucket_name is None:
+ raise InvalidUriError('get_bucket on bucket-less URI (%s)' %
+ self.uri)
+ conn = self.connect()
+ return conn.get_bucket(self.bucket_name, validate, headers)
+
+ def get_key(self):
+ if not self.object_name:
+ raise InvalidUriError('get_key on object-less URI (%s)' % self.uri)
+ bucket = self.get_bucket()
+ return bucket.get_key(self.object_name)
+
+ def new_key(self):
+ if not self.object_name:
+ raise InvalidUriError('new_key on object-less URI (%s)' % self.uri)
+ bucket = self.get_bucket()
+ return bucket.new_key(self.object_name)
+
+ def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
+ torrent=False):
+ if not self.object_name:
+ raise InvalidUriError('get_contents_as_string on object-less URI '
+ '(%s)' % self.uri)
+ return self.get_key().get_contents_as_string(headers, cb, num_cb,
+ torrent)
+
+
+class BucketStorageUri(StorageUri):
+ """
+ StorageUri subclass that handles bucket storage providers.
+ Callers should instantiate this class by calling boto.storage_uri().
+ """
+
+ def __init__(self, provider, bucket_name=None, object_name=None,
+ debug=False):
+ """Instantiate a BucketStorageUri from provider,bucket,object tuple.
+
+ @type provider: string
+ @param provider: provider name (gs, s3, etc.)
+ @type bucket_name: string
+ @param bucket_name: bucket name
+ @type object_name: string
+ @param object_name: object name
+ @type debug: bool
+ @param debug: whether to turn on debugging on calls to this class
+
+ After instantiation the components are available in the following
+ fields: uri, provider, bucket_name, object_name.
+ """
+
+ self.provider = provider
+ self.bucket_name = bucket_name
+ self.object_name = object_name
+ if self.bucket_name and self.object_name:
+ self.uri = ('%s://%s/%s' % (self.provider, self.bucket_name,
+ self.object_name))
+ elif self.bucket_name:
+ self.uri = ('%s://%s' % (self.provider, self.bucket_name))
+ else:
+ self.uri = ('%s://' % self.provider)
+ self.debug = debug
+
+ def clone_replace_name(self, new_name):
+ """Instantiate a BucketStorageUri from the current BucketStorageUri,
+ but replacing the object_name.
+
+ @type new_name: string
+ @param new_name: new object name
+ """
+ if not self.bucket_name:
+ raise InvalidUriError('clone_replace_name() on bucket-less URI %s' %
+ self.uri)
+ return BucketStorageUri(self.provider, self.bucket_name, new_name,
+ self.debug)
+
+ def get_acl(self, headers=None):
+ if not self.bucket_name:
+ raise InvalidUriError('get_acl on bucket-less URI (%s)' % self.uri)
+ bucket = self.get_bucket()
+ # This works for both bucket- and object- level ACL (former passes
+ # key_name=None):
+ return bucket.get_acl(self.object_name, headers)
+
+ def names_container(self):
+ """Returns True if this URI names a bucket (vs. an object).
+ """
+ return self.object_name is None or self.object_name == ''
+
+ def names_singleton(self):
+ """Returns True if this URI names an object (vs. a bucket).
+ """
+ return self.object_name is not None and self.object_name != ''
+
+ def is_file_uri(self):
+ return False
+
+ def is_cloud_uri(self):
+ return True
+
+ def create_bucket(self, headers=None, location='', policy=None):
+ if self.bucket_name is None:
+ raise InvalidUriError('create_bucket on bucket-less URI (%s)' %
+ self.uri)
+ conn = self.connect()
+ return conn.create_bucket(self.bucket_name, headers, location, policy)
+
+ def delete_bucket(self, headers=None):
+ if self.bucket_name is None:
+ raise InvalidUriError('delete_bucket on bucket-less URI (%s)' %
+ self.uri)
+ conn = self.connect()
+ return conn.delete_bucket(self.bucket_name, headers)
+
+ def get_all_buckets(self, headers=None):
+ conn = self.connect()
+ return conn.get_all_buckets(headers)
+
+ def set_acl(self, acl_or_str, key_name='', headers=None):
+ if not self.bucket_name:
+ raise InvalidUriError('set_acl on bucket-less URI (%s)' %
+ self.uri)
+ self.get_bucket().set_acl(acl_or_str, key_name, headers)
+
+ def set_canned_acl(self, acl_str, headers=None):
+ if not self.object_name:
+ raise InvalidUriError('set_canned_acl on object-less URI (%s)' %
+ self.uri)
+ key = self.get_key()
+ key.set_canned_acl(acl_str, headers)
+
+
+class FileStorageUri(StorageUri):
+ """
+ StorageUri subclass that handles files in the local file system.
+ Callers should instantiate this class by calling boto.storage_uri().
+
+ See file/README about how we map StorageUri operations onto a file system.
+ """
+
+ def __init__(self, object_name, debug):
+ """Instantiate a FileStorageUri from a path name.
+
+ @type object_name: string
+ @param object_name: object name
+
+ After instantiation the components are available in the following
+ fields: uri, provider, bucket_name (always blank for this "anonymous"
+ bucket), object_name.
+ """
+
+ self.provider = 'file'
+ self.bucket_name = ''
+ self.object_name = object_name
+ self.uri = 'file://' + object_name
+ self.debug = debug
+
+ def clone_replace_name(self, new_name):
+ """Instantiate a FileStorageUri from the current FileStorageUri,
+ but replacing the object_name.
+
+ @type new_name: string
+ @param new_name: new object name
+ """
+ return FileStorageUri(new_name, self.debug)
+
+ def names_container(self):
+ """Returns True if this URI names a directory.
+ """
+ return os.path.isdir(self.object_name)
+
+ def names_singleton(self):
+ """Returns True if this URI names a file.
+ """
+ return os.path.isfile(self.object_name)
+
+ def is_file_uri(self):
+ return True
+
+ def is_cloud_uri(self):
+ return False
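+
+ # Usage sketch (editor's illustration, not part of the original boto
+ # code): constructing URIs with the classes above. Names are
+ # hypothetical.
+ #
+ #     uri = BucketStorageUri('s3', 'example-bucket', 'some/object')
+ #     print uri                 # s3://example-bucket/some/object
+ #     uri.names_singleton()     # True
+ #     local = FileStorageUri('/tmp/some_file', debug=False)
+ #     local.is_file_uri()       # True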
diff --git a/src/s3ql/backends/boto/utils.py b/src/s3ql/backends/boto/utils.py
new file mode 100644
index 0000000..93de734
--- /dev/null
+++ b/src/s3ql/backends/boto/utils.py
@@ -0,0 +1,565 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# Parts of this code were copied or derived from sample code supplied by AWS.
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+"""
+Some handy utility functions used by several classes.
+"""
+
+import re
+import urllib
+import urllib2
+import subprocess
+import StringIO
+import time
+import logging.handlers
+from .. import boto
+import tempfile
+import smtplib
+import datetime
+from email.MIMEMultipart import MIMEMultipart
+from email.MIMEBase import MIMEBase
+from email.MIMEText import MIMEText
+from email.Utils import formatdate
+from email import Encoders
+
+try:
+ import hashlib
+ _hashfn = hashlib.sha512
+except ImportError:
+ import md5
+ _hashfn = md5.md5
+
+METADATA_PREFIX = 'x-amz-meta-'
+AMAZON_HEADER_PREFIX = 'x-amz-'
+
+# generates the aws canonical string for the given parameters
+def canonical_string(method, path, headers, expires=None):
+ interesting_headers = {}
+ for key in headers:
+ lk = key.lower()
+ if lk in ['content-md5', 'content-type', 'date'] or lk.startswith(AMAZON_HEADER_PREFIX):
+ interesting_headers[lk] = headers[key].strip()
+
+ # these keys get empty strings if they don't exist
+ if not interesting_headers.has_key('content-type'):
+ interesting_headers['content-type'] = ''
+ if not interesting_headers.has_key('content-md5'):
+ interesting_headers['content-md5'] = ''
+
+ # just in case someone used this. it's not necessary in this lib.
+ if interesting_headers.has_key('x-amz-date'):
+ interesting_headers['date'] = ''
+
+ # if you're using expires for query string auth, then it trumps date
+ # (and x-amz-date)
+ if expires:
+ interesting_headers['date'] = str(expires)
+
+ sorted_header_keys = interesting_headers.keys()
+ sorted_header_keys.sort()
+
+ buf = "%s\n" % method
+ for key in sorted_header_keys:
+ val = interesting_headers[key]
+ if key.startswith(AMAZON_HEADER_PREFIX):
+ buf += "%s:%s\n" % (key, val)
+ else:
+ buf += "%s\n" % val
+
+ # don't include anything after the first ? in the resource...
+ buf += "%s" % path.split('?')[0]
+
+ # ...unless there is an acl or torrent parameter
+ if re.search("[&?]acl($|=|&)", path):
+ buf += "?acl"
+ elif re.search("[&?]logging($|=|&)", path):
+ buf += "?logging"
+ elif re.search("[&?]torrent($|=|&)", path):
+ buf += "?torrent"
+ elif re.search("[&?]location($|=|&)", path):
+ buf += "?location"
+ elif re.search("[&?]requestPayment($|=|&)", path):
+ buf += "?requestPayment"
+ elif re.search("[&?]versions($|=|&)", path):
+ buf += "?versions"
+ elif re.search("[&?]versioning($|=|&)", path):
+ buf += "?versioning"
+ else:
+ m = re.search("[&?]versionId=([^&]+)($|=|&)", path)
+ if m:
+ buf += '?versionId=' + m.group(1)
+
+ return buf
+
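+ # Worked example (editor's illustration, not part of the original boto
+ # code): for a simple PUT, the canonical string interleaves the method,
+ # the 'interesting' headers in sorted order, and the resource path.
+ #
+ #     canonical_string('PUT', '/example-bucket/key',
+ #                      {'Content-Type': 'text/plain',
+ #                       'Date': 'Tue, 27 Mar 2007 21:15:45 +0000'})
+ # yields (the content-md5 slot is empty because that header is absent):
+ #     'PUT\n\ntext/plain\nTue, 27 Mar 2007 21:15:45 +0000\n/example-bucket/key'
+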
+def merge_meta(headers, metadata):
+ final_headers = headers.copy()
+ for k in metadata.keys():
+ if k.lower() in ['cache-control', 'content-md5', 'content-type',
+ 'content-encoding', 'content-disposition',
+ 'date', 'expires']:
+ final_headers[k] = metadata[k]
+ else:
+ final_headers[METADATA_PREFIX + k] = metadata[k]
+
+ return final_headers
+
+def get_aws_metadata(headers):
+ metadata = {}
+ for hkey in headers.keys():
+ if hkey.lower().startswith(METADATA_PREFIX):
+ #val = urllib.unquote_plus(headers[hkey])
+ #metadata[hkey[len(METADATA_PREFIX):]] = unicode(val, 'utf-8')
+ metadata[hkey[len(METADATA_PREFIX):]] = headers[hkey]
+ del headers[hkey]
+ return metadata
+
+def retry_url(url, retry_on_404=True):
+ for i in range(0, 10):
+ try:
+ req = urllib2.Request(url)
+ resp = urllib2.urlopen(req)
+ return resp.read()
+ except urllib2.HTTPError, e:
+ # in 2.6 you use getcode(), in 2.5 and earlier you use code
+ if hasattr(e, 'getcode'):
+ code = e.getcode()
+ else:
+ code = e.code
+ if code == 404 and not retry_on_404:
+ return ''
+ except:
+ pass
+ boto.log.exception('Caught exception reading instance data')
+ time.sleep(2 ** i)
+ boto.log.error('Unable to read instance data, giving up')
+ return ''
+
+def _get_instance_metadata(url):
+ d = {}
+ data = retry_url(url)
+ if data:
+ fields = data.split('\n')
+ for field in fields:
+ if field.endswith('/'):
+ d[field[0:-1]] = _get_instance_metadata(url + field)
+ else:
+ p = field.find('=')
+ if p > 0:
+ key = field[p + 1:]
+ resource = field[0:p] + '/openssh-key'
+ else:
+ key = resource = field
+ val = retry_url(url + resource)
+ p = val.find('\n')
+ if p > 0:
+ val = val.split('\n')
+ d[key] = val
+ return d
+
+def get_instance_metadata(version='latest'):
+ """
+ Returns the instance metadata as a nested Python dictionary.
+ Simple values (e.g. local_hostname, hostname, etc.) will be
+ stored as string values. Values such as ancestor-ami-ids will
+ be stored in the dict as a list of string values. More complex
+ fields such as public-keys will be stored as nested dicts.
+ """
+ url = 'http://169.254.169.254/%s/meta-data/' % version
+ return _get_instance_metadata(url)
+
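+ # Usage sketch (editor's illustration, not part of the original boto
+ # code): this only works from within an EC2 instance, where the metadata
+ # service is reachable at 169.254.169.254.
+ #
+ #     md = get_instance_metadata()
+ #     print md.get('instance-id')
+ #     print md.get('local-hostname')
+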
+def get_instance_userdata(version='latest', sep=None):
+ url = 'http://169.254.169.254/%s/user-data' % version
+ user_data = retry_url(url, retry_on_404=False)
+ if user_data:
+ if sep:
+ l = user_data.split(sep)
+ user_data = {}
+ for nvpair in l:
+ t = nvpair.split('=')
+ user_data[t[0].strip()] = t[1].strip()
+ return user_data
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
+def get_ts(ts=None):
+ if not ts:
+ ts = time.gmtime()
+ return time.strftime(ISO8601, ts)
+
+def parse_ts(ts):
+ return datetime.datetime.strptime(ts, ISO8601)
+
+def find_class(module_name, class_name=None):
+ if class_name:
+ module_name = "%s.%s" % (module_name, class_name)
+ modules = module_name.split('.')
+ c = None
+
+ try:
+ for m in modules[1:]:
+ if c:
+ c = getattr(c, m)
+ else:
+ c = getattr(__import__(".".join(modules[0:-1])), m)
+ return c
+ except:
+ return None
+
+def update_dme(username, password, dme_id, ip_address):
+ """
+ Update your Dynamic DNS record with DNSMadeEasy.com
+ """
+ dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'
+ dme_url += '?username=%s&password=%s&id=%s&ip=%s'
+ s = urllib2.urlopen(dme_url % (username, password, dme_id, ip_address))
+ return s.read()
+
+def fetch_file(uri, file=None, username=None, password=None):
+ """
+ Fetch a file based on the URI provided. If no file pointer is passed in,
+ a tempfile.NamedTemporaryFile is returned (or None if the file could
+ not be retrieved).
+ The URI can be either an HTTP URL or "s3://bucket_name/key_name".
+ """
+ boto.log.info('Fetching %s' % uri)
+ if file == None:
+ file = tempfile.NamedTemporaryFile()
+ try:
+ if uri.startswith('s3://'):
+ bucket_name, key_name = uri[len('s3://'):].split('/', 1)
+ c = boto.connect_s3()
+ bucket = c.get_bucket(bucket_name)
+ key = bucket.get_key(key_name)
+ key.get_contents_to_file(file)
+ else:
+ if username and password:
+ passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+ passman.add_password(None, uri, username, password)
+ authhandler = urllib2.HTTPBasicAuthHandler(passman)
+ opener = urllib2.build_opener(authhandler)
+ urllib2.install_opener(opener)
+ s = urllib2.urlopen(uri)
+ file.write(s.read())
+ file.seek(0)
+ except:
+ boto.log.exception('Problem Retrieving file: %s' % uri)
+ file = None
+ return file
+
+class ShellCommand(object):
+
+ def __init__(self, command, wait=True):
+ self.exit_code = 0
+ self.command = command
+ self.log_fp = StringIO.StringIO()
+ self.wait = wait
+ self.run()
+
+ def run(self):
+ boto.log.info('running:%s' % self.command)
+ self.process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ if(self.wait):
+ while self.process.poll() == None:
+ time.sleep(1)
+ t = self.process.communicate()
+ self.log_fp.write(t[0])
+ self.log_fp.write(t[1])
+ boto.log.info(self.log_fp.getvalue())
+ self.exit_code = self.process.returncode
+ return self.exit_code
+
+ def setReadOnly(self, value):
+ raise AttributeError
+
+ def getStatus(self):
+ return self.exit_code
+
+ status = property(getStatus, setReadOnly, None, 'The exit code for the command')
+
+ def getOutput(self):
+ return self.log_fp.getvalue()
+
+ output = property(getOutput, setReadOnly, None, 'The STDOUT and STDERR output of the command')
+
+class AuthSMTPHandler(logging.handlers.SMTPHandler):
+ """
+ This class extends the SMTPHandler in the standard Python logging module
+ to accept a username and password on the constructor and to then use those
+ credentials to authenticate with the SMTP server. To use this, you could
+ add something like this in your boto config file:
+
+ [handler_hand07]
+ class=boto.utils.AuthSMTPHandler
+ level=WARN
+ formatter=form07
+ args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject')
+ """
+
+ def __init__(self, mailhost, username, password, fromaddr, toaddrs, subject):
+ """
+ Initialize the handler.
+
+ We have extended the constructor to accept a username/password
+ for SMTP authentication.
+ """
+ logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject)
+ self.username = username
+ self.password = password
+
+ def emit(self, record):
+ """
+ Emit a record.
+
+ Format the record and send it to the specified addressees.
+ It would be really nice if I could add authorization to this class
+ without having to resort to cut-and-paste inheritance, but no.
+ """
+ try:
+ port = self.mailport
+ if not port:
+ port = smtplib.SMTP_PORT
+ smtp = smtplib.SMTP(self.mailhost, port)
+ smtp.login(self.username, self.password)
+ msg = self.format(record)
+ msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
+ self.fromaddr,
+ ','.join(self.toaddrs),
+ self.getSubject(record),
+ formatdate(), msg)
+ smtp.sendmail(self.fromaddr, self.toaddrs, msg)
+ smtp.quit()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ self.handleError(record)
+
+class LRUCache(dict):
+ """A dictionary-like object that stores only a certain number of items, and
+ discards its least recently used item when full.
+
+ >>> cache = LRUCache(3)
+ >>> cache['A'] = 0
+ >>> cache['B'] = 1
+ >>> cache['C'] = 2
+ >>> len(cache)
+ 3
+
+ >>> cache['A']
+ 0
+
+ Adding new items to the cache does not increase its size. Instead, the least
+ recently used item is dropped:
+
+ >>> cache['D'] = 3
+ >>> len(cache)
+ 3
+ >>> 'B' in cache
+ False
+
+ Iterating over the cache returns the keys, starting with the most recently
+ used:
+
+ >>> for key in cache:
+ ... print key
+ D
+ A
+ C
+
+ This code is based on the LRUCache class from Genshi which is based on
+ Myghty's LRUCache from ``myghtyutils.util``, written
+ by Mike Bayer and released under the MIT license (Genshi uses the
+ BSD License). See:
+
+ http://svn.myghty.org/myghtyutils/trunk/lib/myghtyutils/util.py
+ """
+
+ class _Item(object):
+ def __init__(self, key, value):
+ self.previous = self.next = None
+ self.key = key
+ self.value = value
+ def __repr__(self):
+ return repr(self.value)
+
+ def __init__(self, capacity):
+ self._dict = dict()
+ self.capacity = capacity
+ self.head = None
+ self.tail = None
+
+ def __contains__(self, key):
+ return key in self._dict
+
+ def __iter__(self):
+ cur = self.head
+ while cur:
+ yield cur.key
+ cur = cur.next
+
+ def __len__(self):
+ return len(self._dict)
+
+ def __getitem__(self, key):
+ item = self._dict[key]
+ self._update_item(item)
+ return item.value
+
+ def __setitem__(self, key, value):
+ item = self._dict.get(key)
+ if item is None:
+ item = self._Item(key, value)
+ self._dict[key] = item
+ self._insert_item(item)
+ else:
+ item.value = value
+ self._update_item(item)
+ self._manage_size()
+
+ def __repr__(self):
+ return repr(self._dict)
+
+ def _insert_item(self, item):
+ item.previous = None
+ item.next = self.head
+ if self.head is not None:
+ self.head.previous = item
+ else:
+ self.tail = item
+ self.head = item
+ self._manage_size()
+
+ def _manage_size(self):
+ while len(self._dict) > self.capacity:
+ del self._dict[self.tail.key]
+ if self.tail != self.head:
+ self.tail = self.tail.previous
+ self.tail.next = None
+ else:
+ self.head = self.tail = None
+
+ def _update_item(self, item):
+ if self.head == item:
+ return
+
+ previous = item.previous
+ previous.next = item.next
+ if item.next is not None:
+ item.next.previous = previous
+ else:
+ self.tail = previous
+
+ item.previous = None
+ item.next = self.head
+ self.head.previous = self.head = item
+
+class Password(object):
+ """
+ Password object that stores itself as a SHA-512 hash (MD5 if hashlib is unavailable).
+ """
+ def __init__(self, str=None):
+ """
+ Load the string from an initial value; this should be the already-hashed password (a hex digest)
+ """
+ self.str = str
+
+ def set(self, value):
+ self.str = _hashfn(value).hexdigest()
+
+ def __str__(self):
+ return str(self.str)
+
+ def __eq__(self, other):
+ if other == None:
+ return False
+ return str(_hashfn(other).hexdigest()) == str(self.str)
+
+ def __len__(self):
+ if self.str:
+ return len(self.str)
+ else:
+ return 0
+
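+ # Usage sketch (editor's illustration, not part of the original boto
+ # code): the comparison hashes the right-hand side, so the plaintext is
+ # never stored on the object.
+ #
+ #     p = Password()
+ #     p.set('correct horse battery staple')
+ #     p == 'correct horse battery staple'   # True
+ #     p == 'wrong guess'                    # False
+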
+def notify(subject, body=None, html_body=None, to_string=None, attachments=[], append_instance_id=True):
+ if append_instance_id:
+ subject = "[%s] %s" % (boto.config.get_value("Instance", "instance-id"), subject)
+ if not to_string:
+ to_string = boto.config.get_value('Notification', 'smtp_to', None)
+ if to_string:
+ try:
+ from_string = boto.config.get_value('Notification', 'smtp_from', 'boto')
+ msg = MIMEMultipart()
+ msg['From'] = from_string
+ msg['To'] = to_string
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = subject
+
+ if body:
+ msg.attach(MIMEText(body))
+
+ if html_body:
+ part = MIMEBase('text', 'html')
+ part.set_payload(html_body)
+ Encoders.encode_base64(part)
+ msg.attach(part)
+
+ for part in attachments:
+ msg.attach(part)
+
+ smtp_host = boto.config.get_value('Notification', 'smtp_host', 'localhost')
+
+ # Alternate port support
+ if boto.config.get_value("Notification", "smtp_port"):
+ server = smtplib.SMTP(smtp_host, int(boto.config.get_value("Notification", "smtp_port")))
+ else:
+ server = smtplib.SMTP(smtp_host)
+
+ # TLS support
+ if boto.config.getbool("Notification", "smtp_tls"):
+ server.ehlo()
+ server.starttls()
+ server.ehlo()
+ smtp_user = boto.config.get_value('Notification', 'smtp_user', '')
+ smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '')
+ if smtp_user:
+ server.login(smtp_user, smtp_pass)
+ server.sendmail(from_string, to_string, msg.as_string())
+ server.quit()
+ except:
+ boto.log.exception('notify failed')
+
diff --git a/src/s3ql/backends/common.py b/src/s3ql/backends/common.py
new file mode 100644
index 0000000..4b0bb16
--- /dev/null
+++ b/src/s3ql/backends/common.py
@@ -0,0 +1,690 @@
+'''
+common.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from cStringIO import StringIO
+import tempfile
+import hmac
+import logging
+import pycryptopp
+import cPickle as pickle
+import time
+import hashlib
+import zlib
+import os
+import bz2
+import lzma
+from base64 import b64decode, b64encode
+import struct
+from abc import ABCMeta, abstractmethod
+
+log = logging.getLogger("backend")
+
+__all__ = [ 'AbstractConnection', 'AbstractBucket', 'ChecksumError', 'UnsupportedError',
+ 'NoSuchObject', 'NoSuchBucket' ]
+
+def sha256(s):
+ return hashlib.sha256(s).digest()
+
+class AbstractConnection(object):
+ '''This class contains functionality shared between all backends.
+
+ All derived classes are expected to be completely threadsafe
+ (except for internal methods starting with underscore)
+ '''
+ __metaclass__ = ABCMeta
+
+ def bucket_exists(self, name):
+ """Check if the bucket `name` exists"""
+
+ try:
+ self.get_bucket(name)
+ except NoSuchBucket:
+ return False
+ else:
+ return True
+
+ def __contains__(self, name):
+ return self.bucket_exists(name)
+
+ def close(self):
+ '''Close connection.
+
+ If this method is not called, the interpreter may be kept alive by
+ background threads initiated by the connection.
+ '''
+ pass
+
+ def prepare_fork(self):
+ '''Prepare connection for forking
+
+ This method must be called before the process is forked, so that
+ the connection can properly terminate any threads that it uses.
+
+ The connection (or any of its bucket objects) can not be used
+ between the calls to `prepare_fork()` and `finish_fork()`.
+ '''
+ pass
+
+ def finish_fork(self):
+ '''Re-initalize connection after forking
+
+ This method must be called after the process has forked, so that
+ the connection can properly restart any threads that it may
+ have stopped for the fork.
+
+ The connection (or any of its bucket objects) can not be used
+ between the calls to `prepare_fork()` and `finish_fork()`.
+ '''
+ pass
+
+ @abstractmethod
+ def create_bucket(self, name, passphrase=None, compression=None):
+ """Create bucket and return `Bucket` instance"""
+ pass
+
+ @abstractmethod
+ def get_bucket(self, name, passphrase=None, compression=None):
+ """Get `Bucket` instance for bucket `name`"""
+ pass
+
+ @abstractmethod
+ def delete_bucket(self, name, recursive=False):
+ """Delete bucket
+
+ If `recursive` is False and the bucket still contains objects, the call
+ will fail.
+ """
+ pass
+
+
+class AbstractBucket(object):
+ '''This class contains functionality shared between all backends.
+
+ Instances behave similarly to dicts. They can be iterated over and
+ indexed into, but raise a separate set of exceptions.
+
+ All derived classes are expected to be completely threadsafe
+ (except for internal methods starting with underscore)
+ '''
+ __metaclass__ = ABCMeta
+
+ def __init__(self, passphrase, compression):
+ self.passphrase = passphrase
+ self.compression = compression
+ super(AbstractBucket, self).__init__()
+
+ def __getitem__(self, key):
+ return self.fetch(key)[0]
+
+ def __setitem__(self, key, value):
+ self.store(key, value)
+
+ def __delitem__(self, key):
+ self.delete(key)
+
+ def __iter__(self):
+ return self.list()
+
+ def __contains__(self, key):
+ return self.contains(key)
+
+ def iteritems(self):
+ for key in self.list():
+ yield (key, self[key])
+
+ def lookup(self, key):
+ """Return metadata for given key.
+
+ If the key does not exist, `NoSuchObject` is raised.
+ """
+
+ if not isinstance(key, str):
+ raise TypeError('key must be of type str')
+
+ meta_raw = self.raw_lookup(key)
+ return self._get_meta(meta_raw)[0]
+
+
+ def fetch(self, key):
+ """Return data stored under `key`.
+
+ Returns a tuple with the data and metadata. If only the data itself is
+ required, ``bucket[key]`` is a more concise notation for
+ ``bucket.fetch(key)[0]``.
+ """
+
+ if not isinstance(key, str):
+ raise TypeError('key must be of type str')
+
+ fh = StringIO()
+ meta = self.fetch_fh(key, fh)
+
+ return (fh.getvalue(), meta)
+
+ def store(self, key, val, metadata=None):
+ """Store data under `key`.
+
+ `metadata` can be a dict of additional attributes to store with the
+ object.
+
+ If no metadata is required, one can simply assign to the subscripted
+ bucket instead of using this function: ``bucket[key] = val`` is
+ equivalent to ``bucket.store(key, val)``.
+
+ Returns the size of the stored object (after compression).
+ """
+ if isinstance(val, unicode):
+ val = val.encode('us-ascii')
+
+ if not isinstance(key, str):
+ raise TypeError('key must be of type str')
+
+ fh = StringIO(val)
+ return self.store_fh(key, fh, metadata)
+
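+ # Usage sketch (editor's illustration, not part of the original S3QL
+ # code): thanks to the dict-style special methods above, a concrete
+ # bucket can be used like a mapping. 'bucket' is assumed to come from a
+ # connection's get_bucket().
+ #
+ #     bucket['key'] = 'some value'        # store()
+ #     data = bucket['key']                # fetch()[0]
+ #     if 'key' in bucket:                 # contains()
+ #         del bucket['key']               # delete()
+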
+ def _get_meta(self, meta_raw, plain=False):
+ '''Get metadata & decompressor factory
+
+ If the bucket has a password set
+ but the object is not encrypted, `ObjectNotEncrypted` is raised
+ unless `plain` is true.
+ '''
+
+ convert_legacy_metadata(meta_raw)
+
+ compr_alg = meta_raw['compression']
+ encr_alg = meta_raw['encryption']
+ encrypted = (encr_alg != 'None')
+
+ if encrypted:
+ if not self.passphrase:
+ raise ChecksumError('Encrypted object and no passphrase supplied')
+
+ if encr_alg != 'AES':
+ raise RuntimeError('Unsupported encryption')
+ elif self.passphrase and not plain:
+ raise ObjectNotEncrypted()
+
+ if compr_alg == 'BZIP2':
+ decomp = bz2.BZ2Decompressor
+ elif compr_alg == 'LZMA':
+ decomp = lzma.LZMADecompressor
+ elif compr_alg == 'ZLIB':
+ decomp = zlib.decompressobj
+ elif compr_alg == 'None':
+ decomp = DummyDecompressor
+ else:
+ raise RuntimeError('Unsupported compression: %s' % compr_alg)
+
+ if 'meta' in meta_raw:
+ buf = b64decode(meta_raw['meta'])
+ if encrypted:
+ buf = decrypt(buf, self.passphrase)
+ metadata = pickle.loads(buf)
+ else:
+ metadata = dict()
+
+ return (metadata, decomp)
+
+ def fetch_fh(self, key, fh, plain=False):
+ """Fetch data for `key` and write to `fh`
+
+ Return a dictionary with the metadata. If the bucket has a password set
+ but the object is not encrypted, `ObjectNotEncrypted` is raised
+ unless `plain` is true.
+ """
+
+ if not isinstance(key, str):
+ raise TypeError('key must be of type str')
+
+ tmp = tempfile.TemporaryFile()
+ (fh, tmp) = (tmp, fh)
+
+ meta_raw = self.raw_fetch(key, fh)
+ (metadata, decomp) = self._get_meta(meta_raw, plain)
+
+ (fh, tmp) = (tmp, fh)
+ tmp.seek(0)
+ fh.seek(0)
+ if self.passphrase:
+ decrypt_uncompress_fh(tmp, fh, self.passphrase, decomp())
+ else:
+ uncompress_fh(tmp, fh, decomp())
+ tmp.close()
+
+ return metadata
+
+ def store_fh(self, key, fh, metadata=None):
+ """Store data in `fh` under `key`
+
+ `metadata` can be a dict of additional attributes to store with the
+ object.
+
+ Returns the size of the stored object (after compression).
+ """
+ (size, fn) = self.prep_store_fh(key, fh, metadata)
+ fn()
+ return size
+
+ def prep_store_fh(self, key, fh, metadata=None):
+ """Prepare to store data in `fh` under `key`
+
+ `metadata` can be a dict of additional attributes to store with the
+ object. The method compresses and encrypts the data and returns a tuple
+ `(size, fn)`, where `fn` is a function that does the actual network
+ transaction and `size` is the size of the object after compression
+ and encryption.
+ """
+
+ if not isinstance(key, str):
+ raise TypeError('key must be of type str')
+
+ # We always store metadata (even if it's just None), so that we can
+ # verify that the object has been created by us when we call lookup().
+ meta_buf = pickle.dumps(metadata, 2)
+
+ meta_raw = dict()
+
+ if self.passphrase:
+ meta_raw['encryption'] = 'AES'
+ nonce = struct.pack(b'<f', time.time() - time.timezone) + bytes(key)
+ meta_raw['meta'] = b64encode(encrypt(meta_buf, self.passphrase, nonce))
+ else:
+ meta_raw['encryption'] = 'None'
+ meta_raw['meta'] = b64encode(meta_buf)
+
+ if self.compression == 'zlib':
+ compr = zlib.compressobj(9)
+ meta_raw['compression'] = 'ZLIB'
+ elif self.compression == 'bzip2':
+ compr = bz2.BZ2Compressor(9)
+ meta_raw['compression'] = 'BZIP2'
+ elif self.compression == 'lzma':
+ compr = lzma.LZMACompressor(options={ 'level': 7 })
+ meta_raw['compression'] = 'LZMA'
+ elif not self.compression:
+ compr = DummyCompressor()
+ meta_raw['compression'] = 'None'
+ else:
+ raise ValueError('Invalid compression algorithm')
+
+ # We need to generate a temporary copy to determine the size of the
+ # object (which needs to transmitted as Content-Length)
+ tmp = tempfile.TemporaryFile()
+ fh.seek(0)
+ if self.passphrase:
+ compress_encrypt_fh(fh, tmp, self.passphrase, nonce, compr)
+ else:
+ compress_fh(fh, tmp, compr)
+ tmp.seek(0, os.SEEK_END)
+ size = tmp.tell()
+ tmp.seek(0)
+ return (size, lambda: self.raw_store(key, tmp, meta_raw))
+
+ @abstractmethod
+ def read_after_create_consistent(self):
+ '''Does this backend provide read-after-create consistency?'''
+ pass
+
+ @abstractmethod
+ def read_after_write_consistent(self):
+ '''Does this backend provide read-after-write consistency?
+
+ (This does not include read-after-delete)
+ '''
+ pass
+
+ @abstractmethod
+ def read_after_delete_consistent(self):
+ '''Does this backend provide read-after-delete consistency?'''
+ pass
+
+ @abstractmethod
+ def __str__(self):
+ pass
+
+ @abstractmethod
+ def clear(self):
+ """Delete all objects in bucket"""
+ pass
+
+ @abstractmethod
+ def contains(self, key):
+ '''Check if `key` is in bucket'''
+ pass
+
+ @abstractmethod
+ def raw_lookup(self, key):
+ '''Return meta data for `key`'''
+ pass
+
+ @abstractmethod
+ def delete(self, key, force=False):
+ """Delete object stored under `key`
+
+ ``bucket.delete(key)`` can also be written as ``del bucket[key]``.
+ If `force` is true, do not return an error if the key does not exist.
+ """
+ pass
+
+ @abstractmethod
+ def list(self, prefix=''):
+ '''List keys in bucket
+
+ Returns an iterator over all keys in the bucket.
+ '''
+ pass
+
+ @abstractmethod
+ def raw_fetch(self, key, fh):
+ '''Fetch contents stored under `key` and write them into `fh`'''
+ pass
+
+ @abstractmethod
+ def raw_store(self, key, fh, metadata):
+ '''Store contents of `fh` in `key` with metadata
+
+ `metadata` has to be a dict with lower-case keys.
+ '''
+ pass
+
+ def copy(self, src, dest):
+ """Copy data stored under key `src` to key `dest`
+
+ If `dest` already exists, it will be overwritten. The copying
+ is done on the remote side. If the backend does not support
+ this operation, raises `UnsupportedError`.
+ """
+ # Unused arguments
+ #pylint: disable=W0613
+ raise UnsupportedError('Backend does not support remote copy')
+
+ def rename(self, src, dest):
+ """Rename key `src` to `dest`
+
+ If `dest` already exists, it will be overwritten. The rename
+ is done on the remote side. If the backend does not support
+ this operation, raises `UnsupportedError`.
+ """
+ # Unused arguments
+ #pylint: disable=W0613
+ raise UnsupportedError('Backend does not support remote rename')
+
+
+class UnsupportedError(Exception):
+ '''Raised if a backend does not support a particular operation'''
+
+ pass
+
+
+def decrypt_uncompress_fh(ifh, ofh, passphrase, decomp):
+ '''Read `ifh` and write decrypted, uncompressed data to `ofh`'''
+
+ bs = 256 * 1024
+
+ # Read nonce
+ len_ = struct.unpack(b'<B', ifh.read(struct.calcsize(b'<B')))[0]
+ nonce = ifh.read(len_)
+
+ key = sha256(passphrase + nonce)
+ cipher = pycryptopp.cipher.aes.AES(key)
+ hmac_ = hmac.new(key, digestmod=hashlib.sha256)
+
+ # Read (encrypted) hmac
+ hash_ = ifh.read(32) # Length of hash
+
+ while True:
+ buf = ifh.read(bs)
+ if not buf:
+ break
+
+ buf = cipher.process(buf)
+ try:
+ buf = decomp.decompress(buf)
+ except IOError:
+ raise ChecksumError('Invalid compressed stream')
+
+ if buf:
+ hmac_.update(buf)
+ ofh.write(buf)
+
+ if decomp.unused_data:
+ raise ChecksumError('Data after end of compressed stream')
+
+ # Decrypt hmac
+ hash_ = cipher.process(hash_)
+
+ if hash_ != hmac_.digest():
+ raise ChecksumError('HMAC mismatch')
+
+def uncompress_fh(ifh, ofh, decomp):
+ '''Read `ifh` and write uncompressed data to `ofh`'''
+
+ bs = 256 * 1024
+ while True:
+ buf = ifh.read(bs)
+ if not buf:
+ break
+
+ try:
+ buf = decomp.decompress(buf)
+ except IOError:
+ raise ChecksumError('Invalid compressed stream')
+
+ if buf:
+ ofh.write(buf)
+
+ if decomp.unused_data:
+ raise ChecksumError('Data after end of compressed stream')
+
+
+class DummyDecompressor(object):
+ '''Pass data through unchanged while providing the decompressor interface'''
+
+ def __init__(self):
+ super(DummyDecompressor, self).__init__()
+ self.unused_data = None
+
+ def decompress(self, buf):
+ return buf
+
+class DummyCompressor(object):
+ '''Pass data through unchanged while providing the compressor interface'''
+
+ def flush(self):
+ return ''
+
+ def compress(self, buf):
+ return buf
+
+
+def compress_encrypt_fh(ifh, ofh, passphrase, nonce, compr):
+ '''Read `ifh` and write compressed, encrypted data to `ofh`'''
+
+ if isinstance(nonce, unicode):
+ nonce = nonce.encode('utf-8')
+
+ bs = 1024 * 1024
+ key = sha256(passphrase + nonce)
+ cipher = pycryptopp.cipher.aes.AES(key)
+ hmac_ = hmac.new(key, digestmod=hashlib.sha256)
+
+ # Write nonce
+ ofh.write(struct.pack(b'<B', len(nonce)))
+ ofh.write(nonce)
+ off = ofh.tell()
+
+ # Reserve space for hmac
+ ofh.write(b'0' * 32)
+
+ while True:
+ buf = ifh.read(bs)
+ if not buf:
+ buf = compr.flush()
+ buf = cipher.process(buf)
+ ofh.write(buf)
+ break
+
+ hmac_.update(buf)
+ buf = compr.compress(buf)
+ if buf:
+ buf = cipher.process(buf)
+ ofh.write(buf)
+
+ buf = hmac_.digest()
+ buf = cipher.process(buf)
+ ofh.seek(off)
+ ofh.write(buf)
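+
+# Illustrative round-trip sketch (an added example, under the assumption
+# that the standard library bz2 module is available and that StringIO is
+# the class decrypt() below already uses): push a buffer through
+# compress_encrypt_fh() and back through decrypt_uncompress_fh().
+def _example_stream_roundtrip():
+    import bz2
+    ifh = StringIO('x' * 1000)
+    ofh = StringIO()
+    compress_encrypt_fh(ifh, ofh, 'passphrase', 'nonce-1',
+                        bz2.BZ2Compressor(9))
+    # Rewind: compress_encrypt_fh() leaves the position just after the
+    # HMAC it patches in at the reserved offset.
+    ofh.seek(0)
+    out = StringIO()
+    decrypt_uncompress_fh(ofh, out, 'passphrase', bz2.BZ2Decompressor())
+    assert out.getvalue() == 'x' * 1000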
+
+def compress_fh(ifh, ofh, compr):
+ '''Read `ifh` and write compressed data to `ofh`'''
+
+ bs = 1024 * 1024
+ while True:
+ buf = ifh.read(bs)
+ if not buf:
+ buf = compr.flush()
+ ofh.write(buf)
+ break
+
+ buf = compr.compress(buf)
+ if buf:
+ ofh.write(buf)
+
+
+
+def decrypt(buf, passphrase):
+ '''Decrypt given string'''
+
+ fh = StringIO(buf)
+
+ len_ = struct.unpack(b'<B', fh.read(struct.calcsize(b'<B')))[0]
+ nonce = fh.read(len_)
+
+ key = sha256(passphrase + nonce)
+ cipher = pycryptopp.cipher.aes.AES(key)
+ hmac_ = hmac.new(key, digestmod=hashlib.sha256)
+
+ # Read (encrypted) hmac
+ hash_ = fh.read(32) # Length of hash
+
+ buf = fh.read()
+ buf = cipher.process(buf)
+ hmac_.update(buf)
+
+ hash_ = cipher.process(hash_)
+
+ if hash_ != hmac_.digest():
+ raise ChecksumError('HMAC mismatch')
+
+ return buf
+
+
+class ChecksumError(Exception):
+ """
+ Raised if there is a checksum error in the data that we received.
+ """
+
+ def __init__(self, str_):
+ super(ChecksumError, self).__init__()
+ self.str = str_
+
+ def __str__(self):
+ return self.str
+
+
+class ObjectNotEncrypted(Exception):
+ '''
+ Raised by the backend if an object was requested from an encrypted
+ bucket, but the object was stored without encryption.
+
+ We do not want to simply return the unencrypted object, because the
+ caller may rely on the object's integrity being cryptographically
+ verified.
+ '''
+
+ pass
+
+class NoSuchObject(Exception):
+ '''Raised if the requested object does not exist in the bucket'''
+
+ def __init__(self, key):
+ super(NoSuchObject, self).__init__()
+ self.key = key
+
+ def __str__(self):
+ return 'Bucket does not have anything stored under key %r' % self.key
+
+class NoSuchBucket(Exception):
+ '''Raised if the requested bucket does not exist'''
+
+ def __init__(self, name):
+ super(NoSuchBucket, self).__init__()
+ self.name = name
+
+ def __str__(self):
+ return 'Bucket %r does not exist' % self.name
+
+def encrypt(buf, passphrase, nonce):
+ '''Encrypt given string'''
+
+ if isinstance(nonce, unicode):
+ nonce = nonce.encode('utf-8')
+
+ key = sha256(passphrase + nonce)
+ cipher = pycryptopp.cipher.aes.AES(key)
+ hmac_ = hmac.new(key, digestmod=hashlib.sha256)
+
+ hmac_.update(buf)
+ buf = cipher.process(buf)
+ hash_ = cipher.process(hmac_.digest())
+
+ return b''.join(
+ (struct.pack(b'<B', len(nonce)),
+ nonce, hash_, buf))
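+
+# Illustrative round-trip sketch: encrypt() frames its output as one
+# length byte, the nonce, the encrypted 32-byte HMAC-SHA256 digest, and
+# then the encrypted payload; decrypt() above undoes exactly this framing.
+def _example_encrypt_roundtrip():
+    buf = encrypt('example payload', 'passphrase', 'nonce-1')
+    assert decrypt(buf, 'passphrase') == 'example payload'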
+
+
+def convert_legacy_metadata(meta):
+
+ if ('encryption' in meta and
+ 'compression' in meta):
+ return
+
+ if 'encrypted' not in meta:
+ meta['encryption'] = 'None'
+ meta['compression'] = 'None'
+ return
+
+ s = meta.pop('encrypted')
+
+ if s == 'True':
+ meta['encryption'] = 'AES'
+ meta['compression'] = 'BZIP2'
+
+ elif s == 'False':
+ meta['encryption'] = 'None'
+ meta['compression'] = 'None'
+
+ elif s.startswith('AES/'):
+ meta['encryption'] = 'AES'
+ meta['compression'] = s[4:]
+
+ elif s.startswith('PLAIN/'):
+ meta['encryption'] = 'None'
+ meta['compression'] = s[6:]
+ else:
+ raise RuntimeError('Unsupported encryption')
+
+ if meta['compression'] == 'BZ2':
+ meta['compression'] = 'BZIP2'
+
+ if meta['compression'] == 'NONE':
+ meta['compression'] = 'None'
+
+
+
+
+ \ No newline at end of file
diff --git a/src/s3ql/backends/ftp.py b/src/s3ql/backends/ftp.py
new file mode 100644
index 0000000..39c53f3
--- /dev/null
+++ b/src/s3ql/backends/ftp.py
@@ -0,0 +1,27 @@
+'''
+ftp.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from .common import AbstractConnection
+from ..common import QuietError
+import logging
+
+log = logging.getLogger("backend.ftp")
+
+class Connection(AbstractConnection):
+
+ def __init__(self, host, port, login, password):
+ super(Connection, self).__init__()
+ raise QuietError('FTP backend is not yet implemented.')
+
+class TLSConnection(Connection):
+
+ def __init__(self, host, port, login, password):
+ super(TLSConnection, self).__init__(host, port, login, password)
+ raise QuietError('FTP backend is not yet implemented.')
diff --git a/src/s3ql/backends/ftplib.py b/src/s3ql/backends/ftplib.py
new file mode 100644
index 0000000..70884f1
--- /dev/null
+++ b/src/s3ql/backends/ftplib.py
@@ -0,0 +1,1038 @@
+# Stolen from Python 2.7 to get TLS support
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+
+"""An FTP client class and some helper functions.
+
+Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
+
+Example:
+
+>>> from ftplib import FTP
+>>> ftp = FTP('ftp.python.org') # connect to host, default port
+>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
+'230 Guest login ok, access restrictions apply.'
+>>> ftp.retrlines('LIST') # list directory contents
+total 9
+drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
+drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
+drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
+drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
+d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
+drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
+drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
+drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
+-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
+'226 Transfer complete.'
+>>> ftp.quit()
+'221 Goodbye.'
+>>>
+
+A nice test that reveals some of the network dialogue would be:
+python ftplib.py -d localhost -l -p -l
+"""
+
+
+#
+# Changes and improvements suggested by Steve Majewski.
+# Modified by Jack to work on the mac.
+# Modified by Siebren to support docstrings and PASV.
+# Modified by Phil Schwartz to add storbinary and storlines callbacks.
+# Modified by Giampaolo Rodola' to add TLS support.
+#
+
+import os
+import sys
+
+# Import SOCKS module if it exists, else standard socket module socket
+try:
+ import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
+ from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
+except ImportError:
+ import socket
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+
+__all__ = ["FTP", "Netrc"]
+
+# Magic number from <socket.h>
+MSG_OOB = 0x1 # Process data out of band
+
+
+# The standard FTP server control port
+FTP_PORT = 21
+
+
+# Exception raised when an error or invalid response is received
+class Error(Exception): pass
+class error_reply(Error): pass # unexpected [123]xx reply
+class error_temp(Error): pass # 4xx errors
+class error_perm(Error): pass # 5xx errors
+class error_proto(Error): pass # response does not begin with [1-5]
+
+
+# All exceptions (hopefully) that may be raised here and that aren't
+# (always) programming errors on our side
+all_errors = (Error, IOError, EOFError)
+
+
+# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
+CRLF = '\r\n'
+
+# The class itself
+class FTP:
+
+ '''An FTP client class.
+
+ To create a connection, call the class using these arguments:
+ host, user, passwd, acct, timeout
+
+ The first four arguments are all strings, and have default value ''.
+ timeout must be numeric and defaults to None if not passed,
+ meaning that no timeout will be set on any ftp socket(s).
+ If a timeout is passed, then this is now the default timeout for all ftp
+ socket operations for this instance.
+
+ Then use self.connect() with optional host and port argument.
+
+ To download a file, use ftp.retrlines('RETR ' + filename),
+ or ftp.retrbinary() with slightly different arguments.
+ To upload a file, use ftp.storlines() or ftp.storbinary(),
+ which have an open file as argument (see their definitions
+ below for details).
+ The download/upload functions first issue appropriate TYPE
+ and PORT or PASV commands.
+'''
+
+ debugging = 0
+ host = ''
+ port = FTP_PORT
+ sock = None
+ file = None
+ welcome = None
+ passiveserver = 1
+
+ # Initialization method (called by class instantiation).
+ # Initialize host to localhost, port to standard ftp port
+ # Optional arguments are host (for connect()),
+ # and user, passwd, acct (for login())
+ def __init__(self, host='', user='', passwd='', acct='',
+ timeout=_GLOBAL_DEFAULT_TIMEOUT):
+ self.timeout = timeout
+ if host:
+ self.connect(host)
+ if user:
+ self.login(user, passwd, acct)
+
+ def connect(self, host='', port=0, timeout= -999):
+ '''Connect to host. Arguments are:
+ - host: hostname to connect to (string, default previous host)
+ - port: port to connect to (integer, default previous port)
+ '''
+ if host != '':
+ self.host = host
+ if port > 0:
+ self.port = port
+ if timeout != -999:
+ self.timeout = timeout
+ self.sock = socket.create_connection((self.host, self.port), self.timeout)
+ self.af = self.sock.family
+ self.file = self.sock.makefile('rb')
+ self.welcome = self.getresp()
+ return self.welcome
+
+ def getwelcome(self):
+ '''Get the welcome message from the server.
+ (this is read and squirreled away by connect())'''
+ if self.debugging:
+ print '*welcome*', self.sanitize(self.welcome)
+ return self.welcome
+
+ def set_debuglevel(self, level):
+ '''Set the debugging level.
+ The required argument level means:
+ 0: no debugging output (default)
+ 1: print commands and responses but not body text etc.
+ 2: also print raw lines read and sent before stripping CR/LF'''
+ self.debugging = level
+ debug = set_debuglevel
+
+ def set_pasv(self, val):
+ '''Use passive or active mode for data transfers.
+ With a false argument, use the normal PORT mode,
+ With a true argument, use the PASV command.'''
+ self.passiveserver = val
+
+ # Internal: "sanitize" a string for printing
+ def sanitize(self, s):
+ if s[:5] == 'pass ' or s[:5] == 'PASS ':
+ i = len(s)
+ while i > 5 and s[i - 1] in '\r\n':
+ i = i - 1
+ s = s[:5] + '*' * (i - 5) + s[i:]
+ return repr(s)
+
+ # Internal: send one line to the server, appending CRLF
+ def putline(self, line):
+ line = line + CRLF
+ if self.debugging > 1: print '*put*', self.sanitize(line)
+ self.sock.sendall(line)
+
+ # Internal: send one command to the server (through putline())
+ def putcmd(self, line):
+ if self.debugging: print '*cmd*', self.sanitize(line)
+ self.putline(line)
+
+ # Internal: return one line from the server, stripping CRLF.
+ # Raise EOFError if the connection is closed
+ def getline(self):
+ line = self.file.readline()
+ if self.debugging > 1:
+ print '*get*', self.sanitize(line)
+ if not line: raise EOFError
+ if line[-2:] == CRLF: line = line[:-2]
+ elif line[-1:] in CRLF: line = line[:-1]
+ return line
+
+ # Internal: get a response from the server, which may possibly
+ # consist of multiple lines. Return a single string with no
+ # trailing CRLF. If the response consists of multiple lines,
+ # these are separated by '\n' characters in the string
+ def getmultiline(self):
+ line = self.getline()
+ if line[3:4] == '-':
+ code = line[:3]
+ while 1:
+ nextline = self.getline()
+ line = line + ('\n' + nextline)
+ if nextline[:3] == code and \
+ nextline[3:4] != '-':
+ break
+ return line
+
+ # Internal: get a response from the server.
+ # Raise various errors if the response indicates an error
+ def getresp(self):
+ resp = self.getmultiline()
+ if self.debugging: print '*resp*', self.sanitize(resp)
+ self.lastresp = resp[:3]
+ c = resp[:1]
+ if c in ('1', '2', '3'):
+ return resp
+ if c == '4':
+ raise error_temp, resp
+ if c == '5':
+ raise error_perm, resp
+ raise error_proto, resp
+
+ def voidresp(self):
+ """Expect a response beginning with '2'."""
+ resp = self.getresp()
+ if resp[:1] != '2':
+ raise error_reply, resp
+ return resp
+
+ def abort(self):
+ '''Abort a file transfer. Uses out-of-band data.
+ This does not follow the procedure from the RFC to send Telnet
+ IP and Synch; that doesn't seem to work with the servers I've
+ tried. Instead, just send the ABOR command as OOB data.'''
+ line = 'ABOR' + CRLF
+ if self.debugging > 1: print '*put urgent*', self.sanitize(line)
+ self.sock.sendall(line, MSG_OOB)
+ resp = self.getmultiline()
+ if resp[:3] not in ('426', '226'):
+ raise error_proto, resp
+
+ def sendcmd(self, cmd):
+ '''Send a command and return the response.'''
+ self.putcmd(cmd)
+ return self.getresp()
+
+ def voidcmd(self, cmd):
+ """Send a command and expect a response beginning with '2'."""
+ self.putcmd(cmd)
+ return self.voidresp()
+
+ def sendport(self, host, port):
+ '''Send a PORT command with the current host and the given
+ port number.
+ '''
+ hbytes = host.split('.')
+ pbytes = [repr(port // 256), repr(port % 256)]
+ bytes = hbytes + pbytes
+ cmd = 'PORT ' + ','.join(bytes)
+ return self.voidcmd(cmd)
+
+ def sendeprt(self, host, port):
+ '''Send a EPRT command with the current host and the given port number.'''
+ af = 0
+ if self.af == socket.AF_INET:
+ af = 1
+ if self.af == socket.AF_INET6:
+ af = 2
+ if af == 0:
+ raise error_proto, 'unsupported address family'
+ fields = ['', repr(af), host, repr(port), '']
+ cmd = 'EPRT ' + '|'.join(fields)
+ return self.voidcmd(cmd)
+
+ def makeport(self):
+ '''Create a new socket and send a PORT command for it.'''
+ msg = "getaddrinfo returns an empty list"
+ sock = None
+ for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
+ af, socktype, proto, canonname, sa = res
+ try:
+ sock = socket.socket(af, socktype, proto)
+ sock.bind(sa)
+ except socket.error, msg:
+ if sock:
+ sock.close()
+ sock = None
+ continue
+ break
+ if not sock:
+ raise socket.error, msg
+ sock.listen(1)
+ port = sock.getsockname()[1] # Get proper port
+ host = self.sock.getsockname()[0] # Get proper host
+ if self.af == socket.AF_INET:
+ resp = self.sendport(host, port)
+ else:
+ resp = self.sendeprt(host, port)
+ return sock
+
+ def makepasv(self):
+ if self.af == socket.AF_INET:
+ host, port = parse227(self.sendcmd('PASV'))
+ else:
+ host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
+ return host, port
+
+ def ntransfercmd(self, cmd, rest=None):
+ """Initiate a transfer over the data connection.
+
+ If the transfer is active, send a port command and the
+ transfer command, and accept the connection. If the server is
+ passive, send a pasv command, connect to it, and start the
+ transfer command. Either way, return the socket for the
+ connection and the expected size of the transfer. The
+ expected size may be None if it could not be determined.
+
+ Optional `rest' argument can be a string that is sent as the
+ argument to a REST command. This is essentially a server
+ marker used to tell the server to skip over any data up to the
+ given marker.
+ """
+ size = None
+ if self.passiveserver:
+ host, port = self.makepasv()
+ conn = socket.create_connection((host, port), self.timeout)
+ if rest is not None:
+ self.sendcmd("REST %s" % rest)
+ resp = self.sendcmd(cmd)
+ # Some servers apparently send a 200 reply to
+ # a LIST or STOR command, before the 150 reply
+ # (and way before the 226 reply). This seems to
+ # be in violation of the protocol (which only allows
+ # 1xx or error messages for LIST), so we just discard
+ # this response.
+ if resp[0] == '2':
+ resp = self.getresp()
+ if resp[0] != '1':
+ raise error_reply, resp
+ else:
+ sock = self.makeport()
+ if rest is not None:
+ self.sendcmd("REST %s" % rest)
+ resp = self.sendcmd(cmd)
+ # See above.
+ if resp[0] == '2':
+ resp = self.getresp()
+ if resp[0] != '1':
+ raise error_reply, resp
+ conn, sockaddr = sock.accept()
+ if resp[:3] == '150':
+ # this is conditional in case we received a 125
+ size = parse150(resp)
+ return conn, size
+
+ def transfercmd(self, cmd, rest=None):
+ """Like ntransfercmd() but returns only the socket."""
+ return self.ntransfercmd(cmd, rest)[0]
+
+ def login(self, user='', passwd='', acct=''):
+ '''Login, default anonymous.'''
+ if not user: user = 'anonymous'
+ if not passwd: passwd = ''
+ if not acct: acct = ''
+ if user == 'anonymous' and passwd in ('', '-'):
+ # If there is no anonymous ftp password specified
+ # then we'll just use anonymous@
+ # We don't send any other thing because:
+ # - We want to remain anonymous
+ # - We want to stop SPAM
+ # - We don't want to let ftp sites to discriminate by the user,
+ # host or country.
+ passwd = passwd + 'anonymous@'
+ resp = self.sendcmd('USER ' + user)
+ if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
+ if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
+ if resp[0] != '2':
+ raise error_reply, resp
+ return resp
+
+ def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
+ """Retrieve data in binary mode. A new port is created for you.
+
+ Args:
+ cmd: A RETR command.
+ callback: A single parameter callable to be called on each
+ block of data read.
+ blocksize: The maximum number of bytes to read from the
+ socket at one time. [default: 8192]
+ rest: Passed to transfercmd(). [default: None]
+
+ Returns:
+ The response code.
+ """
+ self.voidcmd('TYPE I')
+ conn = self.transfercmd(cmd, rest)
+ while 1:
+ data = conn.recv(blocksize)
+ if not data:
+ break
+ callback(data)
+ conn.close()
+ return self.voidresp()
+
+ def retrlines(self, cmd, callback=None):
+ """Retrieve data in line mode. A new port is created for you.
+
+ Args:
+ cmd: A RETR, LIST, NLST, or MLSD command.
+ callback: An optional single parameter callable that is called
+ for each line with the trailing CRLF stripped.
+ [default: print_line()]
+
+ Returns:
+ The response code.
+ """
+ if callback is None: callback = print_line
+ resp = self.sendcmd('TYPE A')
+ conn = self.transfercmd(cmd)
+ fp = conn.makefile('rb')
+ while 1:
+ line = fp.readline()
+ if self.debugging > 2: print '*retr*', repr(line)
+ if not line:
+ break
+ if line[-2:] == CRLF:
+ line = line[:-2]
+ elif line[-1:] == '\n':
+ line = line[:-1]
+ callback(line)
+ fp.close()
+ conn.close()
+ return self.voidresp()
+
+ def storbinary(self, cmd, fp, blocksize=8192, callback=None):
+ """Store a file in binary mode. A new port is created for you.
+
+ Args:
+ cmd: A STOR command.
+ fp: A file-like object with a read(num_bytes) method.
+ blocksize: The maximum data size to read from fp and send over
+ the connection at once. [default: 8192]
+ callback: An optional single parameter callable that is called
+ on each block of data after it is sent. [default: None]
+
+ Returns:
+ The response code.
+ """
+ self.voidcmd('TYPE I')
+ conn = self.transfercmd(cmd)
+ while 1:
+ buf = fp.read(blocksize)
+ if not buf: break
+ conn.sendall(buf)
+ if callback: callback(buf)
+ conn.close()
+ return self.voidresp()
+
+ def storlines(self, cmd, fp, callback=None):
+ """Store a file in line mode. A new port is created for you.
+
+ Args:
+ cmd: A STOR command.
+ fp: A file-like object with a readline() method.
+ callback: An optional single parameter callable that is called
+ on each line after it is sent. [default: None]
+
+ Returns:
+ The response code.
+ """
+ self.voidcmd('TYPE A')
+ conn = self.transfercmd(cmd)
+ while 1:
+ buf = fp.readline()
+ if not buf: break
+ if buf[-2:] != CRLF:
+ if buf[-1] in CRLF: buf = buf[:-1]
+ buf = buf + CRLF
+ conn.sendall(buf)
+ if callback: callback(buf)
+ conn.close()
+ return self.voidresp()
+
+ def acct(self, password):
+ '''Send new account name.'''
+ cmd = 'ACCT ' + password
+ return self.voidcmd(cmd)
+
+ def nlst(self, *args):
+ '''Return a list of files in a given directory (default the current).'''
+ cmd = 'NLST'
+ for arg in args:
+ cmd = cmd + (' ' + arg)
+ files = []
+ self.retrlines(cmd, files.append)
+ return files
+
+ def dir(self, *args):
+ '''List a directory in long form.
+ By default list current directory to stdout.
+ Optional last argument is callback function; all
+ non-empty arguments before it are concatenated to the
+ LIST command. (This *should* only be used for a pathname.)'''
+ cmd = 'LIST'
+ func = None
+ if args[-1:] and type(args[-1]) != type(''):
+ args, func = args[:-1], args[-1]
+ for arg in args:
+ if arg:
+ cmd = cmd + (' ' + arg)
+ self.retrlines(cmd, func)
+
+ def rename(self, fromname, toname):
+ '''Rename a file.'''
+ resp = self.sendcmd('RNFR ' + fromname)
+ if resp[0] != '3':
+ raise error_reply, resp
+ return self.voidcmd('RNTO ' + toname)
+
+ def delete(self, filename):
+ '''Delete a file.'''
+ resp = self.sendcmd('DELE ' + filename)
+ if resp[:3] in ('250', '200'):
+ return resp
+ else:
+ raise error_reply, resp
+
+ def cwd(self, dirname):
+ '''Change to a directory.'''
+ if dirname == '..':
+ try:
+ return self.voidcmd('CDUP')
+ except error_perm, msg:
+ if msg.args[0][:3] != '500':
+ raise
+ elif dirname == '':
+ dirname = '.' # does nothing, but could return error
+ cmd = 'CWD ' + dirname
+ return self.voidcmd(cmd)
+
+ def size(self, filename):
+ '''Retrieve the size of a file.'''
+ # The SIZE command is defined in RFC-3659
+ resp = self.sendcmd('SIZE ' + filename)
+ if resp[:3] == '213':
+ s = resp[3:].strip()
+ try:
+ return int(s)
+ except (OverflowError, ValueError):
+ return long(s)
+
+ def mkd(self, dirname):
+ '''Make a directory, return its full pathname.'''
+ resp = self.sendcmd('MKD ' + dirname)
+ return parse257(resp)
+
+ def rmd(self, dirname):
+ '''Remove a directory.'''
+ return self.voidcmd('RMD ' + dirname)
+
+ def pwd(self):
+ '''Return current working directory.'''
+ resp = self.sendcmd('PWD')
+ return parse257(resp)
+
+ def quit(self):
+ '''Quit, and close the connection.'''
+ resp = self.voidcmd('QUIT')
+ self.close()
+ return resp
+
+ def close(self):
+ '''Close the connection without assuming anything about it.'''
+ if self.file:
+ self.file.close()
+ self.sock.close()
+ self.file = self.sock = None
+
+
+try:
+ import ssl
+except ImportError:
+ pass
+else:
+ class FTP_TLS(FTP):
+ '''A FTP subclass which adds TLS support to FTP as described
+ in RFC-4217.
+
+ Connect as usual to port 21 implicitly securing the FTP control
+ connection before authenticating.
+
+ Securing the data connection requires user to explicitly ask
+ for it by calling prot_p() method.
+
+ Usage example:
+ >>> from ftplib import FTP_TLS
+ >>> ftps = FTP_TLS('ftp.python.org')
+ >>> ftps.login() # login anonymously, securing control channel first
+ '230 Guest login ok, access restrictions apply.'
+ >>> ftps.prot_p() # switch to secure data connection
+ '200 Protection level set to P'
+ >>> ftps.retrlines('LIST') # list directory content securely
+ total 9
+ drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
+ drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
+ drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
+ drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
+ d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
+ drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
+ drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
+ drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
+ -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
+ '226 Transfer complete.'
+ >>> ftps.quit()
+ '221 Goodbye.'
+ >>>
+ '''
+ ssl_version = ssl.PROTOCOL_TLSv1
+
+ def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
+ certfile=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
+ self.keyfile = keyfile
+ self.certfile = certfile
+ self._prot_p = False
+ FTP.__init__(self, host, user, passwd, acct, timeout)
+
+ def login(self, user='', passwd='', acct='', secure=True):
+ if secure and not isinstance(self.sock, ssl.SSLSocket):
+ self.auth()
+ return FTP.login(self, user, passwd, acct)
+
+ def auth(self):
+ '''Set up secure control connection by using TLS/SSL.'''
+ if isinstance(self.sock, ssl.SSLSocket):
+ raise ValueError("Already using TLS")
+ if self.ssl_version == ssl.PROTOCOL_TLSv1:
+ resp = self.voidcmd('AUTH TLS')
+ else:
+ resp = self.voidcmd('AUTH SSL')
+ self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
+ ssl_version=self.ssl_version)
+ self.file = self.sock.makefile(mode='rb')
+ return resp
+
+ def prot_p(self):
+ '''Set up secure data connection.'''
+ # PROT defines whether or not the data channel is to be protected.
+ # Though RFC-2228 defines four possible protection levels,
+ # RFC-4217 only recommends two, Clear and Private.
+ # Clear (PROT C) means that no security is to be used on the
+ # data-channel, Private (PROT P) means that the data-channel
+ # should be protected by TLS.
+ # PBSZ command MUST still be issued, but must have a parameter of
+ # '0' to indicate that no buffering is taking place and the data
+ # connection should not be encapsulated.
+ self.voidcmd('PBSZ 0')
+ resp = self.voidcmd('PROT P')
+ self._prot_p = True
+ return resp
+
+ def prot_c(self):
+ '''Set up clear text data connection.'''
+ resp = self.voidcmd('PROT C')
+ self._prot_p = False
+ return resp
+
+ # --- Overridden FTP methods
+
+ def ntransfercmd(self, cmd, rest=None):
+ conn, size = FTP.ntransfercmd(self, cmd, rest)
+ if self._prot_p:
+ conn = ssl.wrap_socket(conn, self.keyfile, self.certfile,
+ ssl_version=self.ssl_version)
+ return conn, size
+
+ def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
+ self.voidcmd('TYPE I')
+ conn = self.transfercmd(cmd, rest)
+ try:
+ while 1:
+ data = conn.recv(blocksize)
+ if not data:
+ break
+ callback(data)
+ # shutdown ssl layer
+ if isinstance(conn, ssl.SSLSocket):
+ conn.unwrap()
+ finally:
+ conn.close()
+ return self.voidresp()
+
+ def retrlines(self, cmd, callback=None):
+ if callback is None: callback = print_line
+ resp = self.sendcmd('TYPE A')
+ conn = self.transfercmd(cmd)
+ fp = conn.makefile('rb')
+ try:
+ while 1:
+ line = fp.readline()
+ if self.debugging > 2: print '*retr*', repr(line)
+ if not line:
+ break
+ if line[-2:] == CRLF:
+ line = line[:-2]
+ elif line[-1:] == '\n':
+ line = line[:-1]
+ callback(line)
+ # shutdown ssl layer
+ if isinstance(conn, ssl.SSLSocket):
+ conn.unwrap()
+ finally:
+ fp.close()
+ conn.close()
+ return self.voidresp()
+
+ def storbinary(self, cmd, fp, blocksize=8192, callback=None):
+ self.voidcmd('TYPE I')
+ conn = self.transfercmd(cmd)
+ try:
+ while 1:
+ buf = fp.read(blocksize)
+ if not buf: break
+ conn.sendall(buf)
+ if callback: callback(buf)
+ # shutdown ssl layer
+ if isinstance(conn, ssl.SSLSocket):
+ conn.unwrap()
+ finally:
+ conn.close()
+ return self.voidresp()
+
+ def storlines(self, cmd, fp, callback=None):
+ self.voidcmd('TYPE A')
+ conn = self.transfercmd(cmd)
+ try:
+ while 1:
+ buf = fp.readline()
+ if not buf: break
+ if buf[-2:] != CRLF:
+ if buf[-1] in CRLF: buf = buf[:-1]
+ buf = buf + CRLF
+ conn.sendall(buf)
+ if callback: callback(buf)
+ # shutdown ssl layer
+ if isinstance(conn, ssl.SSLSocket):
+ conn.unwrap()
+ finally:
+ conn.close()
+ return self.voidresp()
+
+ __all__.append(FTP_TLS)
+ all_errors = (Error, IOError, EOFError, ssl.SSLError)
+
+
+_150_re = None
+
+def parse150(resp):
+ '''Parse the '150' response for a RETR request.
+ Returns the expected transfer size or None; size is not guaranteed to
+ be present in the 150 message.
+ '''
+ if resp[:3] != '150':
+ raise error_reply, resp
+ global _150_re
+ if _150_re is None:
+ import re
+ _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
+ m = _150_re.match(resp)
+ if not m:
+ return None
+ s = m.group(1)
+ try:
+ return int(s)
+ except (OverflowError, ValueError):
+ return long(s)
+
+
+_227_re = None
+
+def parse227(resp):
+ '''Parse the '227' response for a PASV request.
+ Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
+ Return ('host.addr.as.numbers', port#) tuple.'''
+
+ if resp[:3] != '227':
+ raise error_reply, resp
+ global _227_re
+ if _227_re is None:
+ import re
+ _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
+ m = _227_re.search(resp)
+ if not m:
+ raise error_proto, resp
+ numbers = m.groups()
+ host = '.'.join(numbers[:4])
+ port = (int(numbers[4]) << 8) + int(numbers[5])
+ return host, port
+
+
+def parse229(resp, peer):
+ '''Parse the '229' response for a EPSV request.
+ Raises error_proto if it does not contain '(|||port|)'
+ Return ('host.addr.as.numbers', port#) tuple.'''
+
+ if resp[:3] != '229':
+ raise error_reply, resp
+ left = resp.find('(')
+ if left < 0: raise error_proto, resp
+ right = resp.find(')', left + 1)
+ if right < 0:
+ raise error_proto, resp # should contain '(|||port|)'
+ if resp[left + 1] != resp[right - 1]:
+ raise error_proto, resp
+ parts = resp[left + 1:right].split(resp[left + 1])
+ if len(parts) != 5:
+ raise error_proto, resp
+ host = peer[0]
+ port = int(parts[3])
+ return host, port
+
+
+def parse257(resp):
+ '''Parse the '257' response for a MKD or PWD request.
+ This is a response to a MKD or PWD request: a directory name.
+ Returns the directoryname in the 257 reply.'''
+
+ if resp[:3] != '257':
+ raise error_reply, resp
+ if resp[3:5] != ' "':
+ return '' # Not compliant with RFC 959, but UNIX ftpd does this
+ dirname = ''
+ i = 5
+ n = len(resp)
+ while i < n:
+ c = resp[i]
+ i = i + 1
+ if c == '"':
+ if i >= n or resp[i] != '"':
+ break
+ i = i + 1
+ dirname = dirname + c
+ return dirname
+
+
+def print_line(line):
+ '''Default retrlines callback to print a line.'''
+ print line
+
+
+def ftpcp(source, sourcename, target, targetname='', type='I'):
+ '''Copy file from one FTP-instance to another.'''
+ if not targetname: targetname = sourcename
+ type = 'TYPE ' + type
+ source.voidcmd(type)
+ target.voidcmd(type)
+ sourcehost, sourceport = parse227(source.sendcmd('PASV'))
+ target.sendport(sourcehost, sourceport)
+ # RFC 959: the user must "listen" [...] BEFORE sending the
+ # transfer request.
+ # So: STOR before RETR, because here the target is a "user".
+ treply = target.sendcmd('STOR ' + targetname)
+ if treply[:3] not in ('125', '150'): raise error_proto # RFC 959
+ sreply = source.sendcmd('RETR ' + sourcename)
+ if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959
+ source.voidresp()
+ target.voidresp()
+
+
+class Netrc:
+ """Class to parse & provide access to 'netrc' format files.
+
+ See the netrc(4) man page for information on the file format.
+
+ WARNING: This class is obsolete -- use module netrc instead.
+
+ """
+ __defuser = None
+ __defpasswd = None
+ __defacct = None
+
+ def __init__(self, filename=None):
+ if filename is None:
+ if "HOME" in os.environ:
+ filename = os.path.join(os.environ["HOME"],
+ ".netrc")
+ else:
+ raise IOError, \
+ "specify file to load or set $HOME"
+ self.__hosts = {}
+ self.__macros = {}
+ fp = open(filename, "r")
+ in_macro = 0
+ while 1:
+ line = fp.readline()
+ if not line: break
+ if in_macro and line.strip():
+ macro_lines.append(line)
+ continue
+ elif in_macro:
+ self.__macros[macro_name] = tuple(macro_lines)
+ in_macro = 0
+ words = line.split()
+ host = user = passwd = acct = None
+ default = 0
+ i = 0
+ while i < len(words):
+ w1 = words[i]
+ if i + 1 < len(words):
+ w2 = words[i + 1]
+ else:
+ w2 = None
+ if w1 == 'default':
+ default = 1
+ elif w1 == 'machine' and w2:
+ host = w2.lower()
+ i = i + 1
+ elif w1 == 'login' and w2:
+ user = w2
+ i = i + 1
+ elif w1 == 'password' and w2:
+ passwd = w2
+ i = i + 1
+ elif w1 == 'account' and w2:
+ acct = w2
+ i = i + 1
+ elif w1 == 'macdef' and w2:
+ macro_name = w2
+ macro_lines = []
+ in_macro = 1
+ break
+ i = i + 1
+ if default:
+ self.__defuser = user or self.__defuser
+ self.__defpasswd = passwd or self.__defpasswd
+ self.__defacct = acct or self.__defacct
+ if host:
+ if host in self.__hosts:
+ ouser, opasswd, oacct = \
+ self.__hosts[host]
+ user = user or ouser
+ passwd = passwd or opasswd
+ acct = acct or oacct
+ self.__hosts[host] = user, passwd, acct
+ fp.close()
+
+ def get_hosts(self):
+ """Return a list of hosts mentioned in the .netrc file."""
+ return self.__hosts.keys()
+
+ def get_account(self, host):
+ """Returns login information for the named host.
+
+ The return value is a triple containing userid,
+ password, and the accounting field.
+
+ """
+ host = host.lower()
+ user = passwd = acct = None
+ if host in self.__hosts:
+ user, passwd, acct = self.__hosts[host]
+ user = user or self.__defuser
+ passwd = passwd or self.__defpasswd
+ acct = acct or self.__defacct
+ return user, passwd, acct
+
+ def get_macros(self):
+ """Return a list of all defined macro names."""
+ return self.__macros.keys()
+
+ def get_macro(self, macro):
+ """Return a sequence of lines which define a named macro."""
+ return self.__macros[macro]
+
+
+
+def test():
+ '''Test program.
+ Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
+
+ -d dir
+ -l list
+ -p password
+ '''
+
+ if len(sys.argv) < 2:
+ print test.__doc__
+ sys.exit(0)
+
+ debugging = 0
+ rcfile = None
+ while sys.argv[1] == '-d':
+ debugging = debugging + 1
+ del sys.argv[1]
+ if sys.argv[1][:2] == '-r':
+ # get name of alternate ~/.netrc file:
+ rcfile = sys.argv[1][2:]
+ del sys.argv[1]
+ host = sys.argv[1]
+ ftp = FTP(host)
+ ftp.set_debuglevel(debugging)
+ userid = passwd = acct = ''
+ try:
+ netrc = Netrc(rcfile)
+ except IOError:
+ if rcfile is not None:
+ sys.stderr.write("Could not open account file"
+ " -- using anonymous login.")
+ else:
+ try:
+ userid, passwd, acct = netrc.get_account(host)
+ except KeyError:
+ # no account for host
+ sys.stderr.write(
+ "No account -- using anonymous login.")
+ ftp.login(userid, passwd, acct)
+ for file in sys.argv[2:]:
+ if file[:2] == '-l':
+ ftp.dir(file[2:])
+ elif file[:2] == '-d':
+ cmd = 'CWD'
+ if file[2:]: cmd = cmd + ' ' + file[2:]
+ resp = ftp.sendcmd(cmd)
+ elif file == '-p':
+ ftp.set_pasv(not ftp.passiveserver)
+ else:
+ ftp.retrbinary('RETR ' + file, \
+ sys.stdout.write, 1024)
+ ftp.quit()
+
+
+if __name__ == '__main__':
+ test()
diff --git a/src/s3ql/backends/local.py b/src/s3ql/backends/local.py
new file mode 100644
index 0000000..136c7d5
--- /dev/null
+++ b/src/s3ql/backends/local.py
@@ -0,0 +1,296 @@
+'''
+local.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from .common import AbstractConnection, AbstractBucket, NoSuchBucket, NoSuchObject
+import shutil
+import logging
+import cPickle as pickle
+import os
+import errno
+import threading
+
+log = logging.getLogger("backend.local")
+
+class Connection(AbstractConnection):
+ """
+ A connection that stores buckets on the local disk
+
+ This class is threadsafe. All methods (except for internal methods
+ starting with underscore) may be called concurrently by different
+ threads.
+ """
+
+ def __init__(self):
+ super(Connection, self).__init__()
+ self.lock = threading.RLock()
+
+ def delete_bucket(self, name, recursive=False):
+ """Delete bucket"""
+
+ with self.lock:
+ if not os.path.exists(name):
+ raise NoSuchBucket(name)
+
+ if recursive:
+ shutil.rmtree(name)
+ else:
+ os.rmdir(name)
+
+ def create_bucket(self, name, passphrase=None, compression='bzip2'):
+ """Create and return a bucket"""
+
+ with self.lock:
+ if os.path.exists(name):
+ raise RuntimeError('Bucket already exists')
+ os.mkdir(name)
+
+ return self.get_bucket(name, passphrase, compression)
+
+ def get_bucket(self, name, passphrase=None, compression='bzip2'):
+ """Return a bucket instance for the bucket `name`
+
+ Raises `NoSuchBucket` if the bucket does not exist.
+ """
+
+ with self.lock:
+ if not os.path.exists(name):
+ raise NoSuchBucket(name)
+ return Bucket(name, self.lock, passphrase, compression)
+
+
+class Bucket(AbstractBucket):
+ '''
+ A bucket that is stored on the local hard disk
+
+ This class is threadsafe. All methods (except for internal methods
+ starting with underscore) may be called concurrently by different
+ threads.
+ '''
+
+ def __init__(self, name, lock, passphrase, compression):
+ super(Bucket, self).__init__(passphrase, compression)
+ self.name = name
+ self.lock = lock
+
+ def __str__(self):
+ return '<local bucket, name=%r>' % self.name
+
+ def read_after_create_consistent(self):
+ return True
+
+ def read_after_write_consistent(self):
+ return True
+
+ def read_after_delete_consistent(self):
+ return True
+
+ def clear(self):
+ """Delete all objects in bucket"""
+ with self.lock:
+ for name in os.listdir(self.name):
+ path = os.path.join(self.name, name)
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ os.unlink(path)
+
+ def contains(self, key):
+ with self.lock:
+ path = self._key_to_path(key) + '.dat'
+ try:
+ os.lstat(path)
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ return False
+ raise
+ return True
+
+
+ def raw_lookup(self, key):
+ with self.lock:
+ path = self._key_to_path(key)
+ try:
+ with open(path + '.meta', 'rb') as src:
+ return pickle.load(src)
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ raise NoSuchObject(key)
+ else:
+ raise
+
+ def delete(self, key, force=False):
+ with self.lock:
+ path = self._key_to_path(key)
+ try:
+ os.unlink(path + '.dat')
+ os.unlink(path + '.meta')
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ if force:
+ pass
+ else:
+ raise NoSuchObject(key)
+ else:
+ raise
+
+
+ def list(self, prefix=None):
+ with self.lock:
+ if prefix:
+ base = os.path.dirname(self._key_to_path(prefix))
+ else:
+ base = self.name
+
+ for (path, dirnames, filenames) in os.walk(base, topdown=True):
+
+ # Do not look in wrong directories
+ if prefix:
+ rpath = path[len(self.name):] # path relative to bucket root
+ prefix_l = ''.join(rpath.split('/'))
+
+ dirs_to_walk = list()
+ for name in dirnames:
+ prefix_ll = unescape(prefix_l + name)
+ if prefix_ll.startswith(prefix[:len(prefix_ll)]):
+ dirs_to_walk.append(name)
+ dirnames[:] = dirs_to_walk
+
+ for name in filenames:
+ if not name.endswith('.dat'):
+ continue
+ key = unescape(name[:-4])
+
+ if not prefix or key.startswith(prefix):
+ yield key
+
+ def raw_fetch(self, key, fh):
+ with self.lock:
+ path = self._key_to_path(key)
+ try:
+ with open(path + '.dat', 'rb') as src:
+ fh.seek(0)
+ shutil.copyfileobj(src, fh)
+ with open(path + '.meta', 'rb') as src:
+ metadata = pickle.load(src)
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ raise NoSuchObject(key)
+ else:
+ raise
+
+ return metadata
+
+ def raw_store(self, key, fh, metadata):
+ with self.lock:
+ path = self._key_to_path(key)
+ fh.seek(0)
+ try:
+ dest = open(path + '.dat', 'wb')
+ except IOError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ os.makedirs(os.path.dirname(path))
+ dest = open(path + '.dat', 'wb')
+
+ shutil.copyfileobj(fh, dest)
+ dest.close()
+
+ with open(path + '.meta', 'wb') as dest:
+ pickle.dump(metadata, dest, 2)
+
+ def copy(self, src, dest):
+ with self.lock:
+ if not isinstance(src, str):
+ raise TypeError('key must be of type str')
+
+ if not isinstance(dest, str):
+ raise TypeError('key must be of type str')
+
+ path_src = self._key_to_path(src)
+ path_dest = self._key_to_path(dest)
+
+ # Can't use shutil.copyfile() here, need to make
+ # sure destination path exists
+ try:
+ dest = open(path_dest + '.dat', 'wb')
+ except IOError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ os.makedirs(os.path.dirname(path_dest))
+ dest = open(path_dest + '.dat', 'wb')
+
+ try:
+ with open(path_src + '.dat', 'rb') as src:
+ shutil.copyfileobj(src, dest)
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ raise NoSuchObject(src)
+ else:
+ raise
+ finally:
+ dest.close()
+
+ shutil.copyfile(path_src + '.meta', path_dest + '.meta')
+
+ def rename(self, src, dest):
+ with self.lock:
+ src_path = self._key_to_path(src)
+ dest_path = self._key_to_path(dest)
+ if not os.path.exists(src_path + '.dat'):
+ raise NoSuchObject(src)
+
+ try:
+ os.rename(src_path + '.dat', dest_path + '.dat')
+ os.rename(src_path + '.meta', dest_path + '.meta')
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ os.makedirs(os.path.dirname(dest_path))
+ os.rename(src_path + '.dat', dest_path + '.dat')
+ os.rename(src_path + '.meta', dest_path + '.meta')
+
+ def _key_to_path(self, key):
+ '''Return path for given key'''
+
+ # NOTE: We must not split the path in the middle of an
+ # escape sequence, or list() will fail to work.
+
+ key = escape(key)
+
+ if not key.startswith('s3ql_data_'):
+ return os.path.join(self.name, key)
+
+ no = key[10:]
+ path = [ self.name, 's3ql_data_']
+ for i in range(0, len(no), 3):
+ path.append(no[:i])
+ path.append(key)
+
+ return os.path.join(*path)
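+
+# Illustrative mapping sketch (hypothetical bucket name 'bkt'): data keys
+# are fanned out into nested directories three characters at a time, so
+# _key_to_path('s3ql_data_123456') yields
+# 'bkt/s3ql_data_/123/s3ql_data_123456'.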
+
+def escape(s):
+ '''Escape '/', '=' and '\0' in s'''
+
+ s = s.replace('=', '=3D')
+ s = s.replace('/', '=2F')
+ s = s.replace('\0', '=00')
+
+ return s
+
+def unescape(s):
+ '''Un-Escape '/', '=' and '\0' in s'''
+
+ s = s.replace('=2F', '/')
+ s = s.replace('=00', '\0')
+ s = s.replace('=3D', '=')
+
+ return s
+
+
diff --git a/src/s3ql/backends/s3.py b/src/s3ql/backends/s3.py
new file mode 100644
index 0000000..a536c1c
--- /dev/null
+++ b/src/s3ql/backends/s3.py
@@ -0,0 +1,382 @@
+'''
+s3.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+# Python boto uses several deprecated modules, deactivate warnings for them
+import warnings
+warnings.filterwarnings("ignore", "", DeprecationWarning, "boto")
+
+from .common import AbstractConnection, AbstractBucket, NoSuchBucket, NoSuchObject
+from time import sleep
+from .boto.s3.connection import S3Connection
+from contextlib import contextmanager
+from .boto import exception
+from s3ql.common import (TimeoutError, AsyncFn)
+import logging
+import errno
+import httplib
+import re
+import time
+import threading
+
+log = logging.getLogger("backend.s3")
+
+class Connection(AbstractConnection):
+ """Represents a connection to Amazon S3
+
+ This class just dispatches everything to boto. It uses a separate boto
+ connection object for each thread.
+
+ This class is threadsafe. All methods (except for internal methods
+ starting with underscore) may be called concurrently by different
+ threads.
+ """
+
+ def __init__(self, awskey, awspass, use_ssl,
+ reduced_redundancy=False):
+ super(Connection, self).__init__()
+ self.awskey = awskey
+ self.awspass = awspass
+ self.pool = list()
+ self.lock = threading.RLock()
+ self.conn_cnt = 0
+ self.use_ssl = use_ssl
+ self.reduced_redundancy = reduced_redundancy
+
+ def _pop_conn(self):
+ '''Get boto connection object from the pool'''
+
+ with self.lock:
+ try:
+ conn = self.pool.pop()
+ except IndexError:
+ # Need to create a new connection
+ log.debug("Creating new boto connection (active conns: %d)...",
+ self.conn_cnt)
+ conn = S3Connection(self.awskey, self.awspass,
+ is_secure=self.use_ssl)
+ self.conn_cnt += 1
+
+ return conn
+
+ def _push_conn(self, conn):
+ '''Return boto connection object to pool'''
+ with self.lock:
+ self.pool.append(conn)
+
+ def delete_bucket(self, name, recursive=False):
+ """Delete bucket"""
+
+ if not recursive:
+ with self._get_boto() as boto:
+ boto.delete_bucket(name)
+ return
+
+ # Delete recursively
+ with self._get_boto() as boto:
+ step = 1
+ waited = 0
+ while waited < 600:
+ try:
+ boto.delete_bucket(name)
+ except exception.S3ResponseError as exc:
+ if exc.code != 'BucketNotEmpty':
+ raise
+ else:
+ return
+ self.get_bucket(name, passphrase=None).clear()
+ time.sleep(step)
+ waited += step
+ step *= 2
+
+ raise RuntimeError('Bucket did not become empty within 10 minutes')
+
+
+ @contextmanager
+ def _get_boto(self):
+ """Provide boto connection object"""
+
+ conn = self._pop_conn()
+ try:
+ yield conn
+ finally:
+ self._push_conn(conn)
+
+ def create_bucket(self, name, location, passphrase=None,
+ compression='lzma'):
+ """Create and return an S3 bucket
+
+ Note that a call to `get_bucket` right after creation may fail,
+ since the changes do not propagate instantaneously through AWS.
+ """
+ # Argument number deliberately differs from base class
+ #pylint: disable-msg=W0221
+
+ self.check_name(name)
+ with self._get_boto() as boto:
+ try:
+ boto.create_bucket(name, location=location)
+ except exception.S3ResponseError as exc:
+ if exc.code == 'InvalidBucketName':
+ raise InvalidBucketNameError()
+ else:
+ raise
+
+ return Bucket(self, name, passphrase, compression)
+
+ def check_name(self, name):
+ '''Check if bucket name conforms to requirements
+
+ Raises `InvalidBucketName` for invalid names.
+ '''
+
+ if (not re.match('^[a-z0-9][a-z0-9.-]{1,60}[a-z0-9]$', name)
+ or '..' in name
+ or '.-' in name
+ or '-.' in name
+ or re.match('^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', name)):
+ raise InvalidBucketNameError()
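+
+    # Illustrative sketch (hypothetical names, not part of the S3QL API):
+    # check_name() accepts DNS-style names and rejects IP-like ones.
+    def _example_check_name(self):
+        self.check_name('my-bucket.example')     # passes silently
+        try:
+            self.check_name('192.168.0.1')       # IP-like, rejected
+        except InvalidBucketNameError:
+            pass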
+
+ def get_bucket(self, name, passphrase=None, compression='lzma'):
+ """Return a bucket instance for the bucket `name`"""
+
+ self.check_name(name)
+
+ with self._get_boto() as boto:
+ try:
+ boto.get_bucket(name)
+ except exception.S3ResponseError as e:
+ if e.status == 404:
+ raise NoSuchBucket(name)
+ elif e.code == 'InvalidBucketName':
+ raise InvalidBucketNameError()
+ else:
+ raise
+ return Bucket(self, name, passphrase, compression)
+
+class Bucket(AbstractBucket):
+ """Represents a bucket stored in Amazon S3.
+
+ This class should not be instantiated directly, but using
+ `Connection.get_bucket()`.
+
+ Due to AWS' eventual consistency model, we may receive e.g. an 'unknown
+ bucket' error when we try to upload a key into a newly created bucket. For
+ this reason, many boto calls are wrapped with `retry_boto`. Note that this
+ assumes that no one else is messing with the bucket at the same time.
+
+ This class is threadsafe. All methods (except for internal methods
+ starting with underscore) may be called concurrently by different
+ threads.
+ """
+
+ @contextmanager
+ def _get_boto(self):
+ '''Provide boto bucket object'''
+ # Access to protected methods ok
+ #pylint: disable-msg=W0212
+
+ boto_conn = self.conn._pop_conn()
+ try:
+ yield retry_boto(boto_conn.get_bucket, self.name)
+ finally:
+ self.conn._push_conn(boto_conn)
+
+ def __init__(self, conn, name, passphrase, compression):
+ super(Bucket, self).__init__(passphrase, compression)
+ self.conn = conn
+ self.name = name
+ with self._get_boto() as boto:
+ self.rac_consistent = (boto.get_location() != '')
+
+ def clear(self):
+ """Delete all objects in bucket
+
+ This function starts multiple threads."""
+
+ threads = list()
+ for (no, s3key) in enumerate(self):
+ if no != 0 and no % 1000 == 0:
+ log.info('Deleted %d objects so far..', no)
+
+ log.debug('Deleting key %s', s3key)
+
+ # Ignore missing objects when clearing bucket
+ t = AsyncFn(self.delete, s3key, True)
+ t.start()
+ threads.append(t)
+
+ if len(threads) > 50:
+ log.debug('50 threads reached, waiting..')
+ threads.pop(0).join_and_raise()
+
+ log.debug('Waiting for removal threads')
+ for t in threads:
+ t.join_and_raise()
+
+ def __str__(self):
+ if self.passphrase:
+ return '<encrypted s3 bucket, name=%r>' % self.name
+ else:
+ return '<s3 bucket, name=%r>' % self.name
+
+ def contains(self, key):
+ with self._get_boto() as boto:
+ bkey = retry_boto(boto.get_key, key)
+
+ return bkey is not None
+
+ def read_after_create_consistent(self):
+ return self.rac_consistent
+
+ def read_after_write_consistent(self):
+ return False
+
+ def read_after_delete_consistent(self):
+ return False
+
+ def raw_lookup(self, key):
+ '''Retrieve metadata for `key`
+
+ If the key has been lost (S3 returns 405), it is automatically
+ deleted so that it will no longer be returned by list_keys.
+ '''
+ with self._get_boto() as boto:
+ bkey = _get_boto_key(boto, key)
+
+ if bkey is None:
+ raise NoSuchObject(key)
+
+ return bkey.metadata
+
+ def delete(self, key, force=False):
+ """Deletes the specified key
+
+ ``bucket.delete(key)`` can also be written as ``del bucket[key]``.
+ If `force` is true, do not raise an error if the key does not exist.
+ """
+
+ if not isinstance(key, str):
+ raise TypeError('key must be of type str')
+
+ with self._get_boto() as boto:
+ if not force and retry_boto(boto.get_key, key) is None:
+ raise NoSuchObject(key)
+
+ retry_boto(boto.delete_key, key)
+
+ def list(self, prefix=''):
+ with self._get_boto() as boto:
+ for bkey in boto.list(prefix):
+ yield bkey.name
+
+ def raw_fetch(self, key, fh):
+ '''Fetch `key` and store in `fh`
+
+ If the key has been lost (S3 returns 405), it is automatically
+ deleted so that it will no longer be returned by list_keys.
+ '''
+
+ with self._get_boto() as boto:
+ bkey = _get_boto_key(boto, key)
+
+ if bkey is None:
+ raise NoSuchObject(key)
+ fh.seek(0)
+ retry_boto(bkey.get_contents_to_file, fh)
+
+ return bkey.metadata
+
+ def raw_store(self, key, fh, metadata):
+ with self._get_boto() as boto:
+ bkey = boto.new_key(key)
+ bkey.metadata.update(metadata)
+ retry_boto(bkey.set_contents_from_file, fh,
+ reduced_redundancy=self.conn.reduced_redundancy)
+
+
+ def copy(self, src, dest):
+ if not isinstance(src, str):
+ raise TypeError('key must be of type str')
+
+ if not isinstance(dest, str):
+ raise TypeError('key must be of type str')
+
+ with self._get_boto() as boto:
+ retry_boto(boto.copy_key, dest, self.name, src)
+
+def _get_boto_key(boto, key):
+ '''Get boto key object for `key`
+
+ If the key has been lost (S3 returns 405), it is automatically
+ deleted so that it will no longer be returned by list_keys.
+ '''
+
+ try:
+ return retry_boto(boto.get_key, key)
+ except exception.S3ResponseError as exc:
+ if exc.error_code != 'MethodNotAllowed':
+ raise
+
+ # Object was lost
+ log.warn('Object %s has been lost by Amazon, deleting..', key)
+ retry_boto(boto.delete_key, key)
+ return None
+
+def retry_boto(fn, *a, **kw):
+ """Wait for fn(*a, **kw) to succeed
+
+ If `fn(*a, **kw)` raises any of
+
+ - `boto.exception.S3ResponseError` with error code
+ `NoSuchBucket`, `RequestTimeout` or `InternalError`
+ - `boto.exception.S3CopyError` with error code
+ `RequestTimeout` or `InternalError`
+ - `IOError` with errno `ECONNRESET`
+ - `httplib.IncompleteRead`
+
+ the function is called again after a short delay. If the total timeout of
+ 300 seconds is reached, `TimeoutError` is raised.
+ """
+
+ step = 0.2
+ timeout = 300
+ waited = 0
+ while waited < timeout:
+ try:
+ return fn(*a, **kw)
+ except exception.S3ResponseError as exc:
+ if exc.error_code in ('NoSuchBucket', 'RequestTimeout', 'InternalError'):
+ log.warn('Encountered %s error when calling %s, retrying...',
+ exc.error_code, fn.__name__)
+ else:
+ raise
+ except IOError as exc:
+ if exc.errno == errno.ECONNRESET:
+ pass
+ else:
+ raise
+ except exception.S3CopyError as exc:
+ if exc.error_code in ('RequestTimeout', 'InternalError'):
+ log.warn('Encountered %s error when calling %s, retrying...',
+ exc.error_code, fn.__name__)
+ else:
+ raise
+ except httplib.IncompleteRead as exc:
+ log.warn('Encountered IncompleteRead error when calling %s, retrying...',
+ fn.__name__)
+
+ sleep(step)
+ waited += step
+ if step < timeout / 30:
+ step *= 2
+
+ raise TimeoutError()
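+
+# Illustrative usage sketch (hypothetical key name): a single boto call is
+# wrapped so that transient S3 errors are retried with exponential backoff
+# (starting at 0.2 s, doubling, capped at 1/30 of the 300 s timeout).
+def _example_retry_boto(boto_bucket):
+    return retry_boto(boto_bucket.get_key, 'some-key')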
+
+class InvalidBucketNameError(Exception):
+
+ def __str__(self):
+ return 'Bucket name contains invalid characters.'
diff --git a/src/s3ql/backends/sftp.py b/src/s3ql/backends/sftp.py
new file mode 100644
index 0000000..5900901
--- /dev/null
+++ b/src/s3ql/backends/sftp.py
@@ -0,0 +1,349 @@
+'''
+sftp.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
+Copyright (C) 2010 Ron Knapp <ron.siesta@gmail.com>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from .common import AbstractConnection, AbstractBucket, NoSuchBucket, NoSuchObject
+import logging
+import errno
+import shutil
+import cPickle as pickle
+import os
+import stat
+import paramiko
+import threading
+
+log = logging.getLogger("backend.sftp")
+
+
+class Connection(AbstractConnection):
+ '''
+ Provides a connection to an SFTP server.
+
+ This class is threadsafe. All methods (except for internal methods
+ starting with underscore) may be called concurrently by different
+ threads.
+ '''
+
+ def __init__(self, host, port, login, password):
+ super(Connection, self).__init__()
+
+ self.port = port or 22
+ self.host = host
+ self.login = login
+ self.password = password
+
+ self._client = None
+ self.sftp = None
+
+ self._setup_ssh_connection()
+
+ self.lock = threading.RLock()
+
+ def _setup_ssh_connection(self):
+
+ self._client = paramiko.SSHClient()
+ # Probably not a good idea to do this by default
+ #self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self._client.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
+ self._client.connect(self.host, port=self.port, username=self.login, password=self.password)
+ self.sftp = self._client.open_sftp()
+
+ # We don't want the connection to time out
+ self._client.get_transport().set_keepalive(300)
+
+ def __contains__(self, entry):
+ with self.lock:
+ try:
+ self.sftp.stat(entry)
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ return False
+ else:
+ raise
+ else:
+ return True
+
+ def delete_bucket(self, name, recursive=False):
+ """Delete bucket"""
+
+ with self.lock:
+ if name not in self:
+ raise NoSuchBucket(name)
+
+ if recursive:
+ self._rmtree(name)
+
+ self.sftp.rmdir(name)
+
+ def _rmtree(self, path):
+ '''Recursively delete contents of remote path'''
+
+ for attr in self.sftp.listdir_attr(path):
+ fullname = '%s/%s' % (path, attr.filename)
+ if stat.S_ISDIR(attr.st_mode):
+ self._rmtree(fullname)
+ self.sftp.rmdir(fullname)
+ else:
+ self.sftp.remove(fullname)
+
+
+ def create_bucket(self, name, passphrase=None, compression='lzma'):
+ """Create and return bucket"""
+
+ with self.lock:
+ self.sftp.mkdir(name)
+ return self.get_bucket(name, passphrase, compression)
+
+ def get_bucket(self, name, passphrase=None, compression='lzma'):
+ """Return Bucket instance for the bucket `name`"""
+
+ with self.lock:
+ if name not in self:
+ raise NoSuchBucket(name)
+
+ return Bucket(self, name, passphrase, compression)
+
+ def close(self):
+ with self.lock:
+ self._client.close()
+
+ def prepare_fork(self):
+ with self.lock:
+ self._client.close()
+
+ def finish_fork(self):
+ with self.lock:
+ self._setup_ssh_connection()
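+
+    # Illustrative note (an assumption about intended use, not stated in
+    # this file): a paramiko transport does not survive os.fork(), so a
+    # caller that forks is expected to bracket the fork:
+    #
+    #   conn.prepare_fork()
+    #   pid = os.fork()
+    #   conn.finish_fork()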
+
+class Bucket(AbstractBucket):
+ '''
+ Stores data remotely on an SFTP server.
+
+ This class is threadsafe. All methods (except for internal methods
+ starting with underscore) may be called concurrently by different
+ threads.
+ '''
+
+ def __init__(self, conn, name, passphrase, compression):
+ super(Bucket, self).__init__(passphrase, compression)
+ self.conn = conn
+ self.name = name
+
+ def _key_to_path(self, key):
+ '''Return path for given key'''
+
+ key = _escape(key)
+
+ if not key.startswith('s3ql_data_'):
+ return os.path.join(self.name, key)
+
+ no = key[10:]
+ path = [ self.name, 's3ql_data']
+ for i in range(0, len(no), 3):
+ path.append(no[:i])
+ path.append(key)
+
+ return os.path.join(*path)
+
+ def __str__(self):
+ return '<sftp bucket, name=%r>' % self.name
+
+ def read_after_create_consistent(self):
+ return True
+
+ def read_after_write_consistent(self):
+ return True
+
+ def read_after_delete_consistent(self):
+ return True
+
+ def clear(self):
+ # Access to protected member ok
+ #pylint: disable=W0212
+ with self.conn.lock:
+ self.conn._rmtree(self.name)
+
+ def contains(self, key):
+ with self.conn.lock:
+ return (self._key_to_path(key) + '.dat') in self.conn
+
+ def raw_lookup(self, key):
+ with self.conn.lock:
+ path = self._key_to_path(key)
+ try:
+ src = self.conn.sftp.open(path + '.meta', 'rb')
+ return pickle.load(src)
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ raise NoSuchObject(key)
+ else:
+ raise
+
+ def delete(self, key, force=False):
+ with self.conn.lock:
+ path = self._key_to_path(key)
+
+ try:
+ self.conn.sftp.remove(path + '.dat')
+ self.conn.sftp.remove(path + '.meta')
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ if force:
+ pass
+ else:
+ raise NoSuchObject(key)
+ else:
+ raise
+
+ def list(self, prefix=''):
+ with self.conn.lock:
+ if prefix:
+ base = os.path.dirname(self._key_to_path(prefix))
+ else:
+ base = self.name
+
+ for (path, dirnames, filenames) in self._walk(base):
+
+ # Do not look in wrong directories
+ if prefix:
+ rpath = path[len(self.name):] # path relative to bucket root
+ prefix_l = ''.join(rpath.split('/'))
+
+ dirs_to_walk = list()
+ for name in dirnames:
+ prefix_ll = _unescape(prefix_l + name)
+ if prefix_ll.startswith(prefix[:len(prefix_ll)]):
+ dirs_to_walk.append(name)
+ dirnames[:] = dirs_to_walk
+
+ for name in filenames:
+ if not name.endswith('.dat'):
+ continue
+ key = _unescape(name[:-4])
+
+ if not prefix or key.startswith(prefix):
+ yield key
+
+ def _walk(self, base):
+ '''Iterate recursively over directories, like os.walk'''
+
+ to_visit = [ base ]
+ while to_visit:
+ base = to_visit.pop()
+ files = list()
+ for attr in self.conn.sftp.listdir_attr(base):
+ if stat.S_ISDIR(attr.st_mode):
+ to_visit.append('%s/%s' % (base, attr.filename))
+ else:
+ files.append(attr.filename)
+ yield (base, to_visit, files)
+
+ def _makedirs(self, path):
+ '''Like os.makedirs, but over sftp'''
+
+ cur = '/'
+ done = False
+ for el in path.split('/'):
+ cur = '%s/%s' % (cur, el)
+ if cur not in self.conn:
+ self.conn.sftp.mkdir(cur)
+ done = True
+
+ if not done:
+ err = OSError('Entry already exists: %s' % cur)
+ err.errno = errno.EEXIST
+ raise err
+
+ def raw_fetch(self, key, fh):
+ with self.conn.lock:
+ path = self._key_to_path(key)
+ try:
+ src = self.conn.sftp.open(path + '.dat', 'r')
+ src.prefetch()
+ fh.seek(0)
+ shutil.copyfileobj(src, fh)
+ src.close()
+
+ src = self.conn.sftp.open(path + '.meta', 'r')
+ src.prefetch()
+ metadata = pickle.load(src)
+ src.close()
+
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ raise NoSuchObject(key)
+ else:
+ raise
+
+ return metadata
+
+ def raw_store(self, key, fh, metadata):
+ with self.conn.lock:
+ path = self._key_to_path(key)
+ fh.seek(0)
+
+ try:
+ dest = self.conn.sftp.open(path + '.dat', 'w')
+ dest.set_pipelined(True)
+ except IOError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ self._makedirs(os.path.dirname(path))
+ dest = self.conn.sftp.open(path + '.dat', 'w')
+ dest.set_pipelined(True)
+
+ shutil.copyfileobj(fh, dest)
+ dest.close()
+
+ dest = self.conn.sftp.open(path + '.meta', 'w')
+ dest.set_pipelined(True)
+ pickle.dump(metadata, dest, 2)
+ dest.close()
+
+ def rename(self, src, dest):
+ with self.conn.lock:
+ src_path = self._key_to_path(src)
+ dest_path = self._key_to_path(dest)
+
+ try:
+ self.conn.sftp.lstat(src_path + '.dat')
+ except IOError as exc:
+ if exc.errno == errno.ENOENT:
+ raise NoSuchObject(src)
+ else:
+ raise
+
+ try:
+ self.conn.sftp.rename(src_path + '.dat', dest_path + '.dat')
+ self.conn.sftp.rename(src_path + '.meta', dest_path + '.meta')
+ except IOError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ self._makedirs(os.path.dirname(dest_path))
+ self.conn.sftp.rename(src_path + '.dat', dest_path + '.dat')
+ self.conn.sftp.rename(src_path + '.meta', dest_path + '.meta')
+
+def _escape(s):
+ '''Escape '/', '=' and '\0' in s'''
+
+ s = s.replace('=', '=3D')
+ s = s.replace('/', '=2F')
+ s = s.replace('\0', '=00')
+
+ return s
+
+def _unescape(s):
+ '''Un-Escape '/', '=' and '\0' in s'''
+
+ s = s.replace('=2F', '/')
+ s = s.replace('=00', '\0')
+ s = s.replace('=3D', '=')
+
+ return s
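+
+# Round-trip sketch: note that _escape must replace '=' first (and
+# _unescape must restore it last), otherwise the escape sequences
+# themselves would be corrupted:
+#
+#   _escape('dir/key=1')            == 'dir=2Fkey=3D1'
+#   _unescape(_escape('dir/key=1')) == 'dir/key=1'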
diff --git a/src/s3ql/block_cache.py b/src/s3ql/block_cache.py
new file mode 100644
index 0000000..f4bf152
--- /dev/null
+++ b/src/s3ql/block_cache.py
@@ -0,0 +1,547 @@
+'''
+block_cache.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from contextlib import contextmanager
+from .multi_lock import MultiLock
+from .backends.common import NoSuchObject
+from .ordered_dict import OrderedDict
+from .common import EmbeddedException, ExceptionStoringThread
+from .thread_group import ThreadGroup
+from .upload_manager import UploadManager, RemoveThread, retry_exc
+from .database import NoSuchRowError
+from llfuse import lock, lock_released
+import logging
+import os
+import threading
+import time
+import stat
+
+__all__ = [ "BlockCache" ]
+
+# standard logger for this module
+log = logging.getLogger("BlockCache")
+
+
+class CacheEntry(file):
+ """An element in the block cache
+
+ If `obj_id` is `None`, then the object has not yet been
+ uploaded to the backend.
+
+ Attributes:
+ -----------
+
+ :modified_after_upload:
+ This attribute is only significant when the cache entry
+ is currently being uploaded. At the beginning of the upload,
+ it is set to False. For any write access, it is set to True.
+ If it is still False when the upload has completed,
+      `dirty` is set to False and the object loses the ``.d`` suffix
+ in its name.
+
+ """
+
+ __slots__ = [ 'dirty', 'obj_id', 'inode', 'blockno', 'last_access',
+ 'modified_after_upload' ]
+
+ def __init__(self, inode, blockno, obj_id, filename, mode):
+ super(CacheEntry, self).__init__(filename, mode)
+ self.dirty = False
+ self.modified_after_upload = False
+ self.obj_id = obj_id
+ self.inode = inode
+ self.blockno = blockno
+ self.last_access = 0
+
+ def truncate(self, *a, **kw):
+ if not self.dirty:
+ os.rename(self.name, self.name + '.d')
+ self.dirty = True
+ self.modified_after_upload = True
+ return super(CacheEntry, self).truncate(*a, **kw)
+
+ def write(self, *a, **kw):
+ if not self.dirty:
+ os.rename(self.name, self.name + '.d')
+ self.dirty = True
+ self.modified_after_upload = True
+ return super(CacheEntry, self).write(*a, **kw)
+
+ def writelines(self, *a, **kw):
+ if not self.dirty:
+ os.rename(self.name, self.name + '.d')
+ self.dirty = True
+ self.modified_after_upload = True
+ return super(CacheEntry, self).writelines(*a, **kw)
+
+ def __str__(self):
+ return ('<CacheEntry, inode=%d, blockno=%d, dirty=%s, obj_id=%r>' %
+ (self.inode, self.blockno, self.dirty, self.obj_id))
+
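+# Behaviour sketch (hypothetical cache file name): any write access marks
+# the entry as dirty and renames the backing file to carry a '.d' suffix:
+#
+#   el = CacheEntry(1, 0, None, '/tmp/inode_1_block_0', 'w+b')
+#   el.write('foo')           # file renamed to /tmp/inode_1_block_0.d
+#   assert el.dirty and el.modified_after_upload
+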
+MAX_REMOVAL_THREADS = 25
+class BlockCache(object):
+ """Provides access to file blocks
+
+ This class manages access to file blocks. It takes care of creation,
+ uploading, downloading and deduplication.
+
+    This class uses the llfuse global lock. Methods that release the lock
+    are marked as such in their docstrings.
+
+
+ Attributes:
+ -----------
+
+ :mlock: locks on (inode, blockno) during `get`, so that we do not
+ download the same object with more than one thread.
+ :encountered_errors: This attribute is set if some non-fatal errors
+ were encountered during asynchronous operations (for
+ example, an object that was supposed to be deleted did
+ not exist).
+ """
+
+ def __init__(self, bucket, db, cachedir, max_size, max_entries=768):
+ log.debug('Initializing')
+ self.cache = OrderedDict()
+ self.cachedir = cachedir
+ self.max_size = max_size
+ self.max_entries = max_entries
+ self.size = 0
+ self.db = db
+ self.bucket = bucket
+ self.mlock = MultiLock()
+ self.removal_queue = ThreadGroup(MAX_REMOVAL_THREADS)
+ self.upload_manager = UploadManager(bucket, db, self.removal_queue)
+ self.commit_thread = CommitThread(self)
+ self.encountered_errors = False
+
+ def init(self):
+ log.debug('init: start')
+ if not os.path.exists(self.cachedir):
+ os.mkdir(self.cachedir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+ self.commit_thread.start()
+ log.debug('init: end')
+
+ def destroy(self):
+ log.debug('destroy: start')
+
+ # If there were errors, we still want to try to finalize
+ # as much as we can
+ try:
+ self.commit_thread.stop()
+ except Exception as exc:
+ self.encountered_errors = True
+ if isinstance(exc, EmbeddedException):
+ log.error('CommitThread encountered exception.')
+ else:
+ log.exception('Error when stopping commit thread')
+
+ try:
+ self.clear()
+ except:
+ self.encountered_errors = True
+ log.exception('Error when clearing cache')
+
+ while True:
+ try:
+ self.upload_manager.join_all()
+ except Exception as exc:
+ self.encountered_errors = True
+ if isinstance(exc, EmbeddedException):
+ log.error('UploadManager encountered exception.')
+ else:
+ log.exception('Error when joining UploadManager')
+ break
+ else:
+ break
+
+ while True:
+ try:
+ self.removal_queue.join_all()
+ except Exception as exc:
+ self.encountered_errors = True
+ if isinstance(exc, EmbeddedException):
+ log.error('RemovalQueue encountered exception.')
+ else:
+ log.exception('Error when waiting for removal queue:')
+ break
+ else:
+ break
+
+ if self.upload_manager.encountered_errors:
+ self.encountered_errors = True
+
+ os.rmdir(self.cachedir)
+ log.debug('destroy: end')
+
+ def get_bucket_size(self):
+ '''Return total size of the underlying bucket'''
+
+ return self.bucket.get_size()
+
+ def __len__(self):
+ '''Get number of objects in cache'''
+ return len(self.cache)
+
+ @contextmanager
+ def get(self, inode, blockno):
+ """Get file handle for block `blockno` of `inode`
+
+ This method releases the global lock.
+
+ Note: if `get` and `remove` are called concurrently, then it is
+ possible that a block that has been requested with `get` and
+ passed to `remove` for deletion will not be deleted.
+ """
+
+ log.debug('get(inode=%d, block=%d): start', inode, blockno)
+
+ if self.size > self.max_size or len(self.cache) > self.max_entries:
+ self.expire()
+
+ # Need to release global lock to acquire mlock to prevent deadlocking
+ lock.release()
+ with self.mlock(inode, blockno):
+ lock.acquire()
+
+ try:
+ el = self.cache[(inode, blockno)]
+
+ # Not in cache
+ except KeyError:
+ filename = os.path.join(self.cachedir,
+ 'inode_%d_block_%d' % (inode, blockno))
+ try:
+ obj_id = self.db.get_val("SELECT obj_id FROM blocks WHERE inode=? AND blockno=?",
+ (inode, blockno))
+
+ # No corresponding object
+ except NoSuchRowError:
+ log.debug('get(inode=%d, block=%d): creating new block', inode, blockno)
+ el = CacheEntry(inode, blockno, None, filename, "w+b")
+
+ # Need to download corresponding object
+ else:
+ log.debug('get(inode=%d, block=%d): downloading block', inode, blockno)
+ el = CacheEntry(inode, blockno, obj_id, filename, "w+b")
+ with lock_released:
+ try:
+ if self.bucket.read_after_create_consistent():
+ self.bucket.fetch_fh('s3ql_data_%d' % obj_id, el)
+ else:
+ retry_exc(300, [ NoSuchObject ], self.bucket.fetch_fh,
+ 's3ql_data_%d' % obj_id, el)
+ except:
+ os.unlink(filename)
+ raise
+
+ # Writing will have set dirty flag
+ el.dirty = False
+ os.rename(el.name + '.d', el.name)
+
+ self.size += os.fstat(el.fileno()).st_size
+
+ self.cache[(inode, blockno)] = el
+
+ # In Cache
+ else:
+ log.debug('get(inode=%d, block=%d): in cache', inode, blockno)
+ self.cache.to_head((inode, blockno))
+
+
+ el.last_access = time.time()
+ oldsize = os.fstat(el.fileno()).st_size
+
+ # Provide fh to caller
+ try:
+ log.debug('get(inode=%d, block=%d): yield', inode, blockno)
+ yield el
+ finally:
+ # Update cachesize
+ el.flush()
+ newsize = os.fstat(el.fileno()).st_size
+ self.size += newsize - oldsize
+
+ log.debug('get(inode=%d, block=%d): end', inode, blockno)
+
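+    # Typical use by the file system code (illustrative sketch; `offset`
+    # and `length` are whatever the caller needs). The caller must hold
+    # the global llfuse lock:
+    #
+    #   with self.get(inode, blockno) as fh:
+    #       fh.seek(offset)
+    #       data = fh.read(length)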
+
+ def expire(self):
+ """Perform cache expiry
+
+ This method releases the global lock.
+ """
+
+ # Note that we have to make sure that the cache entry is written into
+ # the database before we remove it from the cache!
+
+ log.debug('expire: start')
+
+ while (len(self.cache) > self.max_entries or
+ (len(self.cache) > 0 and self.size > self.max_size)):
+
+ need_size = self.size - self.max_size
+ need_entries = len(self.cache) - self.max_entries
+
+ # Try to expire entries that are not dirty
+ for el in self.cache.values_rev():
+ if el.dirty:
+ if (el.inode, el.blockno) not in self.upload_manager.in_transit:
+ log.debug('expire: %s is dirty, trying to flush', el)
+ break
+ else:
+ continue
+
+ del self.cache[(el.inode, el.blockno)]
+ size = os.fstat(el.fileno()).st_size
+ el.close()
+ os.unlink(el.name)
+ need_entries -= 1
+ self.size -= size
+ need_size -= size
+
+ if need_size <= 0 and need_entries <= 0:
+ break
+
+ if need_size <= 0 and need_entries <= 0:
+ break
+
+ # If nothing is being uploaded, try to upload just enough
+ if not self.upload_manager.upload_in_progress():
+            for el in self.cache.values_rev():
+                if el.dirty and (el.inode, el.blockno) not in self.upload_manager.in_transit:
+                    log.debug('expire: uploading %s..', el)
+                    freed = self.upload_manager.add(el) # Releases global lock
+ need_size -= freed
+ else:
+ need_size -= os.fstat(el.fileno()).st_size
+ need_entries -= 1
+
+ if need_size <= 0 and need_entries <= 0:
+ break
+
+ # Wait for the next entry
+ log.debug('expire: waiting for upload threads..')
+ self.upload_manager.join_one() # Releases global lock
+
+ log.debug('expire: end')
+
+
+ def remove(self, inode, start_no, end_no=None):
+ """Remove blocks for `inode`
+
+ If `end_no` is not specified, remove just the `start_no` block.
+ Otherwise removes all blocks from `start_no` to, but not including,
+ `end_no`.
+
+ This method releases the global lock.
+
+ Note: if `get` and `remove` are called concurrently, then it
+ is possible that a block that has been requested with `get` and
+ passed to `remove` for deletion will not be deleted.
+ """
+
+ log.debug('remove(inode=%d, start=%d, end=%s): start',
+ inode, start_no, end_no)
+
+ if end_no is None:
+ end_no = start_no + 1
+
+ for blockno in range(start_no, end_no):
+ # We can't use self.mlock here to prevent simultaneous retrieval
+ # of the block with get(), because this could deadlock
+ if (inode, blockno) in self.cache:
+ # Type inference fails here
+ #pylint: disable-msg=E1103
+ el = self.cache.pop((inode, blockno))
+
+ self.size -= os.fstat(el.fileno()).st_size
+ el.close()
+ if el.dirty:
+ os.unlink(el.name + '.d')
+ else:
+ os.unlink(el.name)
+
+ if el.obj_id is None:
+ log.debug('remove(inode=%d, blockno=%d): block only in cache',
+ inode, blockno)
+ continue
+
+ log.debug('remove(inode=%d, blockno=%d): block in cache and db', inode, blockno)
+ obj_id = el.obj_id
+
+ else:
+ try:
+ obj_id = self.db.get_val('SELECT obj_id FROM blocks WHERE inode=? '
+ 'AND blockno = ?', (inode, blockno))
+ except NoSuchRowError:
+ log.debug('remove(inode=%d, blockno=%d): block does not exist',
+ inode, blockno)
+ continue
+
+ log.debug('remove(inode=%d, blockno=%d): block only in db ', inode, blockno)
+
+ self.db.execute('DELETE FROM blocks WHERE inode=? AND blockno=?',
+ (inode, blockno))
+
+ refcount = self.db.get_val('SELECT refcount FROM objects WHERE id=?', (obj_id,))
+ if refcount > 1:
+ log.debug('remove(inode=%d, blockno=%d): decreasing refcount for object %d',
+ inode, blockno, obj_id)
+ self.db.execute('UPDATE objects SET refcount=refcount-1 WHERE id=?',
+ (obj_id,))
+ to_delete = False
+ else:
+ log.debug('remove(inode=%d, blockno=%d): deleting object %d',
+ inode, blockno, obj_id)
+ self.db.execute('DELETE FROM objects WHERE id=?', (obj_id,))
+ to_delete = True
+
+ if to_delete:
+ try:
+ # Releases global lock:
+ self.removal_queue.add_thread(RemoveThread(obj_id, self.bucket,
+ (inode, blockno),
+ self.upload_manager))
+ except EmbeddedException as exc:
+ exc = exc.exc_info[1]
+ if isinstance(exc, NoSuchObject):
+ log.warn('Backend seems to have lost object %s', exc.key)
+ self.encountered_errors = True
+ else:
+ raise
+
+ log.debug('remove(inode=%d, start=%d, end=%s): end',
+ inode, start_no, end_no)
+
+ def flush(self, inode):
+ """Flush buffers for `inode`"""
+
+ # Cache entries are automatically flushed after each read()
+ # and write()
+ pass
+
+ def commit(self):
+ """Upload all dirty blocks
+
+ This method uploads all dirty blocks. The object itself may
+ still be in transit when the method returns, but the
+ blocks table is guaranteed to refer to the correct objects.
+
+ This method releases the global lock.
+ """
+
+ in_transit = set()
+
+ for el in self.cache.itervalues():
+ if not el.dirty:
+ continue
+
+ if (el.inode, el.blockno) in self.upload_manager.in_transit:
+ if not el.modified_after_upload:
+ continue
+
+ # We need to wait for the current upload to complete
+ in_transit.add(el)
+ else:
+ self.upload_manager.add(el) # Releases global lock
+
+ while in_transit:
+ log.warn('commit(): in_transit: %s', in_transit)
+ self.upload_manager.join_one()
+ finished = in_transit.difference(self.upload_manager.in_transit)
+ in_transit = in_transit.intersection(self.upload_manager.in_transit)
+
+ for el in finished:
+                # Object may no longer be dirty or may be in transit again
+                # if a different thread initiated the upload while
+                # the global lock was released in a previous iteration.
+                if not el.dirty:
+                    continue
+ if (el.inode, el.blockno) in self.upload_manager.in_transit:
+ continue
+
+ self.upload_manager.add(el) # Releases global lock
+
+
+ def clear(self):
+ """Upload all dirty data and clear cache
+
+ When the method returns, all blocks have been registered
+ in the database, but the actual uploads may still be
+ in progress.
+
+ This method releases the global lock.
+ """
+
+ log.debug('clear: start')
+ bak = self.max_entries
+ self.max_entries = 0
+ self.expire() # Releases global lock
+ self.max_entries = bak
+ log.debug('clear: end')
+
+ def __del__(self):
+ if len(self.cache) > 0:
+ raise RuntimeError("BlockCache instance was destroyed without calling destroy()!")
+
+class CommitThread(ExceptionStoringThread):
+ '''
+ Periodically upload dirty blocks.
+
+ This class uses the llfuse global lock. When calling objects
+ passed in the constructor, the global lock is acquired first.
+ '''
+
+
+ def __init__(self, bcache):
+ super(CommitThread, self).__init__()
+ self.bcache = bcache
+ self.stop_event = threading.Event()
+ self.name = 'CommitThread'
+
+ def run_protected(self):
+ log.debug('CommitThread: start')
+
+ while not self.stop_event.is_set():
+ did_sth = False
+ stamp = time.time()
+ for el in self.bcache.cache.values_rev():
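+                # values_rev() iterates over the least-recently used entries
+                # first, so once we reach an entry that was accessed less
+                # than 10 seconds ago, all remaining entries are even more
+                # recent.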
+ if stamp - el.last_access < 10:
+ break
+ if (not el.dirty or
+ (el.inode, el.blockno) in self.bcache.upload_manager.in_transit):
+ continue
+
+ # Acquire global lock to access UploadManager instance
+ with lock:
+ if (not el.dirty or # Object may have been accessed
+ (el.inode, el.blockno) in self.bcache.upload_manager.in_transit):
+ continue
+ self.bcache.upload_manager.add(el)
+ did_sth = True
+
+ if self.stop_event.is_set():
+ break
+
+ if not did_sth:
+ self.stop_event.wait(5)
+
+ log.debug('CommitThread: end')
+
+ def stop(self):
+ '''Wait for thread to finish, raise any occurred exceptions.
+
+ This method releases the global lock.
+ '''
+
+ self.stop_event.set()
+ with lock_released:
+ self.join_and_raise()
\ No newline at end of file
diff --git a/src/s3ql/cli/__init__.py b/src/s3ql/cli/__init__.py
new file mode 100644
index 0000000..019cbde
--- /dev/null
+++ b/src/s3ql/cli/__init__.py
@@ -0,0 +1,12 @@
+'''
+__init__.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+__all__ = ['adm', 'cp', 'ctrl', 'fsck', 'lock', 'mkfs',
+ 'mount', 'remove', 'statfs', 'umount' ]
diff --git a/src/s3ql/cli/adm.py b/src/s3ql/cli/adm.py
new file mode 100644
index 0000000..1e991b0
--- /dev/null
+++ b/src/s3ql/cli/adm.py
@@ -0,0 +1,315 @@
+'''
+adm.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import logging
+from s3ql.common import (get_backend, QuietError, unlock_bucket,
+ cycle_metadata, dump_metadata, restore_metadata,
+ setup_logging, get_bucket_home)
+from s3ql.backends import s3
+from s3ql.parse_args import ArgumentParser
+from s3ql import CURRENT_FS_REV
+from getpass import getpass
+import sys
+from s3ql.backends.common import ChecksumError
+import os
+from s3ql.database import Connection
+import tempfile
+from datetime import datetime as Datetime
+import textwrap
+import shutil
+import stat
+import time
+import cPickle as pickle
+
+log = logging.getLogger("adm")
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description="Manage S3QL Buckets.",
+ epilog=textwrap.dedent('''\
+ Hint: run `%(prog)s <action> --help` to get help on the additional
+ arguments that the different actions take.'''))
+
+ pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\
+ Hint: run `%(prog)s --help` to get help on other available actions and
+ optional arguments that can be used with all actions.'''))
+ pparser.add_storage_url()
+
+ subparsers = parser.add_subparsers(metavar='<action>', dest='action',
+                                       help='may be one of')
+ subparsers.add_parser("passphrase", help="change bucket passphrase",
+ parents=[pparser])
+ subparsers.add_parser("upgrade", help="upgrade file system to newest revision",
+ parents=[pparser])
+ subparsers.add_parser("delete", help="completely delete a bucket with all contents",
+ parents=[pparser])
+ subparsers.add_parser("download-metadata",
+ help="Interactively download metadata backups. "
+ "Use only if you know what you are doing.",
+ parents=[pparser])
+
+ parser.add_debug_modules()
+ parser.add_quiet()
+ parser.add_homedir()
+ parser.add_version()
+ parser.add_ssl()
+
+ options = parser.parse_args(args)
+
+ if not os.path.exists(options.homedir):
+ os.mkdir(options.homedir, 0700)
+
+ return options
+
+def main(args=None):
+ '''Change or show S3QL file system parameters'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options, 'adm.log')
+
+ with get_backend(options.storage_url,
+ options.homedir, options.ssl) as (conn, bucketname):
+ home = get_bucket_home(options.storage_url, options.homedir)
+
+ if options.action == 'delete':
+ return delete_bucket(conn, bucketname, home)
+
+ if not bucketname in conn:
+ raise QuietError("Bucket does not exist.")
+
+ bucket = conn.get_bucket(bucketname)
+
+ try:
+ unlock_bucket(options.homedir, options.storage_url, bucket)
+ except ChecksumError:
+ raise QuietError('Checksum error - incorrect password?')
+
+ if options.action == 'passphrase':
+ return change_passphrase(bucket)
+
+ if options.action == 'upgrade':
+ return upgrade(bucket)
+
+ if options.action == 'download-metadata':
+ return download_metadata(bucket, options.storage_url)
+
+
+def download_metadata(bucket, storage_url):
+ '''Download old metadata backups'''
+
+ backups = sorted(bucket.list('s3ql_metadata_bak_'))
+
+ if not backups:
+ raise QuietError('No metadata backups found.')
+
+ log.info('The following backups are available:')
+ log.info('%3s %-23s %-15s', 'No', 'Name', 'Date')
+ for (i, name) in enumerate(backups):
+ params = bucket.lookup(name)
+ if 'last-modified' in params:
+ date = Datetime.fromtimestamp(params['last-modified']).strftime('%Y-%m-%d %H:%M:%S')
+ else:
+            # (metadata might be from an older fs revision)
+ date = '(unknown)'
+
+ log.info('%3d %-23s %-15s', i, name, date)
+
+ name = None
+ while name is None:
+        buf = raw_input('Enter number to download: ')
+ try:
+ name = backups[int(buf.strip())]
+        except (ValueError, IndexError):
+ log.warn('Invalid input')
+
+ log.info('Downloading %s...', name)
+
+ home = get_bucket_home(storage_url, '.')
+ for i in ('.db', '.params'):
+ if os.path.exists(home + i):
+            raise QuietError('%s already exists, aborting.' % (home + i))
+
+ fh = os.fdopen(os.open(home + '.db', os.O_RDWR | os.O_CREAT,
+ stat.S_IRUSR | stat.S_IWUSR), 'w+b')
+ param = bucket.lookup(name)
+ try:
+ fh.close()
+ db = Connection(home + '.db')
+ fh = tempfile.TemporaryFile()
+ bucket.fetch_fh(name, fh)
+ fh.seek(0)
+ log.info('Reading metadata...')
+ restore_metadata(fh, db)
+ fh.close()
+ except:
+ # Don't keep file if it doesn't contain anything sensible
+ os.unlink(home + '.db')
+ raise
+ pickle.dump(param, open(home + '.params', 'wb'), 2)
+
+
+
+def change_passphrase(bucket):
+ '''Change bucket passphrase'''
+
+ if 's3ql_passphrase' not in bucket:
+ raise QuietError('Bucket is not encrypted.')
+
+ data_pw = bucket.passphrase
+
+ if sys.stdin.isatty():
+ wrap_pw = getpass("Enter new encryption password: ")
+ if not wrap_pw == getpass("Confirm new encryption password: "):
+ raise QuietError("Passwords don't match")
+ else:
+ wrap_pw = sys.stdin.readline().rstrip()
+
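+    # The data encryption key itself does not change; it is merely
+    # re-encrypted ("re-wrapped") with the new passphrase when stored
+    # back into the 's3ql_passphrase' object.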
+ bucket.passphrase = wrap_pw
+ bucket['s3ql_passphrase'] = data_pw
+
+def delete_bucket(conn, bucketname, home):
+ print('I am about to delete the bucket %s with ALL contents.' % bucketname,
+ 'Please enter "yes" to continue.', '> ', sep='\n', end='')
+
+ if sys.stdin.readline().strip().lower() != 'yes':
+ raise QuietError()
+
+ log.info('Deleting...')
+
+ for suffix in ('.db', '.params'):
+ name = home + suffix
+ if os.path.exists(name):
+ os.unlink(name)
+
+ name = home + '-cache'
+ if os.path.exists(name):
+ shutil.rmtree(name)
+
+ if bucketname in conn:
+ conn.delete_bucket(bucketname, recursive=True)
+
+ print('Bucket deleted.')
+ if isinstance(conn, s3.Connection):
+ print('Note that it may take a while until the removal becomes visible.')
+
+def upgrade(bucket):
+ '''Upgrade file system to newest revision'''
+
+ # Access to protected member
+ #pylint: disable=W0212
+ log.info('Getting file system parameters..')
+ seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]
+ if not seq_nos:
+ raise QuietError(textwrap.dedent('''
+ File system revision too old to upgrade!
+
+ You need to use an older S3QL version to upgrade to a more recent
+ revision before you can use this version to upgrade to the newest
+ revision.
+ '''))
+ seq_no = max(seq_nos)
+ param = bucket.lookup('s3ql_metadata')
+
+ # Check for unclean shutdown
+ if param['seq_no'] < seq_no:
+ if (bucket.read_after_write_consistent() and
+ bucket.read_after_delete_consistent()):
+ raise QuietError(textwrap.fill(textwrap.dedent('''\
+ It appears that the file system is still mounted somewhere else. If this is not
+            the case, the file system may not have been unmounted cleanly and you should try
+ to run fsck on the computer where the file system has been mounted most recently.
+ ''')))
+ else:
+ raise QuietError(textwrap.fill(textwrap.dedent('''\
+ It appears that the file system is still mounted somewhere else. If this is not the
+            case, the file system may not have been unmounted cleanly or the data from the
+            most-recent mount may not yet have propagated through the backend. In the latter case,
+ waiting for a while should fix the problem, in the former case you should try to run
+ fsck on the computer where the file system has been mounted most recently.
+ ''')))
+
+ # Check that the fs itself is clean
+ if param['needs_fsck']:
+ raise QuietError("File system damaged, run fsck!")
+
+ # Check revision
+ if param['revision'] < CURRENT_FS_REV - 1:
+ raise QuietError(textwrap.dedent('''
+ File system revision too old to upgrade!
+
+ You need to use an older S3QL version to upgrade to a more recent
+ revision before you can use this version to upgrade to the newest
+ revision.
+ '''))
+
+ elif param['revision'] >= CURRENT_FS_REV:
+ print('File system already at most-recent revision')
+ return
+
+ print(textwrap.dedent('''
+ I am about to update the file system to the newest revision.
+ You will not be able to access the file system with any older version
+ of S3QL after this operation.
+
+ You should make very sure that this command is not interrupted and
+ that no one else tries to mount, fsck or upgrade the file system at
+ the same time.
+ '''))
+
+ print('Please enter "yes" to continue.', '> ', sep='\n', end='')
+
+ if sys.stdin.readline().strip().lower() != 'yes':
+ raise QuietError()
+
+ # Download metadata
+ log.info("Downloading & uncompressing metadata...")
+ dbfile = tempfile.NamedTemporaryFile()
+ db = Connection(dbfile.name, fast_mode=True)
+ fh = tempfile.TemporaryFile()
+ bucket.fetch_fh("s3ql_metadata", fh)
+ fh.seek(0)
+ log.info('Reading metadata...')
+ restore_metadata(fh, db)
+ fh.close()
+
+ log.info('Upgrading from revision %d to %d...', CURRENT_FS_REV - 1,
+ CURRENT_FS_REV)
+ param['revision'] = CURRENT_FS_REV
+
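+    # The actual format change in this revision: symlink inodes now store
+    # the length of their target in the size column.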
+ for (id_, mode, target) in db.query('SELECT id, mode, target FROM inodes'):
+ if stat.S_ISLNK(mode):
+ db.execute('UPDATE inodes SET size=? WHERE id=?',
+ (len(target), id_))
+
+ # Increase metadata sequence no
+ param['seq_no'] += 1
+ bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')
+ for i in seq_nos:
+ if i < param['seq_no'] - 5:
+ del bucket['s3ql_seq_no_%d' % i ]
+
+ # Upload metadata
+ fh = tempfile.TemporaryFile()
+ dump_metadata(fh, db)
+ fh.seek(0)
+ log.info("Uploading database..")
+ cycle_metadata(bucket)
+ param['last-modified'] = time.time() - time.timezone
+ bucket.store_fh("s3ql_metadata", fh, param)
+ fh.close()
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/cp.py b/src/s3ql/cli/cp.py
new file mode 100644
index 0000000..51e9dbc
--- /dev/null
+++ b/src/s3ql/cli/cp.py
@@ -0,0 +1,102 @@
+'''
+cp.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import llfuse
+import os
+import logging
+from s3ql.common import (setup_logging, CTRL_NAME, QuietError)
+from s3ql.parse_args import ArgumentParser
+import struct
+import stat
+import textwrap
+import errno
+import sys
+
+log = logging.getLogger("cp")
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description=textwrap.dedent('''\
+ Replicates the contents of the directory <source> in the
+ directory <target>. <source> has to be an existing directory and
+ <target> must not exist. Both directories have to be within
+ the same S3QL file system.
+
+            The replication will not take any additional space. Only if one
+            of the directories is modified later on will the modified data
+            take additional storage space.
+ '''))
+
+ parser.add_debug()
+ parser.add_quiet()
+ parser.add_version()
+
+ parser.add_argument('source', help='source directory',
+ type=(lambda x: x.rstrip('/')))
+ parser.add_argument('target', help='target directory',
+ type=(lambda x: x.rstrip('/')))
+
+ options = parser.parse_args(args)
+
+ return options
+
+def main(args=None):
+ '''Efficiently copy a directory tree'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ if not os.path.exists(options.source):
+ raise QuietError('Source directory %r does not exist' % options.source)
+
+ if os.path.exists(options.target):
+ raise QuietError('Target directory %r must not yet exist.' % options.target)
+
+ parent = os.path.dirname(os.path.abspath(options.target))
+ if not os.path.exists(parent):
+ raise QuietError('Target parent %r does not exist' % parent)
+
+ fstat_s = os.stat(options.source)
+ fstat_p = os.stat(parent)
+ if not stat.S_ISDIR(fstat_s.st_mode):
+ raise QuietError('Source %r is not a directory' % options.source)
+
+ if not stat.S_ISDIR(fstat_p.st_mode):
+ raise QuietError('Target parent %r is not a directory' % parent)
+
+ if fstat_p.st_dev != fstat_s.st_dev:
+ raise QuietError('Source and target are not on the same file system.')
+
+ ctrlfile = os.path.join(parent, CTRL_NAME)
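+    # The S3QL control file is deliberately hidden from directory listings
+    # but can still be stat()ed; this combination identifies an S3QL
+    # mount point.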
+ if not (CTRL_NAME not in llfuse.listdir(parent) and os.path.exists(ctrlfile)):
+ raise QuietError('Source and target are not on an S3QL file system')
+
+ if os.stat(ctrlfile).st_uid != os.geteuid() and os.geteuid() != 0:
+ raise QuietError('Only root and the mounting user may run s3qlcp.')
+
+ try:
+ os.mkdir(options.target)
+ except OSError as exc:
+ if exc.errno == errno.EACCES:
+ raise QuietError('No permission to create target directory')
+ else:
+ raise
+
+ fstat_t = os.stat(options.target)
+ llfuse.setxattr(ctrlfile, 'copy', struct.pack('II', fstat_s.st_ino, fstat_t.st_ino))
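+
+    # The tree replication itself is carried out by the mount.s3ql process:
+    # writing the packed (source inode, target inode) pair into the 'copy'
+    # xattr of the control file is what triggers it.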
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/ctrl.py b/src/s3ql/cli/ctrl.py
new file mode 100644
index 0000000..637e321
--- /dev/null
+++ b/src/s3ql/cli/ctrl.py
@@ -0,0 +1,120 @@
+'''
+ctrl.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import llfuse
+import os
+import logging
+from s3ql.common import (CTRL_NAME, QuietError, setup_logging)
+from s3ql.parse_args import ArgumentParser
+import textwrap
+import sys
+import cPickle as pickle
+
+log = logging.getLogger("ctrl")
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description='''Control a mounted S3QL File System''',
+ epilog=textwrap.dedent('''\
+ Hint: run `%(prog)s <action> --help` to get help on the additional
+ arguments that the different actions take.'''))
+
+ pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\
+ Hint: run `%(prog)s --help` to get help on other available actions and
+ optional arguments that can be used with all actions.'''))
+ pparser.add_argument("mountpoint", metavar='<mountpoint>',
+ type=(lambda x: x.rstrip('/')),
+ help='Mountpoint of the file system')
+
+ parser.add_debug()
+ parser.add_quiet()
+ parser.add_version()
+
+ subparsers = parser.add_subparsers(metavar='<action>', dest='action',
+                                       help='may be one of')
+ subparsers.add_parser('flushcache', help='flush file system cache',
+ parents=[pparser])
+ subparsers.add_parser('upload-meta', help='Upload metadata',
+ parents=[pparser])
+
+ sparser = subparsers.add_parser('cachesize', help='Change cache size',
+ parents=[pparser])
+ sparser.add_argument('cachesize', metavar='<size>', type=int,
+ help='New cache size in KB')
+
+ sparser = subparsers.add_parser('log', help='Change log level',
+ parents=[pparser])
+
+ sparser.add_argument('level', choices=('debug', 'info', 'warn'),
+ metavar='<level>',
+ help='Desired new log level for mount.s3ql process. '
+ 'Allowed values: %(choices)s')
+ sparser.add_argument('modules', nargs='*', metavar='<module>',
+ help='Modules to enable debugging output for. Specify '
+ '`all` to enable debugging for all modules.')
+
+ options = parser.parse_args(args)
+
+ if options.action == 'log':
+ if options.level != 'debug' and options.modules:
+ parser.error('Modules can only be specified with `debug` logging level.')
+ if not options.modules:
+ options.modules = [ 'all' ]
+
+ if options.level:
+ # Protected member ok, hopefully this won't break
+ #pylint: disable=W0212
+ options.level = logging._levelNames[options.level.upper()]
+
+ return options
+
+
+def main(args=None):
+ '''Control a mounted S3QL File System.'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ path = options.mountpoint
+
+ if not os.path.exists(path):
+ raise QuietError('Mountpoint %r does not exist' % path)
+
+ ctrlfile = os.path.join(path, CTRL_NAME)
+ if not (CTRL_NAME not in llfuse.listdir(path)
+ and os.path.exists(ctrlfile)):
+ raise QuietError('Mountpoint is not an S3QL file system')
+
+ if os.stat(ctrlfile).st_uid != os.geteuid() and os.geteuid() != 0:
+ raise QuietError('Only root and the mounting user may run s3qlctrl.')
+
+ if options.action == 'flushcache':
+ llfuse.setxattr(ctrlfile, 's3ql_flushcache!', 'dummy')
+
+    elif options.action == 'upload-meta':
+ llfuse.setxattr(ctrlfile, 'upload-meta', 'dummy')
+
+ elif options.action == 'log':
+ llfuse.setxattr(ctrlfile, 'logging',
+ pickle.dumps((options.level, options.modules),
+ pickle.HIGHEST_PROTOCOL))
+
+ elif options.action == 'cachesize':
+ llfuse.setxattr(ctrlfile, 'cachesize', pickle.dumps(options.cachesize*1024))
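+        # Example (hypothetical mount point): `s3qlctrl cachesize /mnt/s3ql 51200`
+        # asks the mount.s3ql process to limit its cache to 51200 KB (50 MiB).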
+
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/fsck.py b/src/s3ql/cli/fsck.py
new file mode 100644
index 0000000..c1bc04b
--- /dev/null
+++ b/src/s3ql/cli/fsck.py
@@ -0,0 +1,217 @@
+'''
+fsck.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import os
+import stat
+import time
+from s3ql.common import (get_bucket_home, cycle_metadata, setup_logging,
+ unlock_bucket, QuietError, get_backend, get_seq_no,
+ restore_metadata, dump_metadata)
+from s3ql.parse_args import ArgumentParser
+from s3ql import CURRENT_FS_REV
+from s3ql.database import Connection
+import logging
+from s3ql.fsck import Fsck
+from s3ql.backends.common import ChecksumError
+import sys
+import apsw
+import tempfile
+import cPickle as pickle
+import textwrap
+
+log = logging.getLogger("fsck")
+
+def parse_args(args):
+
+ parser = ArgumentParser(
+ description="Checks and repairs an S3QL filesystem.")
+
+ parser.add_homedir()
+ parser.add_debug_modules()
+ parser.add_quiet()
+ parser.add_version()
+ parser.add_storage_url()
+ parser.add_ssl()
+
+ parser.add_argument("--batch", action="store_true", default=False,
+ help="If user input is required, exit without prompting.")
+ parser.add_argument("--force", action="store_true", default=False,
+ help="Force checking even if file system is marked clean.")
+
+ options = parser.parse_args(args)
+
+ if not os.path.exists(options.homedir):
+ os.mkdir(options.homedir, 0700)
+
+ return options
+
+
+def main(args=None):
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options, 'fsck.log')
+
+ with get_backend(options.storage_url, options.homedir,
+ options.ssl) as (conn, bucketname):
+
+ # Check if fs is mounted on this computer
+ # This is not foolproof but should prevent common mistakes
+ match = options.storage_url + ' /'
+ with open('/proc/mounts', 'r') as fh:
+ for line in fh:
+ if line.startswith(match):
+ raise QuietError('Can not check mounted file system.')
+
+ if not bucketname in conn:
+ raise QuietError("Bucket does not exist.")
+ bucket = conn.get_bucket(bucketname)
+
+ try:
+ unlock_bucket(options.homedir, options.storage_url, bucket)
+ except ChecksumError:
+ raise QuietError('Checksum error - incorrect password?')
+
+ home = get_bucket_home(options.storage_url, options.homedir)
+ seq_no = get_seq_no(bucket)
+ param_remote = bucket.lookup('s3ql_metadata')
+ db = None
+
+ if os.path.exists(home + '.params'):
+ assert os.path.exists(home + '.db')
+ param = pickle.load(open(home + '.params', 'rb'))
+ if param['seq_no'] < seq_no:
+ log.info('Ignoring locally cached metadata (outdated).')
+ param = bucket.lookup('s3ql_metadata')
+ else:
+ log.info('Using cached metadata.')
+ db = Connection(home + '.db')
+ assert not os.path.exists(home + '-cache') or param['needs_fsck']
+
+ if param_remote['seq_no'] != param['seq_no']:
+ log.warn('Remote metadata is outdated.')
+ param['needs_fsck'] = True
+
+ else:
+ param = param_remote
+ assert not os.path.exists(home + '-cache')
+ # .db might exist if mount.s3ql is killed at exactly the right instant
+ # and should just be ignored.
+
+ # Check revision
+ if param['revision'] < CURRENT_FS_REV:
+ raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
+ elif param['revision'] > CURRENT_FS_REV:
+ raise QuietError('File system revision too new, please update your '
+ 'S3QL installation.')
+
+ if param['seq_no'] < seq_no:
+ if (bucket.read_after_write_consistent() and
+ bucket.read_after_delete_consistent()):
+ print(textwrap.fill(textwrap.dedent('''\
+                    Up-to-date metadata is not available. Probably the file system has not
+ been properly unmounted and you should try to run fsck on the computer
+ where the file system has been mounted most recently.
+ ''')))
+ else:
+ print(textwrap.fill(textwrap.dedent('''\
+                    Up-to-date metadata is not available. Either the file system has not
+                    been unmounted cleanly or the data has not yet propagated through the backend.
+                    In the latter case, waiting for a while should fix the problem; in
+                    the former case you should try to run fsck on the computer where
+                    the file system has been mounted most recently.
+ ''')))
+
+ print('Enter "continue" to use the outdated data anyway:',
+ '> ', sep='\n', end='')
+ if options.batch:
+ raise QuietError('(in batch mode, exiting)')
+ if sys.stdin.readline().strip() != 'continue':
+ raise QuietError()
+
+ param['seq_no'] = seq_no
+ param['needs_fsck'] = True
+
+
+ if (not param['needs_fsck']
+ and ((time.time() - time.timezone) - param['last_fsck'])
+            < 60 * 60 * 24 * 31): # last check less than 1 month ago
+ if options.force:
+ log.info('File system seems clean, checking anyway.')
+ else:
+ log.info('File system is marked as clean. Use --force to force checking.')
+ return
+
+ # If using local metadata, check consistency
+ if db:
+ log.info('Checking DB integrity...')
+ try:
+ # get_list may raise CorruptError itself
+ res = db.get_list('PRAGMA integrity_check(20)')
+ if res[0][0] != u'ok':
+ log.error('\n'.join(x[0] for x in res ))
+ raise apsw.CorruptError()
+ except apsw.CorruptError:
+ raise QuietError('Local metadata is corrupted. Remove or repair the following '
+ 'files manually and re-run fsck:\n'
+ + home + '.db (corrupted)\n'
+                             + home + '.params (intact)')
+ else:
+ log.info("Downloading & uncompressing metadata...")
+ fh = tempfile.TemporaryFile()
+ bucket.fetch_fh("s3ql_metadata", fh)
+ os.close(os.open(home + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR))
+ db = Connection(home + '.db.tmp', fast_mode=True)
+ fh.seek(0)
+ log.info('Reading metadata...')
+ restore_metadata(fh, db)
+ fh.close()
+ db.close()
+ os.rename(home + '.db.tmp', home + '.db')
+ db = Connection(home + '.db')
+
+ # Increase metadata sequence no
+ param['seq_no'] += 1
+ param['needs_fsck'] = True
+ bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')
+ pickle.dump(param, open(home + '.params', 'wb'), 2)
+
+ fsck = Fsck(home + '-cache', bucket, param, db)
+ fsck.check()
+
+ if fsck.uncorrectable_errors:
+ raise QuietError("Uncorrectable errors found, aborting.")
+
+ if os.path.exists(home + '-cache'):
+ os.rmdir(home + '-cache')
+
+ log.info('Saving metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(fh, db)
+
+ log.info("Compressing & uploading metadata..")
+ cycle_metadata(bucket)
+ fh.seek(0)
+ param['needs_fsck'] = False
+ param['last_fsck'] = time.time() - time.timezone
+ param['last-modified'] = time.time() - time.timezone
+ bucket.store_fh("s3ql_metadata", fh, param)
+ fh.close()
+ pickle.dump(param, open(home + '.params', 'wb'), 2)
+
+ db.execute('ANALYZE')
+ db.execute('VACUUM')
+ db.close()
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/lock.py b/src/s3ql/cli/lock.py
new file mode 100644
index 0000000..75c01d1
--- /dev/null
+++ b/src/s3ql/cli/lock.py
@@ -0,0 +1,74 @@
+'''
+lock.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import llfuse
+import os
+import logging
+from s3ql.common import (setup_logging, CTRL_NAME, QuietError)
+from s3ql.parse_args import ArgumentParser
+import cPickle as pickle
+import textwrap
+import sys
+
+log = logging.getLogger("lock")
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description=textwrap.dedent('''\
+ Makes the given directory tree(s) immutable. No changes of any sort can
+ be performed on the tree after that. Immutable entries can only be
+ deleted with s3qlrm.
+ '''))
+
+ parser.add_debug()
+ parser.add_quiet()
+ parser.add_version()
+
+ parser.add_argument('path', metavar='<path>', nargs='+',
+ help='Directories to make immutable.',
+ type=(lambda x: x.rstrip('/')))
+
+ return parser.parse_args(args)
+
+
+def main(args=None):
+ '''Make directory tree immutable'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ for name in options.path:
+ if not os.path.exists(name):
+ raise QuietError('%r does not exist' % name)
+
+ parent = os.path.dirname(os.path.abspath(name))
+ fstat_p = os.stat(parent)
+ fstat = os.stat(name)
+
+ if fstat_p.st_dev != fstat.st_dev:
+ raise QuietError('%s is a mount point itself.' % name)
+
+ ctrlfile = os.path.join(parent, CTRL_NAME)
+ if not (CTRL_NAME not in llfuse.listdir(parent) and os.path.exists(ctrlfile)):
+ raise QuietError('%s is not on an S3QL file system' % name)
+
+ if os.stat(ctrlfile).st_uid != os.geteuid():
+ raise QuietError('Only root and the mounting user may run s3qllock.')
+
+ llfuse.setxattr(ctrlfile, 'lock', pickle.dumps((fstat.st_ino,),
+ pickle.HIGHEST_PROTOCOL))
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/mkfs.py b/src/s3ql/cli/mkfs.py
new file mode 100644
index 0000000..0f4037b
--- /dev/null
+++ b/src/s3ql/cli/mkfs.py
@@ -0,0 +1,147 @@
+'''
+mkfs.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import sys
+import os
+from getpass import getpass
+import shutil
+import logging
+import cPickle as pickle
+from s3ql import CURRENT_FS_REV
+from s3ql.common import (get_backend, get_bucket_home, setup_logging,
+ QuietError, dump_metadata, create_tables,
+ init_tables)
+from s3ql.parse_args import ArgumentParser
+from s3ql.database import Connection
+from s3ql.backends.boto.s3.connection import Location
+from s3ql.backends import s3
+import time
+import tempfile
+
+log = logging.getLogger("mkfs")
+
+def parse_args(args):
+
+ parser = ArgumentParser(
+ description="Initializes an S3QL file system")
+
+ parser.add_homedir()
+ parser.add_debug_modules()
+ parser.add_quiet()
+ parser.add_version()
+ parser.add_storage_url()
+ parser.add_ssl()
+
+ parser.add_argument("--s3-location", default='EU', metavar='<name>',
+ choices=('EU', 'us-west-1', 'us-standard', 'ap-southeast-1'),
+ help="Storage location for new S3 buckets. Allowed values: `EU`, "
+ '`us-west-1`, `ap-southeast-1`, or `us-standard`. '
+ '(default: %(default)s)')
+ parser.add_argument("-L", default='', help="Filesystem label",
+ dest="label", metavar='<name>',)
+ parser.add_argument("--blocksize", type=int, default=10240, metavar='<size>',
+ help="Maximum block size in KB (default: %(default)d)")
+ parser.add_argument("--plain", action="store_true", default=False,
+ help="Create unencrypted file system.")
+ parser.add_argument("--force", action="store_true", default=False,
+ help="Overwrite any existing data.")
+
+ options = parser.parse_args(args)
+
+ if options.s3_location == 'us-standard':
+ options.s3_location = Location.DEFAULT
+
+ if not os.path.exists(options.homedir):
+ os.mkdir(options.homedir, 0700)
+
+ return options
+
+def main(args=None):
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ with get_backend(options.storage_url, options.homedir,
+ options.ssl) as (conn, bucketname):
+ if conn.bucket_exists(bucketname):
+ if not options.force:
+ raise QuietError("Bucket already exists! Use --force to overwrite")
+
+ bucket = conn.get_bucket(bucketname)
+ log.info('Bucket already exists. Purging old file system data..')
+ if not bucket.read_after_delete_consistent():
+ log.info('Please note that the new file system may appear inconsistent\n'
+ 'for a while until the removals have propagated through the backend.')
+ bucket.clear()
+
+ elif isinstance(conn, s3.Connection):
+ bucket = conn.create_bucket(bucketname, location=options.s3_location)
+ else:
+ bucket = conn.create_bucket(bucketname)
+
+ if not options.plain:
+ if sys.stdin.isatty():
+ wrap_pw = getpass("Enter encryption password: ")
+ if not wrap_pw == getpass("Confirm encryption password: "):
+ raise QuietError("Passwords don't match.")
+ else:
+ wrap_pw = sys.stdin.readline().rstrip()
+
+ # Generate data encryption passphrase
+ log.info('Generating random encryption key...')
+ fh = open('/dev/urandom', "rb", 0) # No buffering
+ data_pw = fh.read(32)
+ fh.close()
+
+ bucket.passphrase = wrap_pw
+ bucket['s3ql_passphrase'] = data_pw
+ bucket.passphrase = data_pw
+
+ # Setup database
+ home = get_bucket_home(options.storage_url, options.homedir)
+
+ # There can't be a corresponding bucket, so we can safely delete
+ # these files.
+ if os.path.exists(home + '.db'):
+ os.unlink(home + '.db')
+ if os.path.exists(home + '-cache'):
+ shutil.rmtree(home + '-cache')
+
+ log.info('Creating metadata tables...')
+ db = Connection(home + '.db')
+ create_tables(db)
+ init_tables(db)
+
+ param = dict()
+ param['revision'] = CURRENT_FS_REV
+ param['seq_no'] = 0
+ param['label'] = options.label
+ param['blocksize'] = options.blocksize * 1024
+ param['needs_fsck'] = False
+ param['last_fsck'] = time.time() - time.timezone
+ param['last-modified'] = time.time() - time.timezone
+ bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')
+
+ log.info('Saving metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(fh, db)
+
+ log.info("Compressing & uploading metadata..")
+ fh.seek(0)
+ bucket.store_fh("s3ql_metadata", fh, param)
+ fh.close()
+ pickle.dump(param, open(home + '.params', 'wb'), 2)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/mount.py b/src/s3ql/cli/mount.py
new file mode 100644
index 0000000..65cd63b
--- /dev/null
+++ b/src/s3ql/cli/mount.py
@@ -0,0 +1,458 @@
+'''
+mount.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+# We can't use relative imports because this file may
+# be directly executed.
+import sys
+from s3ql import fs, CURRENT_FS_REV
+from s3ql.daemonize import daemonize
+from s3ql.backends.common import (ChecksumError)
+from s3ql.common import (setup_logging, get_backend, get_bucket_home, get_seq_no,
+ QuietError, unlock_bucket, ExceptionStoringThread,
+ cycle_metadata, dump_metadata, restore_metadata)
+from s3ql.parse_args import ArgumentParser
+from s3ql.database import Connection
+import llfuse
+import tempfile
+import textwrap
+import os
+import stat
+import signal
+import time
+import threading
+import logging
+import cPickle as pickle
+
+#import psyco
+#psyco.profile()
+
+__all__ = [ 'main' ]
+
+log = logging.getLogger("mount")
+
+def main(args=None):
+ '''Mount S3QL file system'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ fuse_opts = get_fuse_opts(options)
+
+ # Save handler so that we can remove it when daemonizing
+ stdout_log_handler = setup_logging(options, 'mount.log')
+
+ if not os.path.exists(options.mountpoint):
+ raise QuietError('Mountpoint does not exist.')
+
+ if options.profile:
+ import cProfile
+ import pstats
+ prof = cProfile.Profile()
+
+ with get_backend(options.storage_url, options.homedir,
+ options.ssl) as (conn, bucketname):
+
+ if not bucketname in conn:
+ raise QuietError("Bucket does not exist.")
+ bucket = conn.get_bucket(bucketname, compression=options.compress)
+
+ # Unlock bucket
+ try:
+ unlock_bucket(options.homedir, options.storage_url, bucket)
+ except ChecksumError:
+ raise QuietError('Checksum error - incorrect password?')
+
+ # Get paths
+ home = get_bucket_home(options.storage_url, options.homedir)
+
+ # Retrieve metadata
+ (param, db) = get_metadata(bucket, home)
+
+ metadata_upload_thread = MetadataUploadThread(bucket, param, db,
+ options.metadata_upload_interval)
+ operations = fs.Operations(bucket, db, cachedir=home + '-cache',
+ blocksize=param['blocksize'],
+ cache_size=options.cachesize * 1024,
+ upload_event=metadata_upload_thread.event,
+ cache_entries=options.max_cache_entries)
+
+ log.info('Mounting filesystem...')
+ llfuse.init(operations, options.mountpoint, fuse_opts)
+ try:
+ if not options.fg:
+ conn.prepare_fork()
+ me = threading.current_thread()
+ for t in threading.enumerate():
+ if t is me:
+ continue
+ log.error('Waiting for thread %s', t)
+ t.join()
+
+ if stdout_log_handler:
+ logging.getLogger().removeHandler(stdout_log_handler)
+ daemonize(options.homedir)
+ conn.finish_fork()
+
+ metadata_upload_thread.start()
+ if options.upstart:
+ os.kill(os.getpid(), signal.SIGSTOP)
+ if options.profile:
+ prof.runcall(llfuse.main, options.single)
+ else:
+ llfuse.main(options.single)
+
+ finally:
+ llfuse.close()
+ metadata_upload_thread.stop()
+
+ db_mtime = metadata_upload_thread.db_mtime
+
+ if operations.encountered_errors:
+ param['needs_fsck'] = True
+ else:
+ param['needs_fsck'] = False
+
+ # Do not update .params yet, dump_metadata() may
+ # fail if the database is corrupted, in which case we
+ # want to force an fsck.
+
+ seq_no = get_seq_no(bucket)
+ if db_mtime == os.stat(home + '.db').st_mtime:
+ log.info('File system unchanged, not uploading metadata.')
+ del bucket['s3ql_seq_no_%d' % param['seq_no']]
+ param['seq_no'] -= 1
+ pickle.dump(param, open(home + '.params', 'wb'), 2)
+ elif seq_no == param['seq_no']:
+ log.info('Saving metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(fh, db)
+ log.info("Compressing & uploading metadata..")
+ cycle_metadata(bucket)
+ fh.seek(0)
+ param['last-modified'] = time.time() - time.timezone
+ bucket.store_fh("s3ql_metadata", fh, param)
+ fh.close()
+ pickle.dump(param, open(home + '.params', 'wb'), 2)
+ else:
+ log.error('Remote metadata is newer than local (%d vs %d), '
+ 'refusing to overwrite!', seq_no, param['seq_no'])
+ log.error('The locally cached metadata will be *lost* the next time the file system '
+                  'is mounted or checked; it has therefore been backed up.')
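+        # Rotate numbered backups: name.3 -> name.4, ..., name -> name.0,
+        # keeping up to five previous versions of the local metadata.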
+ for name in (home + '.params', home + '.db'):
+ for i in reversed(range(4)):
+ if os.path.exists(name + '.%d' % i):
+ os.rename(name + '.%d' % i, name + '.%d' % (i+1))
+ os.rename(name, name + '.0')
+
+ db.execute('ANALYZE')
+ db.execute('VACUUM')
+ db.close()
+
+ if options.profile:
+ tmp = tempfile.NamedTemporaryFile()
+ prof.dump_stats(tmp.name)
+ fh = open('s3ql_profile.txt', 'w')
+ p = pstats.Stats(tmp.name, stream=fh)
+ tmp.close()
+ p.strip_dirs()
+ p.sort_stats('cumulative')
+ p.print_stats(50)
+ p.sort_stats('time')
+ p.print_stats(50)
+ fh.close()
+
+ if operations.encountered_errors:
+ raise QuietError('Some errors were encountered while the file system was mounted,\n'
+ 'you should run fsck.s3ql and examine ~/.s3ql/mount.log.')
+
+
+def get_metadata(bucket, home):
+ '''Retrieve metadata
+
+ Checks:
+ - Revision
+ - Unclean mounts
+
+ Locally cached metadata is used if up-to-date.
+ '''
+
+ seq_no = get_seq_no(bucket)
+
+ # Check for cached metadata
+ db = None
+ if os.path.exists(home + '.params'):
+ param = pickle.load(open(home + '.params', 'rb'))
+ if param['seq_no'] < seq_no:
+ log.info('Ignoring locally cached metadata (outdated).')
+ param = bucket.lookup('s3ql_metadata')
+ else:
+ log.info('Using cached metadata.')
+ db = Connection(home + '.db')
+ else:
+ param = bucket.lookup('s3ql_metadata')
+
+ # Check for unclean shutdown
+ if param['seq_no'] < seq_no:
+ if (bucket.read_after_write_consistent() and
+ bucket.read_after_delete_consistent()):
+ raise QuietError(textwrap.fill(textwrap.dedent('''\
+ It appears that the file system is still mounted somewhere else. If this is not
+            the case, the file system may not have been unmounted cleanly and you should try
+ to run fsck on the computer where the file system has been mounted most recently.
+ ''')))
+ else:
+ raise QuietError(textwrap.fill(textwrap.dedent('''\
+ It appears that the file system is still mounted somewhere else. If this is not the
+            case, the file system may not have been unmounted cleanly or the data from the
+            most-recent mount may not yet have propagated through the backend. In the latter case,
+ waiting for a while should fix the problem, in the former case you should try to run
+ fsck on the computer where the file system has been mounted most recently.
+ ''')))
+
+ # Check revision
+ if param['revision'] < CURRENT_FS_REV:
+ raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
+ elif param['revision'] > CURRENT_FS_REV:
+ raise QuietError('File system revision too new, please update your '
+ 'S3QL installation.')
+
+ # Check that the fs itself is clean
+ if param['needs_fsck']:
+ raise QuietError("File system damaged or not unmounted cleanly, run fsck!")
+ if (time.time() - time.timezone) - param['last_fsck'] > 60 * 60 * 24 * 31:
+ log.warn('Last file system check was more than 1 month ago, '
+ 'running fsck.s3ql is recommended.')
+
+ # Download metadata
+ if not db:
+ log.info("Downloading & uncompressing metadata...")
+ fh = tempfile.TemporaryFile()
+ bucket.fetch_fh("s3ql_metadata", fh)
+ os.close(os.open(home + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR))
+ db = Connection(home + '.db.tmp', fast_mode=True)
+ fh.seek(0)
+ log.info('Reading metadata...')
+ restore_metadata(fh, db)
+ fh.close()
+ db.close()
+ os.rename(home + '.db.tmp', home + '.db')
+ db = Connection(home + '.db')
+
+ # Increase metadata sequence no
+ param['seq_no'] += 1
+ param['needs_fsck'] = True
+ bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')
+ pickle.dump(param, open(home + '.params', 'wb'), 2)
+
+ return (param, db)
+
+
+def get_fuse_opts(options):
+ '''Return fuse options for given command line options'''
+
+ fuse_opts = [ b"nonempty", b'fsname=%s' % options.storage_url,
+ 'subtype=s3ql' ]
+
+ if options.allow_other:
+ fuse_opts.append(b'allow_other')
+ if options.allow_root:
+ fuse_opts.append(b'allow_root')
+ if options.allow_other or options.allow_root:
+ fuse_opts.append(b'default_permissions')
+
+ return fuse_opts
+
+
+
+def parse_args(args):
+ '''Parse command line'''
+
+ # Parse fstab-style -o options
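+ # For example (illustrative values), ['-o', 'allow-other,cachesize=50000', ...]
+ # is rewritten to ['--allow-other', '--cachesize', '50000', ...] before the
+ # arguments are handed to argparse.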
+ if '--' in args:
+ max_idx = args.index('--')
+ else:
+ max_idx = len(args)
+ if '-o' in args[:max_idx]:
+ pos = args.index('-o')
+ val = args[pos + 1]
+ del args[pos]
+ del args[pos]
+ for opt in reversed(val.split(',')):
+ if '=' in opt:
+ (key, val) = opt.split('=')
+ args.insert(pos, val)
+ args.insert(pos, '--' + key)
+ else:
+ if opt in ('rw', 'defaults', 'auto', 'noauto', 'user', 'nouser', 'dev', 'nodev',
+ 'suid', 'nosuid', 'atime', 'diratime', 'exec', 'noexec', 'group',
+ 'mand', 'nomand', '_netdev', 'nofail', 'norelatime', 'strictatime',
+ 'owner', 'users', 'nobootwait'):
+ continue
+ elif opt == 'ro':
+ raise QuietError('Read-only mounting not supported.')
+ args.insert(pos, '--' + opt)
+
+ parser = ArgumentParser(
+ description="Mount an S3QL file system.")
+
+ parser.add_homedir()
+ parser.add_debug_modules()
+ parser.add_quiet()
+ parser.add_version()
+ parser.add_storage_url()
+ parser.add_ssl()
+
+ parser.add_argument("mountpoint", metavar='<mountpoint>',
+ type=(lambda x: x.rstrip('/')),
+ help='Where to mount the file system')
+
+ parser.add_argument("--cachesize", type=int, default=102400, metavar='<size>',
+ help="Cache size in kb (default: 102400 (100 MB)). Should be at least 10 times "
+ "the blocksize of the filesystem, otherwise an object may be retrieved and "
+ "written several times during a single write() or read() operation.")
+ parser.add_argument("--max-cache-entries", type=int, default=768, metavar='<num>',
+ help="Maximum number of entries in cache (default: %(default)d). "
+ 'Each cache entry requires one file descriptor, so if you increase '
+ 'this number you have to make sure that your process file descriptor '
+ 'limit (as set with `ulimit -n`) is high enough (at least the number '
+ 'of cache entries + 100).')
+ parser.add_argument("--allow-other", action="store_true", default=False, help=
+ 'Normally, only the user who called `mount.s3ql` can access the mount '
+ 'point. This user then also has full access to it, independent of '
+ 'individual file permissions. If the `--allow-other` option is '
+ 'specified, other users can access the mount point as well and '
+ 'individual file permissions are taken into account for all users.')
+ parser.add_argument("--allow-root", action="store_true", default=False,
+ help='Like `--allow-other`, but restrict access to the mounting '
+ 'user and the root user.')
+ parser.add_argument("--fg", action="store_true", default=False,
+ help="Do not daemonize, stay in foreground")
+ parser.add_argument("--single", action="store_true", default=False,
+ help="Run in single threaded mode. If you don't understand this, "
+ "then you don't need it.")
+ parser.add_argument("--upstart", action="store_true", default=False,
+ help="Stay in foreground and raise SIGSTOP once mountpoint "
+ "is up.")
+ parser.add_argument("--profile", action="store_true", default=False,
+ help="Create profiling information. If you don't understand this, "
+ "then you don't need it.")
+ parser.add_argument("--compress", action="store", default='lzma', metavar='<name>',
+ choices=('lzma', 'bzip2', 'zlib', 'none'),
+ help="Compression algorithm to use when storing new data. Allowed "
+ "values: `lzma`, `bzip2`, `zlib`, none. (default: `%(default)s`)")
+ parser.add_argument("--metadata-upload-interval", action="store", type=int,
+ default=24*60*60, metavar='<seconds>',
+ help='Interval in seconds between complete metadata uploads. '
+ 'Set to 0 to disable. Default: 24h.')
+ parser.add_argument("--compression-threads", action="store", type=int,
+ default=1, metavar='<no>',
+ help='Number of parallel compression and encryption threads '
+ 'to use (default: %(default)s).')
+
+ options = parser.parse_args(args)
+
+ if options.allow_other and options.allow_root:
+ parser.error("--allow-other and --allow-root are mutually exclusive.")
+
+ if options.profile:
+ options.single = True
+
+ if options.upstart:
+ options.fg = True
+
+ if options.metadata_upload_interval == 0:
+ options.metadata_upload_interval = None
+
+ if options.compress == 'none':
+ options.compress = None
+
+ if not os.path.exists(options.homedir):
+ os.mkdir(options.homedir, 0700)
+
+ from .. import upload_manager
+ upload_manager.MAX_COMPRESS_THREADS = options.compression_threads
+
+ return options
+
+class MetadataUploadThread(ExceptionStoringThread):
+ '''
+ Periodically upload a fresh copy of the file system metadata.
+ '''
+
+
+ def __init__(self, bucket, param, db, interval):
+ super(MetadataUploadThread, self).__init__()
+ self.bucket = bucket
+ self.param = param
+ self.db = db
+ self.interval = interval
+ self.daemon = True
+ self.db_mtime = os.stat(db.file).st_mtime
+ self.event = threading.Event()
+ self.quit = False
+ self.name = 'Metadata-Upload-Thread'
+
+ def run_protected(self):
+ log.debug('MetadataUploadThread: start')
+
+ while True:
+ self.event.wait(self.interval)
+ self.event.clear()
+
+ if self.quit:
+ break
+
+ with llfuse.lock:
+ new_mtime = os.stat(self.db.file).st_mtime
+ if self.db_mtime == new_mtime:
+ log.info('File system unchanged, not uploading metadata.')
+ continue
+
+ log.info('Saving metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(fh, self.db)
+
+ seq_no = get_seq_no(self.bucket)
+ if seq_no != self.param['seq_no']:
+ log.error('Remote metadata is newer than local (%d vs %d), '
+ 'refusing to overwrite!', seq_no, self.param['seq_no'])
+ fh.close()
+ continue
+
+ log.info("Compressing & uploading metadata..")
+ cycle_metadata(self.bucket)
+ fh.seek(0)
+ self.param['last-modified'] = time.time() - time.timezone
+
+ # Temporarily decrease sequence no, this is not the final upload
+ self.param['seq_no'] -= 1
+ self.bucket.store_fh("s3ql_metadata", fh, self.param)
+ self.param['seq_no'] += 1
+
+ fh.close()
+ self.db_mtime = new_mtime
+
+ log.debug('MetadataUploadThread: end')
+
+ def stop(self):
+ '''Wait for thread to finish, raise any occurred exceptions.
+
+ This method releases the global lock.
+ '''
+
+ self.quit = True
+ self.event.set()
+ self.join_and_raise()
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/remove.py b/src/s3ql/cli/remove.py
new file mode 100644
index 0000000..492a9a3
--- /dev/null
+++ b/src/s3ql/cli/remove.py
@@ -0,0 +1,75 @@
+'''
+remove.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import llfuse
+import os
+import logging
+from s3ql.common import (setup_logging, CTRL_NAME, QuietError)
+from s3ql.parse_args import ArgumentParser
+import cPickle as pickle
+import textwrap
+import sys
+
+log = logging.getLogger("remove")
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description=textwrap.dedent('''\
+ Recursively delete files and directories in an S3QL file system,
+ including immutable entries.
+ '''))
+
+ parser.add_debug()
+ parser.add_quiet()
+ parser.add_version()
+
+ parser.add_argument('path', metavar='<path>', nargs='+',
+ help='Directories to remove',
+ type=(lambda x: x.rstrip('/')))
+
+ return parser.parse_args(args)
+
+
+def main(args=None):
+ '''Recursively delete files and directories in an S3QL file system'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ for name in options.path:
+ if not os.path.exists(name):
+ raise QuietError('%r does not exist' % name)
+
+ parent = os.path.dirname(os.path.abspath(name))
+ fstat_p = os.stat(parent)
+ fstat = os.stat(name)
+
+ if fstat_p.st_dev != fstat.st_dev:
+ raise QuietError('%s is a mount point itself.' % name)
+
+ ctrlfile = os.path.join(parent, CTRL_NAME)
+ if not (CTRL_NAME not in llfuse.listdir(parent) and os.path.exists(ctrlfile)):
+ raise QuietError('%s is not on an S3QL file system' % name)
+
+ if os.stat(ctrlfile).st_uid != os.geteuid() and os.geteuid() != 0:
+ raise QuietError('Only root and the mounting user may run s3qlrm.')
+
+ llfuse.setxattr(ctrlfile, 'rmtree', pickle.dumps((fstat_p.st_ino,
+ os.path.basename(name)),
+ pickle.HIGHEST_PROTOCOL))
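+ # The setxattr call above is received by Operations.setxattr() in the mount
+ # process, which dispatches the 'rmtree' command to Operations.remove_tree()
+ # (see src/s3ql/fs.py below).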
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/statfs.py b/src/s3ql/cli/statfs.py
new file mode 100644
index 0000000..96eac81
--- /dev/null
+++ b/src/s3ql/cli/statfs.py
@@ -0,0 +1,85 @@
+'''
+statfs.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import llfuse
+import os
+import logging
+from s3ql.common import (CTRL_NAME, QuietError, setup_logging)
+from s3ql.parse_args import ArgumentParser
+import posixpath
+import struct
+import sys
+
+log = logging.getLogger("stat")
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description="Print file system statistics.")
+
+ parser.add_debug()
+ parser.add_quiet()
+ parser.add_version()
+ parser.add_argument("mountpoint", metavar='<mountpoint>',
+ type=(lambda x: x.rstrip('/')),
+ help='Mount point of the file system to examine')
+
+ return parser.parse_args(args)
+
+def main(args=None):
+ '''Print file system statistics to sys.stdout'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+ mountpoint = options.mountpoint
+
+ # Check if it's a mount point
+ if not posixpath.ismount(mountpoint):
+ raise QuietError('%s is not a mount point' % mountpoint)
+
+ # Check if it's an S3QL mountpoint
+ ctrlfile = os.path.join(mountpoint, CTRL_NAME)
+ if not (CTRL_NAME not in llfuse.listdir(mountpoint)
+ and os.path.exists(ctrlfile)):
+ raise QuietError('%s is not a mount point' % mountpoint)
+
+ if os.stat(ctrlfile).st_uid != os.geteuid() and os.geteuid() != 0:
+ raise QuietError('Only root and the mounting user may run s3qlstat.')
+
+ # Use a decent sized buffer, otherwise the statistics have to be
+ # calculated three(!) times because we would need to invoke getxattr
+ # three times.
+ buf = llfuse.getxattr(ctrlfile, b's3qlstat', size_guess=256)
+
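+ # The reply is a packed struct of seven native unsigned 64-bit integers
+ # (56 bytes), matching the 'QQQQQQQ' format string below.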
+ (entries, blocks, inodes, fs_size, dedup_size,
+ compr_size, db_size) = struct.unpack('QQQQQQQ', buf)
+ p_dedup = dedup_size * 100 / fs_size if fs_size else 0
+ p_compr_1 = compr_size * 100 / fs_size if fs_size else 0
+ p_compr_2 = compr_size * 100 / dedup_size if dedup_size else 0
+ mb = 1024 ** 2
+ print('Directory entries: %d' % entries,
+ 'Inodes: %d' % inodes,
+ 'Data blocks: %d' % blocks,
+ 'Total data size: %.2f MB' % (fs_size / mb),
+ 'After de-duplication: %.2f MB (%.2f%% of total)'
+ % (dedup_size / mb, p_dedup),
+ 'After compression: %.2f MB (%.2f%% of total, %.2f%% of de-duplicated)'
+ % (compr_size / mb, p_compr_1, p_compr_2),
+ 'Database size: %.2f MB (uncompressed)' % (db_size / mb),
+ '(some values do not take into account not-yet-uploaded dirty blocks in cache)',
+ sep='\n')
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/cli/umount.py b/src/s3ql/cli/umount.py
new file mode 100644
index 0000000..a555d6f
--- /dev/null
+++ b/src/s3ql/cli/umount.py
@@ -0,0 +1,204 @@
+'''
+umount.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import llfuse
+import sys
+import os
+import logging
+from s3ql.common import (CTRL_NAME, QuietError, setup_logging)
+from s3ql.parse_args import ArgumentParser
+import posixpath
+import subprocess
+import time
+import textwrap
+
+log = logging.getLogger("umount")
+DONTWAIT = False
+
+def parse_args(args):
+ '''Parse command line
+
+ This function writes to stdout/stderr and may call `sys.exit()` instead
+ of throwing an exception if it encounters errors.
+ '''
+
+ parser = ArgumentParser(
+ description=textwrap.dedent('''\
+ Unmounts an S3QL file system. The command returns only after all data
+ has been uploaded to the backend. If any file system errors occurred
+ while the file system was mounted, a warning message is printed. Note
+ that errors occurring during the unmount (e.g. a failure to upload the
+ metadata) can not be detected and appear only in the logging messages of
+ the mount program.'''))
+
+ parser.add_debug()
+ parser.add_quiet()
+ parser.add_version()
+ parser.add_argument("mountpoint", metavar='<mountpoint>',
+ type=(lambda x: x.rstrip('/')),
+ help='Mount point to un-mount')
+
+ parser.add_argument('--lazy', "-z", action="store_true", default=False,
+ help="Lazy umount. Detaches the file system immediately, even if there "
+ 'are still open files. The data will be uploaded in the background '
+ 'once all open files have been closed.')
+
+ return parser.parse_args(args)
+
+
+def main(args=None):
+ '''Umount S3QL file system
+
+ This function writes to stdout/stderr and calls `sys.exit()` instead
+ of returning.
+ '''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+ mountpoint = options.mountpoint
+
+ # Check if it's a mount point
+ if not posixpath.ismount(mountpoint):
+ print('Not a mount point.', file=sys.stderr)
+ sys.exit(1)
+
+ # Check if it's an S3QL mountpoint
+ ctrlfile = os.path.join(mountpoint, CTRL_NAME)
+ if not (CTRL_NAME not in llfuse.listdir(mountpoint)
+ and os.path.exists(ctrlfile)):
+ print('Not an S3QL file system.', file=sys.stderr)
+ sys.exit(1)
+
+ if options.lazy:
+ lazy_umount(mountpoint)
+ else:
+ blocking_umount(mountpoint)
+
+
+def lazy_umount(mountpoint):
+ '''Invoke fusermount -u -z for mountpoint
+
+ This function writes to stdout/stderr and calls `sys.exit()`.
+ '''
+
+ found_errors = False
+ if not warn_if_error(mountpoint):
+ found_errors = True
+ umount_cmd = ('fusermount', '-u', '-z', mountpoint)
+ if not subprocess.call(umount_cmd) == 0:
+ found_errors = True
+
+ if found_errors:
+ sys.exit(1)
+
+
+def blocking_umount(mountpoint):
+ '''Invoke fusermount and wait for daemon to terminate.
+
+ This function writes to stdout/stderr and calls `sys.exit()`.
+ '''
+
+ found_errors = False
+
+ devnull = open('/dev/null', 'wb')
+ if subprocess.call(['fuser', '-m', mountpoint], stdout=devnull,
+ stderr=devnull) == 0:
+ print('Cannot unmount, the following processes still access the mountpoint:')
+ subprocess.call(['fuser', '-v', '-m', mountpoint], stdout=sys.stdout,
+ stderr=sys.stdout)
+ raise QuietError()
+
+ ctrlfile = os.path.join(mountpoint, CTRL_NAME)
+
+ log.debug('Flushing cache...')
+ llfuse.setxattr(ctrlfile, b's3ql_flushcache!', b'dummy')
+
+ if not warn_if_error(mountpoint):
+ found_errors = True
+
+ # Get pid
+ log.debug('Trying to get pid')
+ pid = int(llfuse.getxattr(ctrlfile, b's3ql_pid?'))
+ log.debug('PID is %d', pid)
+
+ # Get command line to make race conditions less likely
+ with open('/proc/%d/cmdline' % pid, 'r') as fh:
+ cmdline = fh.readline()
+ log.debug('cmdline is %r', cmdline)
+
+ # Unmount
+ log.debug('Unmounting...')
+ # This seems to be necessary to prevent weird busy errors
+ time.sleep(3)
+ if subprocess.call(['fusermount', '-u', mountpoint]) != 0:
+ sys.exit(1)
+
+ # Wait for daemon
+ log.debug('Uploading metadata...')
+ step = 0.5
+ while True:
+ try:
+ os.kill(pid, 0)
+ except OSError:
+ log.debug('Kill failed, assuming daemon has quit.')
+ break
+
+ # Check that the process did not terminate with its PID
+ # being reused by a different process
+ try:
+ with open('/proc/%d/cmdline' % pid, 'r') as fh:
+ if fh.readline() != cmdline:
+ log.debug('PID still alive, but cmdline changed')
+ # PID must have been reused, original process terminated
+ break
+ else:
+ log.debug('PID still alive and commandline unchanged.')
+ except OSError:
+ # Process must have exited by now
+ log.debug('Reading cmdline failed, assuming daemon has quit.')
+ break
+
+ if DONTWAIT: # for testing
+ break
+
+ # Process still exists, we wait
+ log.debug('Daemon seems to be alive, waiting...')
+ time.sleep(step)
+ if step < 10:
+ step *= 2
+
+ if found_errors:
+ sys.exit(1)
+
+
+def warn_if_error(mountpoint):
+ '''Check if file system encountered any errors
+
+ If there were errors, a warning is printed to stderr and the
+ function returns False.
+ '''
+
+ log.debug('Trying to get error status')
+ ctrlfile = os.path.join(mountpoint, CTRL_NAME)
+ status = llfuse.getxattr(ctrlfile, 's3ql_errors?')
+
+ if status != 'no errors':
+ print('Some errors occurred while the file system was mounted.\n'
+ 'You should examine the log files and run fsck before mounting the\n'
+ 'file system again.', file=sys.stderr)
+ return False
+ else:
+ return True
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/common.py b/src/s3ql/common.py
new file mode 100644
index 0000000..13b7ef5
--- /dev/null
+++ b/src/s3ql/common.py
@@ -0,0 +1,664 @@
+'''
+common.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from getpass import getpass
+from time import sleep
+import hashlib
+import os
+import stat
+import sys
+import threading
+import logging.handlers
+import traceback
+import time
+import re
+import cPickle as pickle
+from contextlib import contextmanager
+from llfuse import ROOT_INODE
+from .backends.common import NoSuchObject
+
+__all__ = ["get_bucket_home", 'sha256_fh', 'add_stdout_logging',
+ "get_credentials", "get_dbfile", "inode_for_path", "get_path",
+ "ROOT_INODE", "ExceptionStoringThread", 'retry', 'LoggerFilter',
+ "EmbeddedException", 'CTRL_NAME', 'CTRL_INODE', 'unlock_bucket',
+ 'QuietError', 'get_backend', 'add_file_logging', 'setup_excepthook',
+ 'cycle_metadata', 'restore_metadata', 'dump_metadata',
+ 'setup_logging', 'AsyncFn', 'init_tables', 'create_indices',
+ 'create_tables', 'get_seq_no' ]
+
+
+AUTHINFO_BACKEND_PATTERN = r'^backend\s+(\S+)\s+machine\s+(\S+)\s+login\s+(\S+)\s+password\s+(.+)$'
+AUTHINFO_BUCKET_PATTERN = r'^storage-url\s+(\S+)\s+password\s+(.+)$'
+
+log = logging.getLogger('common')
+
+def setup_logging(options, logfile=None):
+ root_logger = logging.getLogger()
+ if root_logger.handlers:
+ log.debug("Logging already initialized.")
+ return
+
+ stdout_handler = add_stdout_logging(options.quiet)
+ if logfile:
+ debug_handler = add_file_logging(os.path.join(options.homedir, logfile))
+ else:
+ debug_handler = stdout_handler
+ setup_excepthook()
+
+ if options.debug:
+ root_logger.setLevel(logging.DEBUG)
+ debug_handler.setLevel(logging.NOTSET)
+ if 'all' not in options.debug:
+ # Adding the filter to the root logger has no effect.
+ debug_handler.addFilter(LoggerFilter(options.debug, logging.INFO))
+ logging.disable(logging.NOTSET)
+ else:
+ root_logger.setLevel(logging.INFO)
+ logging.disable(logging.DEBUG)
+
+ return stdout_handler
+
+
+class LoggerFilter(object):
+ """
+ For use with the logging module as a message filter.
+
+ This filter accepts all messages which have at least the specified
+ priority *or* come from a configured list of loggers.
+ """
+
+ def __init__(self, acceptnames, acceptlevel):
+ """Initializes a Filter object"""
+
+ self.acceptlevel = acceptlevel
+ self.acceptnames = [ x.lower() for x in acceptnames ]
+
+ def filter(self, record):
+ '''Determine if the log message should be printed'''
+
+ if record.levelno >= self.acceptlevel:
+ return True
+
+ if record.name.lower() in self.acceptnames:
+ return True
+
+ return False
+
+def add_stdout_logging(quiet=False):
+ '''Add stdout logging handler to root logger'''
+
+ root_logger = logging.getLogger()
+ formatter = logging.Formatter('%(message)s')
+ handler = logging.StreamHandler()
+ handler.setFormatter(formatter)
+ if quiet:
+ handler.setLevel(logging.WARN)
+ else:
+ handler.setLevel(logging.INFO)
+ root_logger.addHandler(handler)
+ return handler
+
+
+def add_file_logging(logfile):
+
+ root_logger = logging.getLogger()
+ formatter = logging.Formatter('%(asctime)s.%(msecs)03d [%(process)s] %(threadName)s: '
+ '[%(name)s] %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
+ handler = logging.handlers.RotatingFileHandler(logfile, maxBytes=1024**2,
+ backupCount=5)
+ handler.setFormatter(formatter)
+ root_logger.addHandler(handler)
+ return handler
+
+
+@contextmanager
+def get_backend(storage_url, homedir, use_ssl):
+ '''Return backend connection and bucket name
+
+ This is a context manager, since some connections need to be cleaned
+ up properly.
+ '''
+
+ from .backends import s3, local, ftp
+
+ if storage_url.startswith('local://'):
+ conn = local.Connection()
+ bucketname = storage_url[len('local://'):]
+
+ elif storage_url.startswith('s3://'):
+ (login, password) = get_backend_credentials(homedir, 's3', None)
+ conn = s3.Connection(login, password, use_ssl)
+ bucketname = storage_url[len('s3://'):]
+
+ elif storage_url.startswith('s3rr://'):
+ log.warn('Warning: Using S3 reduced redundancy storage (RRS) is *not* recommended!')
+ (login, password) = get_backend_credentials(homedir, 's3', None)
+ conn = s3.Connection(login, password, use_ssl, reduced_redundancy=True)
+ bucketname = storage_url[len('s3rr://'):]
+
+ else:
+ pat = r'^([a-z]+)://([a-zA-Z0-9.-]+)(?::([0-9]+))?(/[a-zA-Z0-9./_-]+)$'
+ match = re.match(pat, storage_url)
+ if not match:
+ raise QuietError('Invalid storage url: %r' % storage_url)
+ (backend, host, port, bucketname) = match.groups()
+ (login, password) = get_backend_credentials(homedir, backend, host)
+
+ if backend == 'ftp' and not use_ssl:
+ conn = ftp.Connection(host, port, login, password)
+ elif backend == 'ftps':
+ conn = ftp.TLSConnection(host, port, login, password)
+ elif backend == 'sftp':
+ from .backends import sftp
+ conn = sftp.Connection(host, port, login, password)
+ else:
+ raise QuietError('Unknown backend: %s' % backend)
+
+ try:
+ yield (conn, bucketname)
+ finally:
+ conn.close()
+
+def get_seq_no(bucket):
+ '''Get current metadata sequence number'''
+
+ seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]
+ if not seq_nos:
+ raise QuietError('Old file system revision, please run `s3qladm upgrade` first.')
+ seq_no = max(seq_nos)
+ for i in [ x for x in seq_nos if x < seq_no - 10 ]:
+ try:
+ del bucket['s3ql_seq_no_%d' % i ]
+ except NoSuchObject:
+ pass # Key list may not be up to date
+
+ return seq_no
+
+def cycle_metadata(bucket):
+ from .backends.common import UnsupportedError
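+ # Rotate metadata backups: s3ql_metadata_bak_i becomes s3ql_metadata_bak_(i+1)
+ # for i = 9..0, then the current s3ql_metadata becomes s3ql_metadata_bak_0.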
+
+ for i in reversed(range(10)):
+ if "s3ql_metadata_bak_%d" % i in bucket:
+ try:
+ bucket.rename("s3ql_metadata_bak_%d" % i, "s3ql_metadata_bak_%d" % (i + 1))
+ except UnsupportedError:
+ bucket.copy("s3ql_metadata_bak_%d" % i, "s3ql_metadata_bak_%d" % (i + 1))
+
+ try:
+ bucket.rename("s3ql_metadata", "s3ql_metadata_bak_0")
+ except UnsupportedError:
+ bucket.copy("s3ql_metadata", "s3ql_metadata_bak_0")
+
+
+def unlock_bucket(homedir, storage_url, bucket):
+ '''Ask for passphrase if bucket requires one'''
+
+ if 's3ql_passphrase' not in bucket:
+ return
+
+ # Try to read from file
+ keyfile = os.path.join(homedir, 'authinfo')
+ wrap_pw = None
+
+ if os.path.isfile(keyfile):
+ mode = os.stat(keyfile).st_mode
+ if mode & (stat.S_IRGRP | stat.S_IROTH):
+ raise QuietError("%s has insecure permissions, aborting." % keyfile)
+
+ fh = open(keyfile, "r")
+ for line in fh:
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ if re.match(AUTHINFO_BACKEND_PATTERN, line):
+ continue
+ res = re.match(AUTHINFO_BUCKET_PATTERN, line)
+ if not res:
+ log.warn('Cannot parse line in %s:\n %s', keyfile, line)
+ continue
+
+ if storage_url == res.group(1):
+ wrap_pw = res.group(2)
+ log.info('Using encryption password from %s', keyfile)
+ break
+
+ # Otherwise from stdin
+ if wrap_pw is None:
+ if sys.stdin.isatty():
+ wrap_pw = getpass("Enter bucket encryption passphrase: ")
+ else:
+ wrap_pw = sys.stdin.readline().rstrip()
+
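+ # The passphrase entered by the user only decrypts the s3ql_passphrase
+ # object; its contents are the actual key that encrypts everything else.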
+ bucket.passphrase = wrap_pw
+ data_pw = bucket['s3ql_passphrase']
+ bucket.passphrase = data_pw
+
+
+def dump_metadata(ofh, conn):
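+ # File layout (as implemented below): a header pickle at offset 0 holding
+ # (data_start, tables_to_dump, sizes, columns), followed -- from offset
+ # data_start on -- by one pickle per batch of up to `bufsize` rows for each
+ # table. restore_metadata() reads the stream back in the same order.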
+ pickler = pickle.Pickler(ofh, 2)
+ data_start = 2048
+ bufsize = 256
+ buf = range(bufsize)
+ tables_to_dump = [('inodes', 'id'),
+ ('contents', 'name, parent_inode'),
+ ('ext_attributes', 'inode, name'),
+ ('objects', 'id'),
+ ('blocks', 'inode, blockno')]
+
+ columns = dict()
+ for (table, _) in tables_to_dump:
+ columns[table] = list()
+ for row in conn.query('PRAGMA table_info(%s)' % table):
+ columns[table].append(row[1])
+
+ ofh.seek(data_start)
+ sizes = dict()
+ for (table, order) in tables_to_dump:
+ log.info('Saving %s', table)
+ pickler.clear_memo()
+ sizes[table] = 0
+ i = 0
+ for row in conn.query('SELECT * FROM %s ORDER BY %s' % (table, order)):
+ buf[i] = row
+ i += 1
+ if i == bufsize:
+ pickler.dump(buf)
+ pickler.clear_memo()
+ sizes[table] += 1
+ i = 0
+
+ if i != 0:
+ pickler.dump(buf[:i])
+ sizes[table] += 1
+
+ ofh.seek(0)
+ pickler.dump((data_start, tables_to_dump, sizes, columns))
+ assert ofh.tell() < data_start
+
+def restore_metadata(ifh, conn):
+
+ unpickler = pickle.Unpickler(ifh)
+ (data_start, to_dump, sizes, columns) = unpickler.load()
+ ifh.seek(data_start)
+ create_tables(conn)
+ for (table, _) in to_dump:
+ log.info('Loading %s', table)
+ col_str = ', '.join(columns[table])
+ val_str = ', '.join('?' for _ in columns[table])
+ sql_str = 'INSERT INTO %s (%s) VALUES(%s)' % (table, col_str, val_str)
+ for _ in xrange(sizes[table]):
+ buf = unpickler.load()
+ for row in buf:
+ conn.execute(sql_str, row)
+
+ create_indices(conn)
+ conn.execute('ANALYZE')
+
+class QuietError(Exception):
+ '''
+ QuietError is the base class for exceptions that should not result
+ in a stack trace being printed.
+
+ It is typically used for exceptions that are the result of the user
+ supplying invalid input data. The exception argument should be a
+ string containing sufficient information about the problem.
+ '''
+
+ def __init__(self, msg=''):
+ super(QuietError, self).__init__()
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+def setup_excepthook():
+ '''Modify sys.excepthook to log exceptions
+
+ Also makes sure that exceptions derived from `QuietError`
+ do not result in stacktraces.
+ '''
+
+ def excepthook(type_, val, tb):
+ root_logger = logging.getLogger()
+ if isinstance(val, QuietError):
+ root_logger.error(val.msg)
+ else:
+ root_logger.error('Uncaught top-level exception',
+ exc_info=(type_, val, tb))
+
+ sys.excepthook = excepthook
+
+def inode_for_path(path, conn):
+ """Return inode of directory entry at `path`
+
+ Raises `KeyError` if the path does not exist.
+ """
+ from .database import NoSuchRowError
+
+ if not isinstance(path, bytes):
+ raise TypeError('path must be of type bytes')
+
+ # Remove leading and trailing /
+ path = path.lstrip(b"/").rstrip(b"/")
+
+ # Traverse
+ inode = ROOT_INODE
+ for el in path.split(b'/'):
+ try:
+ inode = conn.get_val("SELECT inode FROM contents WHERE name=? AND parent_inode=?",
+ (el, inode))
+ except NoSuchRowError:
+ raise KeyError('Path %s does not exist' % path)
+
+ return inode
+
+
+def get_path(id_, conn, name=None):
+ """Return a full path for inode `id_`.
+
+ If `name` is specified, it is appended at the very end of the
+ path (useful if looking up the path for file name with parent
+ inode).
+ """
+
+ if name is None:
+ path = list()
+ else:
+ if not isinstance(name, bytes):
+ raise TypeError('name must be of type bytes')
+ path = [ name ]
+
+ maxdepth = 255
+ while id_ != ROOT_INODE:
+ # This can be ambiguous if directories are hardlinked
+ (name2, id_) = conn.get_row("SELECT name, parent_inode FROM contents WHERE inode=? LIMIT 1",
+ (id_,))
+ path.append(name2)
+ maxdepth -= 1
+ if maxdepth == 0:
+ raise RuntimeError('Failed to resolve name "%s" at inode %d to path'
+ % (name, id_))
+
+ path.append(b'')
+ path.reverse()
+
+ return b'/'.join(path)
+
+
+def _escape(s):
+ '''Escape '/', '=' and '\0' in s'''
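+ # For example, _escape('s3://foo/bar') returns 's3:=2F=2Ffoo=2Fbar', which
+ # is safe to use as a file name for the locally cached metadata.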
+
+ s = s.replace('=', '=3D')
+ s = s.replace('/', '=2F')
+ s = s.replace('\0', '=00')
+
+ return s
+
+def get_bucket_home(storage_url, homedir):
+ if not os.path.exists(homedir):
+ os.mkdir(homedir)
+ return os.path.join(homedir, _escape(storage_url))
+
+
+def get_backend_credentials(homedir, backend, host):
+ """Get credentials for given backend and host"""
+
+ # Try to read from file
+ keyfile = os.path.join(homedir, 'authinfo')
+
+ if os.path.isfile(keyfile):
+ mode = os.stat(keyfile).st_mode
+ if mode & (stat.S_IRGRP | stat.S_IROTH):
+ raise QuietError("%s has insecure permissions, aborting." % keyfile)
+
+ fh = open(keyfile, "r")
+ for line in fh:
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ if re.match(AUTHINFO_BUCKET_PATTERN, line):
+ continue
+ res = re.match(AUTHINFO_BACKEND_PATTERN, line)
+ if not res:
+ log.warn('Cannot parse line in %s:\n %s', keyfile, line)
+ continue
+
+ if backend == res.group(1) and (host is None or host == res.group(2)):
+ log.info('Using backend credentials from %s', keyfile)
+ return res.group(3, 4)
+
+ # Otherwise from stdin
+ if sys.stdin.isatty():
+ if host:
+ print("Enter backend login for %s: " % host, end='')
+ else:
+ print("Enter backend login: ", end='')
+ key = sys.stdin.readline().rstrip()
+
+ if sys.stdin.isatty():
+ if host:
+ pw = getpass("Enter backend password for %s: " % host)
+ else:
+ pw = getpass("Enter backend password: ")
+ else:
+ pw = sys.stdin.readline().rstrip()
+
+ return (key, pw)
+
+def retry(timeout, fn, *a, **kw):
+ """Wait for fn(*a, **kw) to return True.
+
+ If fn(*a, **kw) returns a true value, that value is returned.
+ Otherwise, the function is called repeatedly for
+ `timeout` seconds. If the timeout is reached, `TimeoutError` is
+ raised.
+ """
+
+ step = 0.2
+ waited = 0
+ while waited < timeout:
+ ret = fn(*a, **kw)
+ if ret:
+ return ret
+ sleep(step)
+ waited += step
+ if step < waited / 30:
+ step *= 2
+
+ raise TimeoutError()
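+
+# Example (illustrative): wait up to 10 seconds for an eventually consistent
+# backend to show a newly stored object:
+#
+# retry(10, lambda: 's3ql_metadata' in bucket)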
+
+class TimeoutError(Exception):
+ '''Raised by `retry()` when a timeout is reached.'''
+
+ pass
+
+# Name and inode of the special s3ql control file
+CTRL_NAME = b'.__s3ql__ctrl__'
+CTRL_INODE = 2
+
+class ExceptionStoringThread(threading.Thread):
+ def __init__(self):
+ super(ExceptionStoringThread, self).__init__()
+ self._exc_info = None
+ self._joined = False
+
+ def run_protected(self):
+ pass
+
+ def run(self):
+ try:
+ self.run_protected()
+ except:
+ # This creates a circular reference chain
+ self._exc_info = sys.exc_info()
+
+ def join_get_exc(self):
+ self._joined = True
+ self.join()
+ return self._exc_info
+
+ def join_and_raise(self):
+ '''Wait for the thread to finish, raise any occurred exceptions'''
+
+ self._joined = True
+ if self.is_alive():
+ self.join()
+
+ if self._exc_info is not None:
+ # Break reference chain
+ exc_info = self._exc_info
+ self._exc_info = None
+ raise EmbeddedException(exc_info, self.name)
+
+ def __del__(self):
+ if not self._joined:
+ raise RuntimeError("ExceptionStoringThread instance was destroyed "
+ "without calling join_and_raise()!")
+
+
+class AsyncFn(ExceptionStoringThread):
+ def __init__(self, fn, *args, **kwargs):
+ super(AsyncFn, self).__init__()
+ self.target = fn
+ self.args = args
+ self.kwargs = kwargs
+
+ def run_protected(self):
+ self.target(*self.args, **self.kwargs)
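+
+# Illustrative use of AsyncFn: run a call in the background and re-raise any
+# exception in the calling thread:
+#
+# t = AsyncFn(bucket.store, 's3ql_seq_no_1', 'Empty')
+# t.start()
+# ... do other work ...
+# t.join_and_raise()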
+
+class EmbeddedException(Exception):
+ '''Encapsulates an exception that happened in a different thread
+ '''
+
+ def __init__(self, exc_info, threadname):
+ super(EmbeddedException, self).__init__()
+ self.exc_info = exc_info
+ self.threadname = threadname
+
+ log.error('Thread %s terminated with exception:\n%s',
+ self.threadname, ''.join(traceback.format_exception(*self.exc_info)))
+
+ def __str__(self):
+ return ''.join(['caused by an exception in thread %s.\n' % self.threadname,
+ 'Original/inner traceback (most recent call last): \n' ] +
+ traceback.format_exception(*self.exc_info))
+
+
+def sha256_fh(fh):
+ fh.seek(0)
+ sha = hashlib.sha256()
+
+ while True:
+ buf = fh.read(128 * 1024)
+ if not buf:
+ break
+ sha.update(buf)
+
+ return sha.digest()
+
+def init_tables(conn):
+ # Insert root directory
+ timestamp = time.time() - time.timezone
+ conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (ROOT_INODE, stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
+ os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 1))
+
+ # Insert control inode, the actual values don't matter that much
+ conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (CTRL_INODE, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, timestamp, timestamp, timestamp, 42))
+
+ # Insert lost+found directory
+ inode = conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,
+ os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 1))
+ conn.execute("INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)",
+ (b"lost+found", inode, ROOT_INODE))
+
+def create_tables(conn):
+ # Table with filesystem metadata
+ # The number of links `refcount` to an inode can in theory
+ # be determined from the `contents` table. However, managing
+ # this separately should be significantly faster (the information
+ # is required for every getattr!)
+ conn.execute("""
+ CREATE TABLE inodes (
+ -- id has to specified *exactly* as follows to become
+ -- an alias for the rowid.
+ id INTEGER PRIMARY KEY,
+ uid INT NOT NULL,
+ gid INT NOT NULL,
+ mode INT NOT NULL,
+ mtime REAL NOT NULL,
+ atime REAL NOT NULL,
+ ctime REAL NOT NULL,
+ refcount INT NOT NULL,
+ target BLOB(256) ,
+ size INT NOT NULL DEFAULT 0,
+ rdev INT NOT NULL DEFAULT 0,
+ locked BOOLEAN NOT NULL DEFAULT 0
+ )
+ """)
+
+ # Table of filesystem objects
+ # id is used by readdir() to restart at the correct
+ # position
+ conn.execute("""
+ CREATE TABLE contents (
+ rowid INTEGER PRIMARY KEY AUTOINCREMENT,
+ name BLOB(256) NOT NULL,
+ inode INT NOT NULL REFERENCES inodes(id),
+ parent_inode INT NOT NULL REFERENCES inodes(id),
+
+ UNIQUE (name, parent_inode)
+ )""")
+
+ # Extended attributes
+ conn.execute("""
+ CREATE TABLE ext_attributes (
+ inode INTEGER NOT NULL REFERENCES inodes(id),
+ name BLOB NOT NULL,
+ value BLOB NOT NULL,
+
+ PRIMARY KEY (inode, name)
+ )""")
+
+ # Refcount is included for performance reasons
+ conn.execute("""
+ CREATE TABLE objects (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ refcount INT NOT NULL,
+ hash BLOB(16) UNIQUE,
+ size INT NOT NULL,
+ compr_size INT
+ )""")
+
+
+ # Maps blocks to objects
+ conn.execute("""
+ CREATE TABLE blocks (
+ inode INTEGER NOT NULL REFERENCES inodes(id),
+ blockno INT NOT NULL,
+ obj_id INTEGER NOT NULL REFERENCES objects(id),
+
+ PRIMARY KEY (inode, blockno)
+ )""")
+
+def create_indices(conn):
+ conn.execute('CREATE INDEX IF NOT EXISTS ix_contents_parent_inode ON contents(parent_inode)')
+ conn.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')
+ conn.execute('CREATE INDEX IF NOT EXISTS ix_ext_attributes_inode ON ext_attributes(inode)')
+ conn.execute('CREATE INDEX IF NOT EXISTS ix_objects_hash ON objects(hash)')
+ conn.execute('CREATE INDEX IF NOT EXISTS ix_blocks_obj_id ON blocks(obj_id)')
+ conn.execute('CREATE INDEX IF NOT EXISTS ix_blocks_inode ON blocks(inode)')
diff --git a/src/s3ql/daemonize.py b/src/s3ql/daemonize.py
new file mode 100644
index 0000000..fbbee88
--- /dev/null
+++ b/src/s3ql/daemonize.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+
+'''
+daemonize.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+
+The functions in this file are based on the python-daemon module by Ben Finney
+<ben+python@benfinney.id.au>.
+
+The reason for not simply using the module instead is that it does a lot of stuff that we don't need (not
+a real problem) and some stuff that we must not do (the real problem).
+
+The main issue is that python-daemon unconditionally closes all open file descriptors. We don't want
+this for S3QL, because we have already opened the database and log files when we daemonize. I think this
+is good design, because it allows us to give a meaningful error message to the user if these files
+cannot be opened (if we open them after daemonizing, the user will only see a vanishing daemon process
+without any indication what went wrong).
+
+According to “Advanced Programming in the Unix Environment”, the point of closing all open file
+descriptors is only to "prevent the daemon from holding open any descriptors that it may have inherited
+from its parent (which could be a shell or some other process)". In this case the user will have to live
+with that.
+'''
+
+from __future__ import division, print_function
+
+import os
+import sys
+import logging
+
+log = logging.getLogger('daemonize')
+
+__all__ = [ 'daemonize' ]
+
+
+def daemonize(workdir='/'):
+ '''Daemonize the process'''
+
+ os.chdir(workdir)
+
+ detach_process_context()
+
+ redirect_stream(sys.stdin, None)
+ redirect_stream(sys.stdout, None)
+ redirect_stream(sys.stderr, None)
+
+
+def detach_process_context():
+ """ Detach the process context from parent and session.
+
+ Detach from the parent process and session group, allowing the
+ parent to exit while this process continues running.
+
+ Reference: “Advanced Programming in the Unix Environment”,
+ section 13.3, by W. Richard Stevens, published 1993 by
+ Addison-Wesley.
+ """
+
+ # Protected member
+ #pylint: disable=W0212
+
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+
+ os.setsid()
+
+ pid = os.fork()
+ if pid > 0:
+ log.info('Daemonizing, new PID is %d', pid)
+ os._exit(0)
+
+
+def redirect_stream(system_stream, target_stream):
+ """ Redirect a system stream to a specified file.
+
+ `system_stream` is a standard system stream such as
+ ``sys.stdout``. `target_stream` is an open file object that
+ should replace the corresponding system stream object.
+
+ If `target_stream` is ``None``, defaults to opening the
+ operating system's null device and using its file descriptor.
+
+ """
+ if target_stream is None:
+ target_fd = os.open(os.devnull, os.O_RDWR)
+ else:
+ target_fd = target_stream.fileno()
+ os.dup2(target_fd, system_stream.fileno())
+
+
diff --git a/src/s3ql/database.py b/src/s3ql/database.py
new file mode 100644
index 0000000..012e030
--- /dev/null
+++ b/src/s3ql/database.py
@@ -0,0 +1,260 @@
+'''
+database.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+
+
+Module Attributes:
+-----------
+
+:initsql: SQL commands that are executed whenever a new
+ connection is created.
+
+'''
+
+from __future__ import division, print_function
+
+import logging
+import apsw
+import os
+import types
+from .common import QuietError
+
+__all__ = ['Connection', 'NoUniqueValueError', 'NoSuchRowError' ]
+
+log = logging.getLogger("database")
+
+sqlite_ver = tuple([ int(x) for x in apsw.sqlitelibversion().split('.') ])
+if sqlite_ver < (3, 7, 0):
+ raise QuietError('SQLite version too old, must be 3.7.0 or newer!\n')
+
+initsql = ('PRAGMA foreign_keys = OFF',
+ 'PRAGMA locking_mode = EXCLUSIVE',
+ 'PRAGMA recursive_triggers = on',
+ 'PRAGMA page_size = 4096',
+ 'PRAGMA wal_autocheckpoint = 25000',
+ 'PRAGMA temp_store = FILE',
+ 'PRAGMA legacy_file_format = off',
+ )
+
+class Connection(object):
+ '''
+ This class wraps an APSW connection object. It should be used instead of any
+ native APSW cursors.
+
+ It provides methods to directly execute SQL commands and creates apsw
+ cursors dynamically.
+
+ Instances are not thread safe. They can be passed between threads,
+ but must not be called concurrently.
+
+ Instances also take care of converting bytes objects into buffer
+ objects and back, so that they are stored as BLOBS in the database. If you
+ want to store TEXT, you need to supply unicode objects instead. (This
+ functionality is only needed under Python 2.x, under Python 3.x the apsw
+ module already behaves in the correct way).
+
+ Attributes
+ ----------
+
+ :conn: apsw connection object
+ :cur: default cursor, to be used for all queries
+ that do not return a ResultSet (i.e., that finalize
+ the cursor when they return)
+ '''
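+ # Typical use (illustrative; see the methods below):
+ #
+ # db = Connection('metadata.db')
+ # inode = db.get_val('SELECT inode FROM contents WHERE name=? AND parent_inode=?', (b'foo', 1))
+ # for (name,) in db.query('SELECT name FROM contents WHERE parent_inode=?', (1,)):
+ # ... # buffer columns come back converted to bytes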
+
+ def __init__(self, file_, fast_mode=False):
+ self.conn = apsw.Connection(file_)
+ self.file = file_
+
+ cur = self.conn.cursor()
+ for s in initsql:
+ cur.execute(s)
+
+ self.fast_mode(fast_mode)
+
+ def fast_mode(self, on):
+ '''Switch to fast, but insecure mode
+
+ In fast mode, SQLite operates as quickly as possible, but
+ application and system crashes may lead to data corruption.
+ '''
+
+ # WAL mode causes trouble with e.g. copy_tree, so we
+ # always disable WAL for now. See
+ # http://article.gmane.org/gmane.comp.db.sqlite.general/65243
+ on = True
+ cur = self.conn.cursor()
+ if on:
+ cur.execute('PRAGMA synchronous = OFF')
+ cur.execute('PRAGMA journal_mode = OFF')
+ else:
+ cur.execute('PRAGMA synchronous = NORMAL')
+ cur.execute('PRAGMA journal_mode = WAL')
+
+
+ def close(self):
+ self.conn.close()
+
+ def get_size(self):
+ '''Return size of database file'''
+
+ if self.file is not None and self.file not in ('', ':memory:'):
+ return os.path.getsize(self.file)
+ else:
+ return 0
+
+ def query(self, *a, **kw):
+ '''Execute the given SQL statement. Return ResultSet.
+
+ Transforms buffer() to bytes() and vice versa. If the
+ caller does not retrieve all rows of the result, it
+ should delete the `ResultSet` object as soon as
+ possible to terminate the SQL statement.
+ '''
+
+ return ResultSet(self._execute(*a, **kw))
+
+ def execute(self, *a, **kw):
+ '''Execute the given SQL statement. Return number of affected rows '''
+
+ self._execute(*a, **kw)
+ return self.changes()
+
+ def rowid(self, *a, **kw):
+ """Execute SQL statement and return last inserted rowid"""
+
+ self._execute(*a, **kw)
+ return self.conn.last_insert_rowid()
+
+ def _execute(self, statement, bindings=None):
+ '''Execute the given SQL statement
+
+ This method takes care of converting str/bytes to buffer
+ objects.
+ '''
+
+ if isinstance(bindings, types.GeneratorType):
+ bindings = list(bindings)
+
+ # Convert bytes to buffer
+ if isinstance(bindings, dict):
+ newbindings = dict()
+ for key in bindings:
+ if isinstance(bindings[key], bytes):
+ newbindings[key] = buffer(bindings[key])
+ else:
+ newbindings[key] = bindings[key]
+ elif isinstance(bindings, (list, tuple)):
+ newbindings = [ (val if not isinstance(val, bytes) else buffer(val))
+ for val in bindings ]
+ else:
+ newbindings = bindings
+
+ if bindings is not None:
+ return self.conn.cursor().execute(statement, newbindings)
+ else:
+ return self.conn.cursor().execute(statement)
+
+ def has_val(self, *a, **kw):
+ '''Execute statement and check if it gives result rows'''
+
+ res = self._execute(*a, **kw)
+ try:
+ res.next()
+ except StopIteration:
+ return False
+ else:
+ # Finish the active SQL statement
+ res.close()
+ return True
+
+ def get_val(self, *a, **kw):
+ """Execute statement and return first element of first result row.
+
+ If there is no result row, raises `NoSuchRowError`. If there is more
+ than one row, raises `NoUniqueValueError`.
+ """
+
+ return self.get_row(*a, **kw)[0]
+
+ def get_list(self, *a, **kw):
+ """Execute select statement and returns result list"""
+
+ return list(self.query(*a, **kw))
+
+ def get_row(self, *a, **kw):
+ """Execute select statement and return first row.
+
+ If there are no result rows, raises `NoSuchRowError`. If there is more
+ than one result row, raises `NoUniqueValueError`.
+ """
+
+ res = ResultSet(self._execute(*a, **kw))
+ try:
+ row = res.next()
+ except StopIteration:
+ raise NoSuchRowError()
+ try:
+ res.next()
+ except StopIteration:
+ # Fine, we only wanted one row
+ pass
+ else:
+ # Finish the active SQL statement
+ res.close()
+ raise NoUniqueValueError()
+
+ return row
+
+ def last_rowid(self):
+ """Return rowid most recently inserted in the current thread"""
+
+ return self.conn.last_insert_rowid()
+
+ def changes(self):
+ """Return number of rows affected by most recent sql statement"""
+
+ return self.conn.changes()
+
+
+class NoUniqueValueError(Exception):
+ '''Raised if get_val or get_row was called with a query
+ that generated more than one result row.
+ '''
+
+ def __str__(self):
+ return 'Query generated more than 1 result row'
+
+
+class NoSuchRowError(Exception):
+ '''Raised if the query did not produce any result rows'''
+
+ def __str__(self):
+ return 'Query produced 0 result rows'
+
+
+class ResultSet(object):
+ '''Iterator over the result of an SQL query
+
+ This class automatically converts back from buffer() to bytes().'''
+
+ def __init__(self, cur):
+ self.cur = cur
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return [ (col if not isinstance(col, buffer) else bytes(col))
+ for col in self.cur.next() ]
+
+ def close(self):
+ '''Finish query transaction'''
+ self.cur.close()
+
+ # Once the ResultSet goes out of scope, the cursor goes out of scope
+ # too (because query() uses a fresh cursor), so we don't have to
+ # take any special precautions to finish the active SQL statement.
diff --git a/src/s3ql/fs.py b/src/s3ql/fs.py
new file mode 100644
index 0000000..3cf55f9
--- /dev/null
+++ b/src/s3ql/fs.py
@@ -0,0 +1,1054 @@
+'''
+fs.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import os
+import errno
+import stat
+import llfuse
+import collections
+import logging
+from .inode_cache import InodeCache, OutOfInodesError
+from .common import (get_path, CTRL_NAME, CTRL_INODE, LoggerFilter,
+ EmbeddedException, ExceptionStoringThread)
+import time
+from .block_cache import BlockCache
+from cStringIO import StringIO
+from .database import NoSuchRowError
+from .backends.common import NoSuchObject, ChecksumError
+import struct
+import cPickle as pickle
+import math
+import threading
+from llfuse import FUSEError, lock, lock_released
+
+__all__ = [ "Server" ]
+
+# standard logger for this module
+log = logging.getLogger("fs")
+
+# For long requests, we force a GIL release in the following interval
+GIL_RELEASE_INTERVAL = 0.05
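+# (The tree-walking methods below count their work steps and rescale the batch
+# size so that one batch takes roughly this long before yielding the lock.)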
+
+class Operations(llfuse.Operations):
+ """A full-featured file system for online data storage
+
+ This class implements low-level FUSE operations and is meant to be passed to
+ llfuse.init().
+
+ The ``access`` method of this class always gives full access, independent of
+ file permissions. If the FUSE library is initialized with ``allow_other`` or
+ ``allow_root``, the ``default_permissions`` option should therefore always
+ be passed as well.
+
+
+ Attributes:
+ -----------
+
+ :cache: Holds information about cached blocks
+ :encountered_errors: Is set to true if a request handler raised an exception
+ :inodes: A cache for the attributes of the currently opened inodes.
+ :open_inodes: dict of currently opened inodes. This is used to not remove
+ the blocks of unlinked inodes that are still open.
+ :upload_event: If set, triggers a metadata upload
+
+ Multithreading
+ --------------
+
+ All methods are reentrant and may release the global lock while they
+ are running.
+
+
+ Directory Entry Types
+ ----------------------
+
+ S3QL is quite agnostic when it comes to directory entry types. Every
+ directory entry can contain other entries *and* have a associated data,
+ size, link target and device number. However, S3QL makes some provisions for
+ users relying on unlink()/rmdir() to fail for a directory/file. For that, it
+ explicitly checks the st_mode attribute.
+ """
+
+ def handle_exc(self, fn, exc):
+ '''Handle exceptions that occurred during request processing.
+
+ This method marks the file system as needing fsck and logs the
+ error.
+ '''
+ # Unused arguments
+ #pylint: disable=W0613
+
+ log.error("Unexpected internal filesystem error.\n"
+ "Filesystem may be corrupted, run fsck.s3ql as soon as possible!\n"
+ "Please report this bug on http://code.google.com/p/s3ql/.")
+ self.encountered_errors = True
+
+
+ def __init__(self, bucket, db, cachedir, blocksize, cache_size,
+ cache_entries=768, upload_event=None):
+ super(Operations, self).__init__()
+
+ self.encountered_errors = False
+ self.inodes = InodeCache(db)
+ self.db = db
+ self.upload_event = upload_event
+ self.inode_flush_thread = None
+ self.open_inodes = collections.defaultdict(lambda: 0)
+ self.blocksize = blocksize
+ self.cache = BlockCache(bucket, db, cachedir, cache_size, cache_entries)
+
+ def init(self):
+ self.cache.init()
+ self.inode_flush_thread = InodeFlushThread(self.inodes)
+ self.inode_flush_thread.start()
+
+ def destroy(self):
+ try:
+ self.inode_flush_thread.stop()
+ except EmbeddedException:
+ log.error('FlushThread terminated with exception.')
+ self.encountered_errors = True
+
+ self.inodes.destroy()
+ self.cache.destroy()
+
+ if self.cache.encountered_errors:
+ self.encountered_errors = True
+
+ def lookup(self, id_p, name):
+ if name == CTRL_NAME:
+ inode = self.inodes[CTRL_INODE]
+
+ # Make sure the control file is only writable by the user
+ # who mounted the file system (but don't mark inode as dirty)
+ object.__setattr__(inode, 'uid', os.getuid())
+ object.__setattr__(inode, 'gid', os.getgid())
+
+ return inode
+
+ if name == '.':
+ return self.inodes[id_p]
+
+ if name == '..':
+ id_ = self.db.get_val("SELECT parent_inode FROM contents WHERE inode=?",
+ (id_p,))
+ return self.inodes[id_]
+
+ try:
+ id_ = self.db.get_val("SELECT inode FROM contents WHERE name=? AND parent_inode=?",
+ (name, id_p))
+ except NoSuchRowError:
+ raise llfuse.FUSEError(errno.ENOENT)
+ return self.inodes[id_]
+
+ def getattr(self, id_):
+ if id_ == CTRL_INODE:
+ # Make sure the control file is only writable by the user
+ # who mounted the file system (but don't mark inode as dirty)
+ inode = self.inodes[CTRL_INODE]
+ object.__setattr__(inode, 'uid', os.getuid())
+ object.__setattr__(inode, 'gid', os.getgid())
+ return inode
+
+ try:
+ return self.inodes[id_]
+ except KeyError:
+ # It is possible to get getattr() for an inode that
+ # has just been unlinked()
+ raise FUSEError(errno.ENOENT)
+
+ def readlink(self, id_):
+ timestamp = time.time()
+ inode = self.inodes[id_]
+ if inode.atime < inode.ctime or inode.atime < inode.mtime:
+ inode.atime = timestamp
+ return inode.target
+
+ def opendir(self, id_):
+ return id_
+
+ def check_args(self, args):
+ '''Check and/or supplement fuse mount options'''
+
+ args.append(b'big_writes')
+ args.append('max_write=131072')
+ args.append('no_remote_lock')
+
+ def readdir(self, id_, off):
+ if off == 0:
+ off = -1
+
+ inode = self.inodes[id_]
+ if inode.atime < inode.ctime or inode.atime < inode.mtime:
+ inode.atime = time.time()
+
+ # The ResultSet is automatically deleted
+ # when yield raises GeneratorExit.
+ res = self.db.query("SELECT rowid, name, inode FROM contents WHERE parent_inode=? "
+ 'AND rowid > ? ORDER BY rowid', (id_, off))
+ for (next_, name, cid_) in res:
+ yield (name, self.inodes[cid_], next_)
+
+ def getxattr(self, id_, name):
+ # Handle S3QL commands
+ if id_ == CTRL_INODE:
+ if name == b's3ql_errors?':
+ if self.encountered_errors:
+ return b'errors encountered'
+ else:
+ return b'no errors'
+ elif name == b's3ql_pid?':
+ return bytes(os.getpid())
+
+ elif name == b's3qlstat':
+ return self.extstat()
+
+ raise llfuse.FUSEError(errno.EINVAL)
+
+ else:
+ try:
+ value = self.db.get_val('SELECT value FROM ext_attributes WHERE inode=? AND name=?',
+ (id_, name))
+ except NoSuchRowError:
+ raise llfuse.FUSEError(llfuse.ENOATTR)
+ return value
+
+ def listxattr(self, id_):
+ names = list()
+ for (name,) in self.db.query('SELECT name FROM ext_attributes WHERE inode=?', (id_,)):
+ names.append(name)
+ return names
+
+ def setxattr(self, id_, name, value):
+
+ # Handle S3QL commands
+ if id_ == CTRL_INODE:
+ if name == b's3ql_flushcache!':
+ self.cache.clear()
+ self.cache.upload_manager.join_all()
+ elif name == 'copy':
+ self.copy_tree(*struct.unpack('II', value))
+ elif name == 'upload-meta':
+ if self.upload_event is not None:
+ self.upload_event.set()
+ else:
+ raise llfuse.FUSEError(errno.ENOTTY)
+ elif name == 'lock':
+ self.lock_tree(*pickle.loads(value))
+ elif name == 'rmtree':
+ self.remove_tree(*pickle.loads(value))
+ elif name == 'logging':
+ update_logging(*pickle.loads(value))
+ elif name == 'cachesize':
+ self.cache.max_size = pickle.loads(value)
+ else:
+ raise llfuse.FUSEError(errno.EINVAL)
+ else:
+ if self.inodes[id_].locked:
+ raise FUSEError(errno.EPERM)
+
+ self.db.execute('INSERT OR REPLACE INTO ext_attributes (inode, name, value) '
+ 'VALUES(?, ?, ?)', (id_, name, value))
+ self.inodes[id_].ctime = time.time()
+
+ def removexattr(self, id_, name):
+
+ if self.inodes[id_].locked:
+ raise FUSEError(errno.EPERM)
+
+ changes = self.db.execute('DELETE FROM ext_attributes WHERE inode=? AND name=?',
+ (id_, name))
+ if changes == 0:
+ raise llfuse.FUSEError(llfuse.ENOATTR)
+ self.inodes[id_].ctime = time.time()
+
+ def lock_tree(self, id0):
+ '''Lock directory tree'''
+
+ log.debug('lock_tree(%d): start', id0)
+ queue = [ id0 ]
+ self.inodes[id0].locked = True
+ processed = 0 # Number of steps since last GIL release
+ stamp = time.time() # Time of last GIL release
+ gil_step = 500 # Approx. number of steps between GIL releases
+ while True:
+ id_p = queue.pop()
+ for (id_,) in self.db.query('SELECT inode FROM contents WHERE parent_inode=?',
+ (id_p,)):
+ self.inodes[id_].locked = True
+ processed += 1
+
+ if self.db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)):
+ queue.append(id_)
+
+ if not queue:
+ break
+
+ if processed > gil_step:
+ dt = time.time() - stamp
+ gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 1)
+ log.debug('lock_tree(%d): Adjusting gil_step to %d',
+ id0, gil_step)
+ processed = 0
+ llfuse.lock.yield_()
+ stamp = time.time()
+
+ log.debug('lock_tree(%d): end', id0)
+
+ def remove_tree(self, id_p0, name0):
+ '''Remove directory tree'''
+
+ log.debug('remove_tree(%d, %s): start', id_p0, name0)
+
+ if self.inodes[id_p0].locked:
+ raise FUSEError(errno.EPERM)
+
+ id0 = self.lookup(id_p0, name0).id
+ queue = [ id0 ]
+ processed = 0 # Number of steps since last GIL release
+ stamp = time.time() # Time of last GIL release
+ gil_step = 50 # Approx. number of steps between GIL releases
+ while True:
+ found_subdirs = False
+ id_p = queue.pop()
+ for (name, id_) in self.db.query('SELECT name, inode FROM contents WHERE '
+ 'parent_inode=?', (id_p,)):
+
+ if self.db.has_val('SELECT 1 FROM contents WHERE parent_inode=?',
+ (id_,)):
+ if not found_subdirs:
+ found_subdirs = True
+ queue.append(id_p)
+ queue.append(id_)
+
+ else:
+ llfuse.invalidate_entry(id_p, name)
+ self._remove(id_p, name, id_, force=True)
+
+ processed += 1
+ if processed > gil_step:
+ if not found_subdirs:
+ found_subdirs = True
+ queue.append(id_p)
+ break
+
+ if not queue:
+ llfuse.invalidate_entry(id_p0, name0)
+ self._remove(id_p0, name0, id0, force=True)
+ break
+
+ if processed > gil_step:
+ dt = time.time() - stamp
+ gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 1)
+ log.debug('remove_tree(%d, %s): Adjusting gil_step to %d',
+ id_p0, name0, gil_step)
+ processed = 0
+ llfuse.lock.yield_()
+ stamp = time.time()
+
+ log.debug('remove_tree(%d, %s): end', id_p0, name0)
+
+
+ def copy_tree(self, src_id, target_id):
+ '''Efficiently copy directory tree'''
+
+ log.debug('copy_tree(%d, %d): start', src_id, target_id)
+
+ # To avoid lookups and make code tidier
+ make_inode = self.inodes.create_inode
+ db = self.db
+
+ # First we make sure that all blocks are in the database
+ self.cache.commit()
+ log.debug('copy_tree(%d, %d): committed cache', src_id, target_id)
+
+ # Copy target attributes
+ src_inode = self.inodes[src_id]
+ target_inode = self.inodes[target_id]
+ for attr in ('atime', 'ctime', 'mtime', 'mode', 'uid', 'gid'):
+ setattr(target_inode, attr, getattr(src_inode, attr))
+
+ # We first replicate into a dummy inode
+ timestamp = time.time()
+ tmp = make_inode(mtime=timestamp, ctime=timestamp, atime=timestamp,
+ uid=0, gid=0, mode=0, refcount=0)
+
+ queue = [ (src_id, tmp.id, 0) ]
+ id_cache = dict()
+ processed = 0 # Number of steps since last GIL release
+ stamp = time.time() # Time of last GIL release
+ gil_step = 100 # Approx. number of steps between GIL releases
+ in_transit = set()
+ while queue:
+ (src_id, target_id, rowid) = queue.pop()
+ log.debug('copy_tree(%d, %d): Processing directory (%d, %d, %d)',
+ src_inode.id, target_inode.id, src_id, target_id, rowid)
+ for (name, id_, rowid) in db.query('SELECT name, inode, rowid FROM contents '
+ 'WHERE parent_inode=? AND rowid > ? '
+ 'ORDER BY rowid', (src_id, rowid)):
+
+ if id_ not in id_cache:
+ inode = self.inodes[id_]
+
+ try:
+ inode_new = make_inode(refcount=1, mode=inode.mode, size=inode.size,
+ uid=inode.uid, gid=inode.gid,
+ mtime=inode.mtime, atime=inode.atime,
+ ctime=inode.ctime, target=inode.target,
+ rdev=inode.rdev)
+ except OutOfInodesError:
+ log.warn('Could not find a free inode')
+ raise FUSEError(errno.ENOSPC)
+
+ id_new = inode_new.id
+
+ if inode.refcount != 1:
+ id_cache[id_] = id_new
+
+ for (obj_id, blockno) in db.query('SELECT obj_id, blockno FROM blocks '
+ 'WHERE inode=?', (id_,)):
+ processed += 1
+ db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (id_new, blockno, obj_id))
+ db.execute('UPDATE objects SET refcount=refcount+1 WHERE id=?', (obj_id,))
+
+ if (id_, blockno) in self.cache.upload_manager.in_transit:
+ in_transit.add((id_, blockno))
+
+ if db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)):
+ queue.append((id_, id_new, 0))
+ else:
+ id_new = id_cache[id_]
+ self.inodes[id_new].refcount += 1
+
+ db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
+ (name, id_new, target_id))
+
+ processed += 1
+
+ if processed > gil_step:
+ log.debug('copy_tree(%d, %d): Requeueing (%d, %d, %d) to yield lock',
+ src_inode.id, target_inode.id, src_id, target_id, rowid)
+ queue.append((src_id, target_id, rowid))
+ break
+
+ if processed > gil_step:
+ dt = time.time() - stamp
+ gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 1)
+ log.debug('copy_tree(%d, %d): Adjusting gil_step to %d',
+ src_inode.id, target_inode.id, gil_step)
+ processed = 0
+ llfuse.lock.yield_()
+ stamp = time.time()
+
+        # If we replicated blocks whose associated objects were still in
+ # transit, we have to wait for the transit to complete before we make
+ # the replicated tree visible to the user. Otherwise access to the newly
+ # created blocks will raise a NoSuchObject exception.
+ while in_transit:
+ log.debug('copy_tree(%d, %d): in_transit: %s',
+ src_inode.id, target_inode.id, in_transit)
+ in_transit = [ x for x in in_transit
+ if x in self.cache.upload_manager.in_transit ]
+ if in_transit:
+ self.cache.upload_manager.join_one()
+
+
+ # Make replication visible
+ self.db.execute('UPDATE contents SET parent_inode=? WHERE parent_inode=?',
+ (target_inode.id, tmp.id))
+ del self.inodes[tmp.id]
+ llfuse.invalidate_inode(target_inode.id)
+
+ log.debug('copy_tree(%d, %d): end', src_inode.id, target_inode.id)
+
+
+ def unlink(self, id_p, name):
+ inode = self.lookup(id_p, name)
+
+ if stat.S_ISDIR(inode.mode):
+ raise llfuse.FUSEError(errno.EISDIR)
+
+ self._remove(id_p, name, inode.id)
+
+ def rmdir(self, id_p, name):
+ inode = self.lookup(id_p, name)
+
+ if self.inodes[id_p].locked:
+ raise FUSEError(errno.EPERM)
+
+ if not stat.S_ISDIR(inode.mode):
+ raise llfuse.FUSEError(errno.ENOTDIR)
+
+ self._remove(id_p, name, inode.id)
+
+
+ def _remove(self, id_p, name, id_, force=False):
+ '''Remove entry `name` with parent inode `id_p`
+
+ `id_` must be the inode of `name`. If `force` is True, then
+ the `locked` attribute is ignored.
+
+ This method releases the global lock.
+ '''
+
+ timestamp = time.time()
+
+ # Check that there are no child entries
+ if self.db.has_val("SELECT 1 FROM contents WHERE parent_inode=?", (id_,)):
+ log.debug("Attempted to remove entry with children: %s",
+ get_path(id_p, self.db, name))
+ raise llfuse.FUSEError(errno.ENOTEMPTY)
+
+ if self.inodes[id_p].locked and not force:
+ raise FUSEError(errno.EPERM)
+
+ self.db.execute("DELETE FROM contents WHERE name=? AND parent_inode=?",
+ (name, id_p))
+ inode = self.inodes[id_]
+ inode.refcount -= 1
+ inode.ctime = timestamp
+
+ inode_p = self.inodes[id_p]
+ inode_p.mtime = timestamp
+ inode_p.ctime = timestamp
+
+ if inode.refcount == 0 and id_ not in self.open_inodes:
+ self.cache.remove(id_, 0, int(math.ceil(inode.size / self.blocksize)))
+ # Since the inode is not open, it's not possible that new blocks
+ # get created at this point and we can safely delete the inode
+ self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_,))
+ del self.inodes[id_]
+
+ def symlink(self, id_p, name, target, ctx):
+ mode = (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
+ stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
+ stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
+
+ # Unix semantics require the size of a symlink to be the length
+ # of its target. Therefore, we create symlink directory entries
+ # with this size. If the kernel ever learns to open and read
+ # symlinks directly, it will read the corresponding number of \0
+ # bytes.
+ return self._create(id_p, name, mode, ctx, target=target, size=len(target))
+
+ def rename(self, id_p_old, name_old, id_p_new, name_new):
+ if name_new == CTRL_NAME or name_old == CTRL_NAME:
+ log.warn('Attempted to rename s3ql control file (%s -> %s)',
+ get_path(id_p_old, self.db, name_old),
+ get_path(id_p_new, self.db, name_new))
+ raise llfuse.FUSEError(errno.EACCES)
+
+ if (self.inodes[id_p_old].locked
+ or self.inodes[id_p_new].locked):
+ raise FUSEError(errno.EPERM)
+
+ inode_old = self.lookup(id_p_old, name_old)
+
+ try:
+ inode_new = self.lookup(id_p_new, name_new)
+ except llfuse.FUSEError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ else:
+ target_exists = False
+ else:
+ target_exists = True
+
+
+ if target_exists:
+ self._replace(id_p_old, name_old, id_p_new, name_new,
+ inode_old.id, inode_new.id)
+ else:
+ self._rename(id_p_old, name_old, id_p_new, name_new)
+
+
+ def _rename(self, id_p_old, name_old, id_p_new, name_new):
+ timestamp = time.time()
+
+ self.db.execute("UPDATE contents SET name=?, parent_inode=? WHERE name=? "
+ "AND parent_inode=?", (name_new, id_p_new,
+ name_old, id_p_old))
+
+ inode_p_old = self.inodes[id_p_old]
+ inode_p_new = self.inodes[id_p_new]
+ inode_p_old.mtime = timestamp
+ inode_p_new.mtime = timestamp
+ inode_p_old.ctime = timestamp
+ inode_p_new.ctime = timestamp
+
+ def _replace(self, id_p_old, name_old, id_p_new, name_new,
+ id_old, id_new):
+
+ timestamp = time.time()
+
+ if self.db.has_val("SELECT 1 FROM contents WHERE parent_inode=?", (id_new,)):
+ log.info("Attempted to overwrite entry with children: %s",
+ get_path(id_p_new, self.db, name_new))
+ raise llfuse.FUSEError(errno.EINVAL)
+
+ # Replace target
+ self.db.execute("UPDATE contents SET inode=? WHERE name=? AND parent_inode=?",
+ (id_old, name_new, id_p_new))
+
+
+ # Delete old name
+ self.db.execute('DELETE FROM contents WHERE name=? AND parent_inode=?',
+ (name_old, id_p_old))
+
+ inode_new = self.inodes[id_new]
+ inode_new.refcount -= 1
+ inode_new.ctime = timestamp
+
+ inode_p_old = self.inodes[id_p_old]
+ inode_p_old.ctime = timestamp
+ inode_p_old.mtime = timestamp
+
+ inode_p_new = self.inodes[id_p_new]
+ inode_p_new.ctime = timestamp
+ inode_p_new.mtime = timestamp
+
+ if inode_new.refcount == 0 and id_new not in self.open_inodes:
+ self.cache.remove(id_new, 0,
+ int(math.ceil(inode_new.size / self.blocksize)))
+ # Since the inode is not open, it's not possible that new blocks
+ # get created at this point and we can safely delete the inode
+ self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_new,))
+ del self.inodes[id_new]
+
+
+ def link(self, id_, new_id_p, new_name):
+ if new_name == CTRL_NAME or id_ == CTRL_INODE:
+ log.warn('Attempted to create s3ql control file at %s',
+ get_path(new_id_p, self.db, new_name))
+ raise llfuse.FUSEError(errno.EACCES)
+
+ timestamp = time.time()
+ inode_p = self.inodes[new_id_p]
+
+ if inode_p.refcount == 0:
+ log.warn('Attempted to create entry %s with unlinked parent %d',
+ new_name, new_id_p)
+ raise FUSEError(errno.EINVAL)
+
+ if inode_p.locked:
+ raise FUSEError(errno.EPERM)
+
+ inode_p.ctime = timestamp
+ inode_p.mtime = timestamp
+
+ self.db.execute("INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)",
+ (new_name, id_, new_id_p))
+ inode = self.inodes[id_]
+ inode.refcount += 1
+ inode.ctime = timestamp
+
+ return inode
+
+ def setattr(self, id_, attr):
+ """Handles FUSE setattr() requests"""
+
+ inode = self.inodes[id_]
+ timestamp = time.time()
+
+ if inode.locked:
+ raise FUSEError(errno.EPERM)
+
+ if attr.st_size is not None:
+ len_ = attr.st_size
+
+ # Determine blocks to delete
+ last_block = len_ // self.blocksize
+ cutoff = len_ % self.blocksize
+ total_blocks = int(math.ceil(inode.size / self.blocksize))
+
+ # Adjust file size
+ inode.size = len_
+
+ # Delete blocks and truncate last one if required
+ if cutoff == 0:
+ self.cache.remove(id_, last_block, total_blocks)
+ else:
+ self.cache.remove(id_, last_block + 1, total_blocks)
+
+ try:
+ with self.cache.get(id_, last_block) as fh:
+ fh.truncate(cutoff)
+
+ except NoSuchObject as exc:
+ log.warn('Backend lost block %d of inode %d (id %s)!',
+ last_block, id_, exc.key)
+ self.encountered_errors = True
+ raise FUSEError(errno.EIO)
+
+ except ChecksumError as exc:
+ log.warn('Backend returned malformed data for block %d of inode %d (%s)',
+ last_block, id_, exc)
+ raise FUSEError(errno.EIO)
+
+
+ if attr.st_mode is not None:
+ inode.mode = attr.st_mode
+
+ if attr.st_uid is not None:
+ inode.uid = attr.st_uid
+
+ if attr.st_gid is not None:
+ inode.gid = attr.st_gid
+
+ if attr.st_rdev is not None:
+ inode.rdev = attr.st_rdev
+
+ if attr.st_atime is not None:
+ inode.atime = attr.st_atime
+
+ if attr.st_mtime is not None:
+ inode.mtime = attr.st_mtime
+
+ if attr.st_ctime is not None:
+ inode.ctime = attr.st_ctime
+ else:
+ inode.ctime = timestamp
+
+ return inode
+
+ def mknod(self, id_p, name, mode, rdev, ctx):
+ return self._create(id_p, name, mode, ctx, rdev=rdev)
+
+ def mkdir(self, id_p, name, mode, ctx):
+ return self._create(id_p, name, mode, ctx)
+
+ def extstat(self):
+ '''Return extended file system statistics'''
+
+ entries = self.db.get_val("SELECT COUNT(rowid) FROM contents")
+ blocks = self.db.get_val("SELECT COUNT(id) FROM objects")
+ inodes = self.db.get_val("SELECT COUNT(id) FROM inodes")
+ fs_size = self.db.get_val('SELECT SUM(size) FROM inodes') or 0
+ dedup_size = self.db.get_val('SELECT SUM(size) FROM objects') or 0
+ compr_size = self.db.get_val('SELECT SUM(compr_size) FROM objects') or 0
+
+ return struct.pack('QQQQQQQ', entries, blocks, inodes, fs_size, dedup_size,
+ compr_size, self.db.get_size())
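+
+        # A client reading this value through the control file can decode it
+        # with the matching format string, e.g. (sketch):
+        #
+        #   (entries, blocks, inodes, fs_size, dedup_size, compr_size,
+        #    db_size) = struct.unpack('QQQQQQQ', buf)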
+
+
+ def statfs(self):
+        stat_ = llfuse.StatvfsData()
+
+ # Get number of blocks & inodes
+ blocks = self.db.get_val("SELECT COUNT(id) FROM objects")
+ inodes = self.db.get_val("SELECT COUNT(id) FROM inodes")
+ size = self.db.get_val('SELECT SUM(size) FROM objects')
+
+ if size is None:
+ size = 0
+
+        # File system block size.
+        # It would be more appropriate to switch f_bsize and f_frsize,
+        # but since df and stat ignore f_frsize, this way we can
+        # export more information.
+ stat_.f_bsize = int(size // blocks) if blocks != 0 else self.blocksize
+ stat_.f_frsize = self.blocksize
+
+        # Size of fs in f_frsize units.
+        # (Since the backend is supposed to be unlimited, always report a
+        # half-full file system, but with at least 50 GB apparent capacity.)
+ if stat_.f_bsize != 0:
+ total_blocks = int(max(2 * blocks, 50 * 1024 ** 3 // stat_.f_bsize))
+ else:
+ total_blocks = 2 * blocks
+
+ stat_.f_blocks = total_blocks
+ stat_.f_bfree = total_blocks - blocks
+ stat_.f_bavail = total_blocks - blocks # free for non-root
+
+ total_inodes = max(2 * inodes, 50000)
+ stat_.f_files = total_inodes
+ stat_.f_ffree = total_inodes - inodes
+ stat_.f_favail = total_inodes - inodes # free for non-root
+
+ return stat_
+
+ def open(self, id_, flags):
+ if (self.inodes[id_].locked and
+ (flags & os.O_RDWR or flags & os.O_WRONLY)):
+ raise FUSEError(errno.EPERM)
+
+ self.open_inodes[id_] += 1
+ return id_
+
+ def access(self, id_, mode, ctx):
+ '''Check if requesting process has `mode` rights on `inode`.
+
+ This method always returns true, since it should only be called
+ when permission checking is disabled (if permission checking is
+ enabled, the `default_permissions` FUSE option should be set).
+ '''
+ # Yeah, could be a function and has unused arguments
+ #pylint: disable=R0201,W0613
+
+ return True
+
+ def create(self, id_p, name, mode, ctx):
+ inode = self._create(id_p, name, mode, ctx)
+ self.open_inodes[inode.id] += 1
+ return (inode.id, inode)
+
+ def _create(self, id_p, name, mode, ctx, rdev=0, target=None, size=0):
+ if name == CTRL_NAME:
+ log.warn('Attempted to create s3ql control file at %s',
+ get_path(id_p, self.db, name))
+ raise llfuse.FUSEError(errno.EACCES)
+
+ timestamp = time.time()
+ inode_p = self.inodes[id_p]
+
+ if inode_p.locked:
+ raise FUSEError(errno.EPERM)
+
+ if inode_p.refcount == 0:
+ log.warn('Attempted to create entry %s with unlinked parent %d',
+ name, id_p)
+ raise FUSEError(errno.EINVAL)
+ inode_p.mtime = timestamp
+ inode_p.ctime = timestamp
+
+ try:
+ inode = self.inodes.create_inode(mtime=timestamp, ctime=timestamp, atime=timestamp,
+ uid=ctx.uid, gid=ctx.gid, mode=mode, refcount=1,
+ rdev=rdev, target=target, size=size)
+ except OutOfInodesError:
+ log.warn('Could not find a free inode')
+ raise FUSEError(errno.ENOSPC)
+
+ self.db.execute("INSERT INTO contents(name, inode, parent_inode) VALUES(?,?,?)",
+ (name, inode.id, id_p))
+
+ return inode
+
+
+ def read(self, fh, offset, length):
+        '''Read `length` bytes from `fh` at position `offset`
+
+        Unless EOF is reached, returns exactly `length` bytes.
+
+ This method releases the global lock while it is running.
+ '''
+ buf = StringIO()
+ inode = self.inodes[fh]
+
+ # Make sure that we don't read beyond the file size. This
+ # should not happen unless direct_io is activated, but it's
+ # cheap and nice for testing.
+ size = inode.size
+ length = min(size - offset, length)
+
+ while length > 0:
+ tmp = self._read(fh, offset, length)
+ buf.write(tmp)
+ length -= len(tmp)
+ offset += len(tmp)
+
+ # Inode may have expired from cache
+ inode = self.inodes[fh]
+
+ if inode.atime < inode.ctime or inode.atime < inode.mtime:
+ inode.atime = time.time()
+
+ return buf.getvalue()
+
+ def _read(self, id_, offset, length):
+ """Reads at the specified position until the end of the block
+
+ This method may return less than `length` bytes if a blocksize
+ boundary is encountered. It may also read beyond the end of
+ the file, filling the buffer with additional null bytes.
+
+ This method releases the global lock while it is running.
+ """
+
+ # Calculate required block
+ blockno = offset // self.blocksize
+ offset_rel = offset - blockno * self.blocksize
+
+ # Don't try to read into the next block
+ if offset_rel + length > self.blocksize:
+ length = self.blocksize - offset_rel
+
+ try:
+ with self.cache.get(id_, blockno) as fh:
+ fh.seek(offset_rel)
+ buf = fh.read(length)
+
+ except NoSuchObject as exc:
+ log.warn('Backend lost block %d of inode %d (id %s)!',
+ blockno, id_, exc.key)
+ self.encountered_errors = True
+ raise FUSEError(errno.EIO)
+
+ except ChecksumError as exc:
+ log.warn('Backend returned malformed data for block %d of inode %d (%s)',
+ blockno, id_, exc)
+ raise FUSEError(errno.EIO)
+
+ if len(buf) == length:
+ return buf
+ else:
+ # If we can't read enough, add null bytes
+ return buf + b"\0" * (length - len(buf))
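+
+    # Worked example (sketch): with a 10 MiB blocksize (10485760 bytes), a
+    # _read() at offset 10485750 with length 100 maps to blockno 0 and
+    # offset_rel 10485750; length is capped to the 10 bytes left in block 0,
+    # and the loop in read() then issues a second _read() for the remaining
+    # 90 bytes, which fall into block 1.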
+
+ def write(self, fh, offset, buf):
+ '''Handle FUSE write requests.
+
+ This method releases the global lock while it is running.
+ '''
+
+ if self.inodes[fh].locked:
+ raise FUSEError(errno.EPERM)
+
+ total = len(buf)
+ minsize = offset + total
+ while buf:
+ written = self._write(fh, offset, buf)
+ offset += written
+ buf = buf[written:]
+
+ # Update file size if changed
+ # Fuse does not ensure that we do not get concurrent write requests,
+ # so we have to be careful not to undo a size extension made by
+ # a concurrent write.
+ timestamp = time.time()
+ inode = self.inodes[fh]
+ inode.size = max(inode.size, minsize)
+ inode.mtime = timestamp
+ inode.ctime = timestamp
+
+ return total
+
+
+ def _write(self, id_, offset, buf):
+ """Write as much as we can.
+
+ May write less bytes than given in `buf`, returns
+ the number of bytes written.
+
+ This method releases the global lock while it is running.
+ """
+
+ # Calculate required block
+ blockno = offset // self.blocksize
+ offset_rel = offset - blockno * self.blocksize
+
+ # Don't try to write into the next block
+ if offset_rel + len(buf) > self.blocksize:
+ buf = buf[:self.blocksize - offset_rel]
+
+ try:
+ with self.cache.get(id_, blockno) as fh:
+ fh.seek(offset_rel)
+ fh.write(buf)
+
+ except NoSuchObject as exc:
+ log.warn('Backend lost block %d of inode %d (id %s)!',
+ blockno, id_, exc.key)
+ self.encountered_errors = True
+ raise FUSEError(errno.EIO)
+
+ except ChecksumError as exc:
+ log.warn('Backend returned malformed data for block %d of inode %d (%s)',
+ blockno, id_, exc)
+ raise FUSEError(errno.EIO)
+
+ return len(buf)
+
+ def fsync(self, fh, datasync):
+ if not datasync:
+ self.inodes.flush_id(fh)
+
+ self.cache.flush(fh)
+
+ def releasedir(self, fh):
+ # Unused argument
+ #pylint: disable=W0613
+ return
+
+ def release(self, fh):
+ self.open_inodes[fh] -= 1
+
+ if self.open_inodes[fh] == 0:
+ del self.open_inodes[fh]
+
+ inode = self.inodes[fh]
+ if inode.refcount == 0:
+ self.cache.remove(inode.id, 0,
+                                  int(math.ceil(inode.size / self.blocksize)))
+                # Since the inode is not open, it's not possible that new blocks
+                # get created at this point and we can safely delete the inode
+ del self.inodes[fh]
+
+
+ # Called for close() calls.
+ def flush(self, fh):
+ pass
+
+ def fsyncdir(self, fh, datasync):
+ if not datasync:
+ self.inodes.flush_id(fh)
+
+def update_logging(level, modules):
+ root_logger = logging.getLogger()
+ if level == logging.DEBUG:
+ logging.disable(logging.NOTSET)
+ for handler in root_logger.handlers:
+ for filter_ in [ f for f in handler.filters if isinstance(f, LoggerFilter) ]:
+ handler.removeFilter(filter_)
+ handler.setLevel(level)
+ if 'all' not in modules:
+ for handler in root_logger.handlers:
+ handler.addFilter(LoggerFilter(modules, logging.INFO))
+
+ else:
+ logging.disable(logging.DEBUG)
+ root_logger.setLevel(level)
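+
+# update_logging() is reached through the control file: setxattr() above
+# unpickles the value into a (level, modules) tuple. A management tool
+# might trigger it like this (sketch; CTRL_NAME is the control file name
+# defined in s3ql.common):
+#
+#   import os, pickle, logging, llfuse
+#   llfuse.setxattr(os.path.join(mountpoint, CTRL_NAME), 'logging',
+#                   pickle.dumps((logging.DEBUG, ['fs'])))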
+
+
+class InodeFlushThread(ExceptionStoringThread):
+ '''
+ Periodically commit dirty inodes.
+
+ This class uses the llfuse global lock. When calling objects
+ passed in the constructor, the global lock is acquired first.
+ '''
+
+ def __init__(self, cache):
+ super(InodeFlushThread, self).__init__()
+ self.cache = cache
+ self.stop_event = threading.Event()
+ self.name = 'Inode Flush Thread'
+ self.daemon = True
+
+ def run_protected(self):
+ log.debug('FlushThread: start')
+
+ while not self.stop_event.is_set():
+ with lock:
+ self.cache.flush()
+ self.stop_event.wait(5)
+ log.debug('FlushThread: end')
+
+ def stop(self):
+        '''Wait for thread to finish, raise any exceptions that occurred.
+
+ This method releases the global lock.
+ '''
+
+ self.stop_event.set()
+ with lock_released:
+ self.join_and_raise()
+
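+
+# Typical lifecycle (sketch; the surrounding mount code is not part of
+# this file):
+#
+#   flush_thread = InodeFlushThread(inode_cache)
+#   flush_thread.start()
+#   try:
+#       llfuse.main(single=False)
+#   finally:
+#       flush_thread.stop()   # joins the thread, re-raising its exceptions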
diff --git a/src/s3ql/fsck.py b/src/s3ql/fsck.py
new file mode 100644
index 0000000..3b020ae
--- /dev/null
+++ b/src/s3ql/fsck.py
@@ -0,0 +1,481 @@
+'''
+fsck.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import os
+from os.path import basename
+import stat
+import time
+import logging
+import re
+from .database import NoSuchRowError
+from .backends.common import NoSuchObject
+from .common import (ROOT_INODE, CTRL_INODE, inode_for_path, sha256_fh, get_path)
+
+__all__ = [ "Fsck" ]
+
+log = logging.getLogger("fsck")
+
+S_IFMT = (stat.S_IFDIR | stat.S_IFREG | stat.S_IFSOCK | stat.S_IFBLK |
+ stat.S_IFCHR | stat.S_IFIFO | stat.S_IFLNK)
+
+class Fsck(object):
+
+ def __init__(self, cachedir_, bucket_, param, conn):
+
+ self.cachedir = cachedir_
+ self.bucket = bucket_
+ self.expect_errors = False
+ self.found_errors = False
+ self.uncorrectable_errors = False
+ self.blocksize = param['blocksize']
+ self.conn = conn
+
+ def check(self):
+ """Check file system
+
+        Sets the instance attribute `found_errors`.
+ """
+
+ self.check_cache()
+ self.check_lof()
+ self.check_contents()
+ self.check_loops()
+ self.check_inode_refcount()
+ self.check_inode_sizes()
+ self.check_inode_unix()
+ self.check_obj_refcounts()
+ self.check_keylist()
+
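+    # Typical invocation (sketch; parameters as in __init__ above):
+    #
+    #   fsck = Fsck(cachedir, bucket, { 'blocksize': blocksize }, conn)
+    #   fsck.check()
+    #   if fsck.uncorrectable_errors:
+    #       raise RuntimeError('File system errors could not be corrected')
+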
+
+ def log_error(self, *a, **kw):
+ '''Log file system error if not expected'''
+
+ if not self.expect_errors:
+ return log.warn(*a, **kw)
+
+ def check_cache(self):
+ """Commit uncommitted cache files"""
+
+
+ log.info("Checking cached objects...")
+ if not os.path.exists(self.cachedir):
+ return
+
+ for filename in os.listdir(self.cachedir):
+ self.found_errors = True
+
+ match = re.match('^inode_(\\d+)_block_(\\d+)(\\.d)?$', filename)
+ if match:
+ inode = int(match.group(1))
+ blockno = int(match.group(2))
+ dirty = match.group(3) == '.d'
+ else:
+ raise RuntimeError('Strange file in cache directory: %s' % filename)
+
+ if not dirty:
+ self.log_error('Removing cached block %d of inode %d', blockno, inode)
+ os.unlink(os.path.join(self.cachedir, filename))
+ continue
+
+ self.log_error("Committing changed block %d of inode %d to backend",
+ blockno, inode)
+
+ fh = open(os.path.join(self.cachedir, filename), "rb")
+ fh.seek(0, 2)
+ size = fh.tell()
+ fh.seek(0)
+ hash_ = sha256_fh(fh)
+
+ try:
+ obj_id = self.conn.get_val('SELECT id FROM objects WHERE hash=?', (hash_,))
+
+ except NoSuchRowError:
+ obj_id = self.conn.rowid('INSERT INTO objects (refcount, hash, size) VALUES(?, ?, ?)',
+ (1, hash_, size))
+ self.bucket.store_fh('s3ql_data_%d' % obj_id, fh)
+
+ else:
+ # Verify that this object actually exists
+ if self.bucket.contains('s3ql_data_%d' % obj_id):
+ self.conn.execute('UPDATE objects SET refcount=refcount+1 WHERE id=?',
+ (obj_id,))
+ else:
+ # We don't want to delete the vanished object here, because
+ # check_keylist will do a better job and print all affected
+ # inodes. However, we need to reset the hash so that we can
+ # insert the new object.
+ self.conn.execute('UPDATE objects SET hash=NULL WHERE id=?', (obj_id,))
+ obj_id = self.conn.rowid('INSERT INTO objects (refcount, hash, size) VALUES(?, ?, ?)',
+ (1, hash_, size))
+ self.bucket.store_fh('s3ql_data_%d' % obj_id, fh)
+
+ try:
+ old_obj_id = self.conn.get_val('SELECT obj_id FROM blocks WHERE inode=? AND blockno=?',
+ (inode, blockno))
+ except NoSuchRowError:
+ self.conn.execute('INSERT INTO blocks (obj_id, inode, blockno) VALUES(?,?,?)',
+ (obj_id, inode, blockno))
+ else:
+ self.conn.execute('UPDATE blocks SET obj_id=? WHERE inode=? AND blockno=?',
+ (obj_id, inode, blockno))
+
+ refcount = self.conn.get_val('SELECT refcount FROM objects WHERE id=?',
+ (old_obj_id,))
+ if refcount > 1:
+ self.conn.execute('UPDATE objects SET refcount=refcount-1 WHERE id=?',
+ (old_obj_id,))
+ else:
+ # Don't delete yet, maybe it's still referenced
+ pass
+
+
+ fh.close()
+ os.unlink(os.path.join(self.cachedir, filename))
+
+
+ def check_lof(self):
+ """Ensure that there is a lost+found directory"""
+
+ log.info('Checking lost+found...')
+
+ timestamp = time.time() - time.timezone
+ try:
+ inode_l = self.conn.get_val("SELECT inode FROM contents WHERE name=? AND parent_inode=?",
+ (b"lost+found", ROOT_INODE))
+
+ except NoSuchRowError:
+ self.found_errors = True
+ self.log_error("Recreating missing lost+found directory")
+ inode_l = self.conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,
+ os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 1))
+ self.conn.execute("INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)",
+ (b"lost+found", inode_l, ROOT_INODE))
+
+
+ mode = self.conn.get_val('SELECT mode FROM inodes WHERE id=?', (inode_l,))
+ if not stat.S_ISDIR(mode):
+ self.found_errors = True
+ self.log_error('/lost+found is not a directory! Old entry will be saved as '
+ '/lost+found/inode-%s*', inode_l)
+ # We leave the old inode unassociated, so that it will be added
+ # to lost+found later on.
+ inode_l = self.conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,
+ os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 2))
+ self.conn.execute('UPDATE contents SET inode=? WHERE name=? AND parent_inode=?',
+ (inode_l, b"lost+found", ROOT_INODE))
+
+ def check_contents(self):
+ """Check direntry names"""
+
+ log.info('Checking directory entry names...')
+
+ for (name, id_p) in self.conn.query('SELECT name, parent_inode FROM contents '
+ 'WHERE LENGTH(name) > 255'):
+ path = get_path(id_p, self.conn, name)
+ self.log_error('Entry name %s... in %s has more than 255 characters, '
+ 'this could cause problems',
+ name[:40], path[:-len(name)])
+ self.found_errors = True
+
+
+ def check_loops(self):
+ """Ensure that all directories can be reached from root"""
+
+ log.info('Checking directory reachability...')
+
+ self.conn.execute('CREATE TEMPORARY TABLE loopcheck (inode INTEGER PRIMARY KEY, '
+ 'parent_inode INTEGER)')
+ self.conn.execute('CREATE INDEX ix_loopcheck_parent_inode ON loopcheck(parent_inode)')
+ self.conn.execute('INSERT INTO loopcheck (inode, parent_inode) '
+ 'SELECT inode, parent_inode FROM contents JOIN inodes ON inode == id '
+ 'WHERE mode & ? == ?', (S_IFMT, stat.S_IFDIR))
+ self.conn.execute('CREATE TEMPORARY TABLE loopcheck2 (inode INTEGER PRIMARY KEY)')
+ self.conn.execute('INSERT INTO loopcheck2 (inode) SELECT inode FROM loopcheck')
+
+ def delete_tree(inode_p):
+ for (inode,) in self.conn.query("SELECT inode FROM loopcheck WHERE parent_inode=?",
+ (inode_p,)):
+ delete_tree(inode)
+ self.conn.execute('DELETE FROM loopcheck2 WHERE inode=?', (inode_p,))
+
+ delete_tree(ROOT_INODE)
+
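+        # Everything still in loopcheck2 at this point was never reached
+        # from the root inode, i.e. it belongs to a cycle or an orphaned
+        # subtree.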
+ if self.conn.has_val("SELECT 1 FROM loopcheck2"):
+ self.found_errors = True
+ self.uncorrectable_errors = True
+ self.log_error("Found unreachable filesystem entries!\n"
+ "This problem cannot be corrected automatically yet.")
+
+ self.conn.execute("DROP TABLE loopcheck")
+ self.conn.execute("DROP TABLE loopcheck2")
+
+ def check_inode_sizes(self):
+ """Check if inode sizes agree with blocks"""
+
+ log.info('Checking inodes (sizes)...')
+
+ self.conn.execute('CREATE TEMPORARY TABLE min_sizes '
+ '(id INTEGER PRIMARY KEY, min_size INTEGER NOT NULL)')
+ try:
+ self.conn.execute('''INSERT INTO min_sizes (id, min_size)
+ SELECT inode, MAX(blockno * ? + size)
+ FROM blocks JOIN objects ON obj_id == id
+ GROUP BY inode''',
+ (self.blocksize,))
+
+ self.conn.execute('''
+ CREATE TEMPORARY TABLE wrong_sizes AS
+ SELECT id, size, min_size
+ FROM inodes JOIN min_sizes USING (id)
+ WHERE size < min_size''')
+
+ for (id_, size_old, size) in self.conn.query('SELECT * FROM wrong_sizes'):
+
+ self.found_errors = True
+ self.log_error("Size of inode %d (%s) does not agree with number of blocks, "
+ "setting from %d to %d",
+ id_, get_path(id_, self.conn), size_old, size)
+ self.conn.execute("UPDATE inodes SET size=? WHERE id=?", (size, id_))
+ finally:
+ self.conn.execute('DROP TABLE min_sizes')
+ self.conn.execute('DROP TABLE IF EXISTS wrong_sizes')
+
+ def check_inode_refcount(self):
+ """Check inode reference counters"""
+
+ log.info('Checking inodes (refcounts)...')
+
+ self.conn.execute('CREATE TEMPORARY TABLE refcounts '
+ '(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)')
+ try:
+ self.conn.execute('INSERT INTO refcounts (id, refcount) '
+ 'SELECT inode, COUNT(name) FROM contents GROUP BY inode')
+
+ self.conn.execute('''
+ CREATE TEMPORARY TABLE wrong_refcounts AS
+ SELECT id, refcounts.refcount, inodes.refcount
+ FROM inodes LEFT JOIN refcounts USING (id)
+ WHERE inodes.refcount != refcounts.refcount
+ OR refcounts.refcount IS NULL''')
+
+ for (id_, cnt, cnt_old) in self.conn.query('SELECT * FROM wrong_refcounts'):
+ # No checks for root and control
+ if id_ in (ROOT_INODE, CTRL_INODE):
+ continue
+
+ self.found_errors = True
+ if cnt is None:
+ (id_p, name) = self.resolve_free(b"/lost+found", b"inode-%d" % id_)
+ self.log_error("Inode %d not referenced, adding as /lost+found/%s", id_, name)
+ self.conn.execute("INSERT INTO contents (name, inode, parent_inode) "
+ "VALUES (?,?,?)", (basename(name), id_, id_p))
+ self.conn.execute("UPDATE inodes SET refcount=? WHERE id=?", (1, id_))
+
+ else:
+ self.log_error("Inode %d (%s) has wrong reference count, setting from %d to %d",
+ id_, get_path(id_, self.conn), cnt_old, cnt)
+ self.conn.execute("UPDATE inodes SET refcount=? WHERE id=?", (cnt, id_))
+ finally:
+ self.conn.execute('DROP TABLE refcounts')
+ self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts')
+
+
+ def check_inode_unix(self):
+ """Check inode attributes for agreement with UNIX conventions
+
+ This means:
+ - Only directories should have child entries
+ - Only regular files should have data blocks and a size
+ - Only symlinks should have a target
+ - Only devices should have a device number
+ - symlink size is length of target
+
+ Note that none of this is enforced by S3QL. However, as long
+ as S3QL only communicates with the UNIX FUSE module, none of
+ the above should happen (and if it does, it would probably
+ confuse the system quite a lot).
+ """
+
+ log.info('Checking inodes (types)...')
+
+ for (inode, mode, size, target, rdev) \
+ in self.conn.query("SELECT id, mode, size, target, rdev FROM inodes"):
+
+ if stat.S_ISLNK(mode) and size != len(target):
+ self.found_errors = True
+ self.log_error('Inode %d (%s): symlink size (%d) does not agree with target '
+ 'length (%d). This is probably going to confuse your system!',
+ inode, get_path(inode, self.conn), size, len(target))
+
+ if size != 0 and (not stat.S_ISREG(mode)
+ and not stat.S_ISLNK(mode)
+ and not stat.S_ISDIR(mode)):
+ self.found_errors = True
+                self.log_error('Inode %d (%s) is not a regular file but has non-zero size. '
+                               'This may confuse your system!',
+ inode, get_path(inode, self.conn))
+
+ if target is not None and not stat.S_ISLNK(mode):
+ self.found_errors = True
+ self.log_error('Inode %d (%s) is not symlink but has symlink target. '
+ 'This is probably going to confuse your system!',
+ inode, get_path(inode, self.conn))
+
+ if rdev != 0 and not (stat.S_ISBLK(mode) or stat.S_ISCHR(mode)):
+ self.found_errors = True
+ self.log_error('Inode %d (%s) is not device but has device number. '
+ 'This is probably going to confuse your system!',
+ inode, get_path(inode, self.conn))
+
+ has_children = self.conn.has_val('SELECT 1 FROM contents WHERE parent_inode=? LIMIT 1',
+ (inode,))
+ if has_children and not stat.S_ISDIR(mode):
+ self.found_errors = True
+ self.log_error('Inode %d (%s) is not a directory but has child entries. '
+ 'This is probably going to confuse your system!',
+ inode, get_path(inode, self.conn))
+
+ has_blocks = self.conn.has_val('SELECT 1 FROM blocks WHERE inode=? LIMIT 1',
+ (inode,))
+ if has_blocks and not stat.S_ISREG(mode):
+ self.found_errors = True
+ self.log_error('Inode %d (%s) is not a regular file but has data blocks. '
+ 'This is probably going to confuse your system!',
+ inode, get_path(inode, self.conn))
+
+
+ def check_obj_refcounts(self):
+ """Check object reference counts"""
+
+ log.info('Checking object reference counts...')
+
+ self.conn.execute('CREATE TEMPORARY TABLE refcounts '
+ '(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)')
+ try:
+ self.conn.execute('INSERT INTO refcounts (id, refcount) '
+ 'SELECT obj_id, COUNT(inode) FROM blocks GROUP BY obj_id')
+
+ self.conn.execute('''
+ CREATE TEMPORARY TABLE wrong_refcounts AS
+ SELECT id, refcounts.refcount, objects.refcount
+ FROM objects LEFT JOIN refcounts USING (id)
+ WHERE objects.refcount != refcounts.refcount
+ OR refcounts.refcount IS NULL''')
+
+ for (id_, cnt, cnt_old) in self.conn.query('SELECT * FROM wrong_refcounts'):
+ self.log_error("Object %s has invalid refcount, setting from %d to %d",
+ id_, cnt_old, cnt or 0)
+ self.found_errors = True
+ if cnt is not None:
+ self.conn.execute("UPDATE objects SET refcount=? WHERE id=?",
+ (cnt, id_))
+ else:
+ # Orphaned object will be picked up by check_keylist
+ self.conn.execute('DELETE FROM objects WHERE id=?', (id_,))
+ finally:
+ self.conn.execute('DROP TABLE refcounts')
+ self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts')
+
+ def check_keylist(self):
+ """Check the list of objects.
+
+ Checks that:
+ - all objects are referred in the object table
+ - all objects in the object table exist
+ - object has correct hash
+ """
+
+ log.info('Checking object list...')
+
+ lof_id = self.conn.get_val("SELECT inode FROM contents WHERE name=? AND parent_inode=?",
+ (b"lost+found", ROOT_INODE))
+
+ # We use this table to keep track of the objects that we have
+ # seen
+ self.conn.execute("CREATE TEMP TABLE obj_ids (id INTEGER PRIMARY KEY)")
+ try:
+ for (i, obj_name) in enumerate(self.bucket.list('s3ql_data_')):
+
+ if i != 0 and i % 5000 == 0:
+ log.info('..processed %d objects so far..', i)
+
+ # We only bother with data objects
+ obj_id = int(obj_name[10:])
+
+ self.conn.execute('INSERT INTO obj_ids VALUES(?)', (obj_id,))
+
+ for (obj_id,) in self.conn.query('SELECT id FROM obj_ids '
+ 'EXCEPT SELECT id FROM objects'):
+ self.found_errors = True
+ self.log_error("Deleting spurious object %d", obj_id)
+ try:
+ del self.bucket['s3ql_data_%d' % obj_id]
+ except NoSuchObject:
+ if self.bucket.read_after_write_consistent():
+ raise
+
+ self.conn.execute('CREATE TEMPORARY TABLE missing AS '
+ 'SELECT id FROM objects EXCEPT SELECT id FROM obj_ids')
+ for (obj_id,) in self.conn.query('SELECT * FROM missing'):
+ self.found_errors = True
+ self.log_error("object %s only exists in table but not in bucket, deleting", obj_id)
+ self.log_error("The following files may lack data and have been moved to /lost+found:")
+ for (id_,) in self.conn.query('SELECT inode FROM blocks WHERE obj_id=?', (obj_id,)):
+ for (name, id_p) in self.conn.query('SELECT name, parent_inode FROM contents '
+ 'WHERE inode=?', (id_,)):
+ path = get_path(id_p, self.conn, name)
+ self.log_error(path)
+ (_, newname) = self.resolve_free(b"/lost+found",
+ path[1:].replace('_', '__').replace('/', '_'))
+
+ self.conn.execute('UPDATE contents SET name=?, parent_inode=? '
+ 'WHERE name=? AND parent_inode=?',
+ (newname, lof_id, name, id_p))
+
+ self.conn.execute("DELETE FROM blocks WHERE obj_id=?", (obj_id,))
+ self.conn.execute("DELETE FROM objects WHERE id=?", (obj_id,))
+ finally:
+ self.conn.execute('DROP TABLE obj_ids')
+ self.conn.execute('DROP TABLE IF EXISTS missing')
+
+
+ def resolve_free(self, path, name):
+ '''Return parent inode and name of an unused directory entry
+
+ The directory entry will be in `path`. If an entry `name` already
+ exists there, we append a numeric suffix.
+ '''
+
+ if not isinstance(path, bytes):
+ raise TypeError('path must be of type bytes')
+
+ inode_p = inode_for_path(path, self.conn)
+
+ i = 0
+ newname = name
+ name += b'-'
+ try:
+ while True:
+ self.conn.get_val("SELECT inode FROM contents WHERE name=? AND parent_inode=?",
+ (newname, inode_p))
+ i += 1
+ newname = name + bytes(i)
+
+ except NoSuchRowError:
+ pass
+
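+        # Example (sketch): if /lost+found/inode-5 already exists, the loop
+        # below yields inode-5-1, inode-5-2, ... until a free name is found.
+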
+ # Debugging http://code.google.com/p/s3ql/issues/detail?id=217
+ assert len(newname) < 256
+
+ return (inode_p, newname)
diff --git a/src/s3ql/inode_cache.py b/src/s3ql/inode_cache.py
new file mode 100644
index 0000000..149947f
--- /dev/null
+++ b/src/s3ql/inode_cache.py
@@ -0,0 +1,286 @@
+'''
+inode_cache.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import time
+import logging
+from random import randint
+import apsw
+from .database import NoSuchRowError
+
+__all__ = [ 'InodeCache', 'OutOfInodesError' ]
+log = logging.getLogger('inode_cache')
+
+CACHE_SIZE = 100
+ATTRIBUTES = ('mode', 'refcount', 'uid', 'gid', 'size', 'locked',
+ 'rdev', 'target', 'atime', 'mtime', 'ctime', 'id')
+ATTRIBUTE_STR = ', '.join(ATTRIBUTES)
+UPDATE_ATTRS = ('mode', 'refcount', 'uid', 'gid', 'size', 'locked',
+ 'rdev', 'target', 'atime', 'mtime', 'ctime')
+UPDATE_STR = ', '.join('%s=?' % x for x in UPDATE_ATTRS)
+TIMEZONE = time.timezone
+
+class _Inode(object):
+ '''An inode with its attributes'''
+
+ __slots__ = ATTRIBUTES + ('dirty',)
+
+ def __init__(self):
+ super(_Inode, self).__init__()
+ self.dirty = False
+
+ # This allows access to all st_* attributes, even if they're
+ # not defined in the table
+ def __getattr__(self, key):
+ if key == 'st_nlink':
+ return self.refcount
+
+ elif key == 'st_blocks':
+ return self.size // 512
+
+ elif key == 'st_ino':
+ return self.id
+
+ # Timeout, can effectively be infinite since attribute changes
+ # are only triggered by the kernel's own requests
+ elif key == 'attr_timeout' or key == 'entry_timeout':
+ return 3600
+
+ # We want our blocksize for IO as large as possible to get large
+ # write requests
+ elif key == 'st_blksize':
+ return 128 * 1024
+
+ # Our inodes are already unique
+ elif key == 'generation':
+ return 1
+
+        elif key.startswith('st_'):
+            return getattr(self, key[3:])
+
+        else:
+            # Without this, a mistyped attribute name would silently
+            # evaluate to None
+            raise AttributeError(key)
+
+ def __eq__(self, other):
+ if not isinstance(other, _Inode):
+ return NotImplemented
+
+ for attr in ATTRIBUTES:
+ if getattr(self, attr) != getattr(other, attr):
+ return False
+
+ return True
+
+
+ def copy(self):
+ copy = _Inode()
+
+ for attr in ATTRIBUTES:
+ setattr(copy, attr, getattr(self, attr))
+
+ return copy
+
+ def __setattr__(self, name, value):
+ if name != 'dirty':
+ object.__setattr__(self, 'dirty', True)
+ object.__setattr__(self, name, value)
+
+
+class InodeCache(object):
+ '''
+ This class maps the `inode` SQL table to a dict, caching the rows.
+
+ If the cache is full and a row is not in the cache, the least-recently
+ retrieved row is deleted from the cache. This means that accessing
+ cached rows will *not* change the order of their expiration.
+
+ Attributes:
+ -----------
+ :attrs: inode indexed dict holding the attributes
+ :cached_rows: list of the inodes that are in cache
+ :pos: position of the most recently retrieved inode in
+ 'cached_rows'.
+
+ Notes
+ -----
+
+    Callers should keep in mind that changes to a returned inode
+    object will only be written to the database if the inode is still
+    in the cache when its attributes are updated: it is possible for
+    the caller to keep a reference to an inode after that
+    inode has already been expired from the InodeCache. Modifications
+    made to such an inode object will be lost(!).
+
+ Callers should therefore use the returned inode objects only
+ as long as they can guarantee that no other calls to InodeCache
+ are made that may result in expiration of inodes from the cache.
+
+    Moreover, the caller must make sure not to call InodeCache
+    methods while a database transaction is active that may be
+    rolled back. This would roll back database updates performed
+    by InodeCache, which are generally for inodes that have already
+    been expired from the cache and are therefore *not* directly
+    related to the effects of the current method call.
+ '''
+
+ def __init__(self, db):
+ self.attrs = dict()
+ self.cached_rows = list()
+ self.db = db
+
+ # Fill the cache with dummy data, so that we don't have to
+ # check if the cache is full or not (it will always be full)
+ for _ in xrange(CACHE_SIZE):
+ self.cached_rows.append(None)
+
+ self.pos = 0
+
+
+ def __delitem__(self, inode):
+ if self.db.execute('DELETE FROM inodes WHERE id=?', (inode,)) != 1:
+ raise KeyError('No such inode')
+ try:
+ del self.attrs[inode]
+ except KeyError:
+ pass
+
+ def __getitem__(self, id_):
+ try:
+ return self.attrs[id_]
+ except KeyError:
+ try:
+ inode = self.getattr(id_)
+ except NoSuchRowError:
+ raise KeyError('No such inode: %d' % id_)
+
+ old_id = self.cached_rows[self.pos]
+ self.cached_rows[self.pos] = id_
+ self.pos = (self.pos + 1) % CACHE_SIZE
+ if old_id is not None:
+ try:
+ old_inode = self.attrs[old_id]
+ except KeyError:
+ # We may have deleted that inode
+ pass
+ else:
+ del self.attrs[old_id]
+ self.setattr(old_inode)
+ self.attrs[id_] = inode
+ return inode
+
+ def getattr(self, id_):
+ attrs = self.db.get_row("SELECT %s FROM inodes WHERE id=? " % ATTRIBUTE_STR,
+ (id_,))
+ inode = _Inode()
+
+        for (i, name) in enumerate(ATTRIBUTES):
+            setattr(inode, name, attrs[i])
+
+ # Convert to local time
+ # Pylint does not detect the attributes
+ #pylint: disable=E1101
+ inode.atime += TIMEZONE
+ inode.mtime += TIMEZONE
+ inode.ctime += TIMEZONE
+
+ inode.dirty = False
+
+ return inode
+
+ def create_inode(self, **kw):
+
+ inode = _Inode()
+
+ for (key, val) in kw.iteritems():
+ setattr(inode, key, val)
+
+ for i in ('atime', 'ctime', 'mtime'):
+ kw[i] -= TIMEZONE
+
+ init_attrs = [ x for x in ATTRIBUTES if x in kw ]
+
+ # We want to restrict inodes to 2^32, and we do not want to immediately
+ # reuse deleted inodes (so that the lack of generation numbers isn't too
+ # likely to cause problems with NFS)
+ sql = ('INSERT INTO inodes (id, %s) VALUES(?, %s)'
+ % (', '.join(init_attrs), ','.join('?' for _ in init_attrs)))
+ bindings = [ kw[x] for x in init_attrs ]
+ for _ in range(100):
+ # _Inode.id is not explicitly defined
+ #pylint: disable-msg=W0201
+ inode.id = randint(0, 2 ** 32 - 1)
+ try:
+ self.db.execute(sql, [inode.id] + bindings)
+ except apsw.ConstraintError:
+ pass
+ else:
+ break
+ else:
+ raise OutOfInodesError()
+
+
+ return self[inode.id]
+
+
+ def setattr(self, inode):
+ if not inode.dirty:
+ return
+ inode.dirty = False
+ inode = inode.copy()
+
+ inode.atime -= TIMEZONE
+ inode.mtime -= TIMEZONE
+ inode.ctime -= TIMEZONE
+
+ self.db.execute("UPDATE inodes SET %s WHERE id=?" % UPDATE_STR,
+ [ getattr(inode, x) for x in UPDATE_ATTRS ] + [inode.id])
+
+ def flush_id(self, id_):
+ if id_ in self.attrs:
+ self.setattr(self.attrs[id_])
+
+ def destroy(self):
+ '''Finalize cache'''
+
+ for i in xrange(len(self.cached_rows)):
+ id_ = self.cached_rows[i]
+ self.cached_rows[i] = None
+ if id_ is not None:
+ try:
+ inode = self.attrs[id_]
+ except KeyError:
+ # We may have deleted that inode
+ pass
+ else:
+ del self.attrs[id_]
+ self.setattr(inode)
+
+ def flush(self):
+ '''Flush all entries to database'''
+
+ # We don't want to use dict.itervalues() since
+ # the dict may change while we iterate
+ for i in xrange(len(self.cached_rows)):
+ id_ = self.cached_rows[i]
+ if id_ is not None:
+ try:
+ inode = self.attrs[id_]
+ except KeyError:
+ # We may have deleted that inode
+ pass
+ else:
+ self.setattr(inode)
+
+ def __del__(self):
+ if self.attrs:
+            raise RuntimeError('InodeCache instance was destroyed without calling destroy()')
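+
+# Example usage (sketch; `db` is assumed to be an open connection to a
+# database with the S3QL table schema, and the `stat` module to be
+# imported; the keyword arguments mirror those used by fs.py):
+#
+#   inodes = InodeCache(db)
+#   now = time.time()
+#   inode = inodes.create_inode(mode=stat.S_IFREG | 0644, uid=0, gid=0,
+#                               mtime=now, atime=now, ctime=now,
+#                               refcount=1, rdev=0, target=None, size=0)
+#   inode.size = 42            # marks the inode dirty
+#   inodes.flush_id(inode.id)  # write the row back to the database
+#   inodes.destroy()           # flush everything before dropping the cache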
+
+
+
+class OutOfInodesError(Exception):
+
+ def __str__(self):
+ return 'Could not find free rowid in inode table'
diff --git a/src/s3ql/multi_lock.py b/src/s3ql/multi_lock.py
new file mode 100644
index 0000000..865c16c
--- /dev/null
+++ b/src/s3ql/multi_lock.py
@@ -0,0 +1,85 @@
+'''
+multi_lock.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import threading
+import logging
+from contextlib import contextmanager
+import time
+
+__all__ = [ "MultiLock" ]
+
+log = logging.getLogger("MultiLock")
+
+
+# For debugging, can be set to impose an artificial delay when
+# obtaining the lock. Introduced to debug a very
+# timing-critical bug.
+FAKEDELAY = False
+
+class MultiLock(object):
+ """Provides locking for multiple objects.
+
+ This class provides locking for a dynamically changing set of objects:
+ The `acquire` and `release` methods have an additional argument, the
+ locking key. Only locks with the same key can actually see each other,
+ so that several threads can hold locks with different locking keys
+ at the same time.
+
+ MultiLock instances can be used with `with` statements as
+
+ lock = MultiLock()
+ with lock(key):
+ pass
+
+ Note that it is actually possible for one thread to release a lock
+ that has been obtained by a different thread. This is not a bug,
+ but a feature used in `BlockCache._expire_parallel`.
+ """
+
+ def __init__(self):
+ self.locked_keys = set()
+ self.cond = threading.Condition()
+
+
+ @contextmanager
+ def __call__(self, *key):
+ self.acquire(*key)
+ try:
+ yield
+ finally:
+ self.release(*key)
+
+ def acquire(self, *key):
+ '''Acquire lock for given key'''
+
+ if FAKEDELAY:
+ time.sleep(FAKEDELAY)
+
+ # Lock set of lockedkeys (global lock)
+ with self.cond:
+
+ # Wait for given key becoming unused
+ while key in self.locked_keys:
+ self.cond.wait()
+
+ # Mark it as used (local lock)
+ self.locked_keys.add(key)
+
+ def release(self, *key):
+ """Release lock on given key"""
+
+ # Lock set of locked keys (global lock)
+ with self.cond:
+
+ # Mark key as free (release local lock)
+ self.locked_keys.remove(key)
+
+ # Notify other threads
+ self.cond.notifyAll()
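+
+# Example (sketch): locks on different keys do not block each other, while
+# a second acquire() on the same key waits:
+#
+#   mlock = MultiLock()
+#   with mlock('inode', 42):      # the key is the tuple ('inode', 42)
+#       pass                      # another thread may hold ('inode', 43)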
diff --git a/src/s3ql/ordered_dict.py b/src/s3ql/ordered_dict.py
new file mode 100644
index 0000000..e52cb69
--- /dev/null
+++ b/src/s3ql/ordered_dict.py
@@ -0,0 +1,219 @@
+'''
+ordered_dict.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import threading
+import collections
+
+
+__all__ = [ "OrderedDict" ]
+
+class OrderedDictElement(object):
+ """An element in an OrderedDict
+
+
+ Attributes:
+ -----------
+
+ :next: Next element in list (closer to tail)
+ :prev: Previous element in list (closer to head)
+ :key: Dict key of the element
+ :value: Dict value of the element
+ """
+
+ __slots__ = [ "next", "prev", "key", "value" ]
+
+ def __init__(self, key, value, next_=None, prev=None):
+ self.key = key
+ self.value = value
+ self.next = next_
+ self.prev = prev
+
+class HeadSentinel(object):
+ '''Sentinel that marks the head of a linked list
+ '''
+
+ __slots__ = [ 'next' ]
+
+ def __init__(self, next_=None):
+ self.next = next_
+
+ def __str__(self):
+ return '<head sentinel>'
+
+class TailSentinel(object):
+ '''Sentinel that marks the tail of a linked list
+ '''
+
+ __slots__ = [ 'prev' ]
+
+ def __init__(self, prev=None):
+ self.prev = prev
+
+ def __str__(self):
+ return '<tail sentinel>'
+
+class OrderedDict(collections.MutableMapping):
+ """Implements an ordered dictionary
+
+ The order is maintained by wrapping dictionary elements in
+ OrderedDictElement objects which are kept in a linked list
+ and a dict at the same time.
+
+ When new elements are added to the ordered dictionary by an
+ obj[key] = val assignment, the new element is added at the
+ beginning of the list. If the key already exists, the position
+ of the element does not change.
+
+ All methods are threadsafe and may be called concurrently
+ from several threads.
+
+ Attributes:
+ -----------
+ :data: Backend dict object that holds OrderedDictElement instances
+ :lock: Global lock, required when rearranging the order
+ :head: First element in list
+ :tail: Last element in list
+
+ """
+
+ def __init__(self):
+ self.data = dict()
+ self.lock = threading.Lock()
+ self.head = HeadSentinel()
+ self.tail = TailSentinel(self.head)
+ self.head.next = self.tail
+
+ def __setitem__(self, key, value):
+ with self.lock:
+ if key in self.data:
+ self.data[key].value = value
+ else:
+ el = OrderedDictElement(key, value, next_=self.head.next, prev=self.head)
+ self.head.next.prev = el
+ self.head.next = el
+ self.data[key] = el
+
+ def __delitem__(self, key):
+ with self.lock:
+ el = self.data.pop(key) # exception can be passed on
+ el.prev.next = el.next
+ el.next.prev = el.prev
+
+ def __getitem__(self, key):
+ return self.data[key].value
+
+ def __len__(self):
+ return len(self.data)
+
+ def __iter__(self):
+ cur = self.head.next
+ while cur is not self.tail:
+ yield cur.key
+ cur = cur.next
+
+ def __reversed__(self):
+ cur = self.tail.prev
+ while cur is not self.head:
+ yield cur.key
+ cur = cur.prev
+
+ def values_rev(self):
+ '''Iterator over all values, starting from tail'''
+ cur = self.tail.prev
+ while cur is not self.head:
+ yield cur.value
+ cur = cur.prev
+
+ def __contains__(self, key):
+ return key in self.data
+
+ def to_head(self, key):
+ """Moves `key` to the head in the ordering
+ """
+ with self.lock:
+ el = self.data[key]
+ # Splice out
+ el.prev.next = el.next
+ el.next.prev = el.prev
+
+ # Insert back at front
+ el.next = self.head.next
+ el.prev = self.head
+
+ self.head.next.prev = el
+ self.head.next = el
+
+ def to_tail(self, key):
+ """Moves `key` to the end in the ordering
+ """
+ with self.lock:
+ el = self.data[key]
+ # Splice out
+ el.prev.next = el.next
+ el.next.prev = el.prev
+
+ # Insert back at end
+ el.next = self.tail
+ el.prev = self.tail.prev
+
+ self.tail.prev.next = el
+ self.tail.prev = el
+
+ def pop_last(self):
+ """Fetch and remove last element
+ """
+ with self.lock:
+ el = self.tail.prev
+ if el is self.head:
+ raise IndexError()
+
+ del self.data[el.key]
+ self.tail.prev = el.prev
+ el.prev.next = self.tail
+
+ return el.value
+
+ def get_last(self):
+ """Fetch last element"""
+ with self.lock:
+ if self.tail.prev is self.head:
+ raise IndexError()
+
+ return self.tail.prev.value
+
+ def pop_first(self):
+ """Fetch and remove first element"""
+ with self.lock:
+ el = self.head.next
+ if el is self.tail:
+ raise IndexError
+ del self.data[el.key]
+ self.head.next = el.next
+ el.next.prev = self.head
+
+ return el.value
+
+ def get_first(self):
+ """Fetch first element"""
+
+ with self.lock:
+ if self.head.next is self.tail:
+ raise IndexError()
+
+ return self.head.next.value
+
+ def clear(self):
+ '''Delete all elements'''
+
+ with self.lock:
+ self.data.clear()
+ self.head = HeadSentinel()
+ self.tail = TailSentinel(self.head)
+ self.head.next = self.tail
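+
+# Example (sketch): new keys are inserted at the head, so to_head() and
+# pop_last() give LRU-style behavior:
+#
+#   d = OrderedDict()
+#   d['a'] = 1
+#   d['b'] = 2                 # 'b' is now at the head
+#   assert d.get_first() == 2
+#   d.to_head('a')
+#   assert d.pop_last() == 2   # 'b' is now at the tail and expires first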
diff --git a/src/s3ql/parse_args.py b/src/s3ql/parse_args.py
new file mode 100644
index 0000000..22a0d26
--- /dev/null
+++ b/src/s3ql/parse_args.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+#
+# parse_args.py - this file is part of S3QL (http://s3ql.googlecode.com)
+#
+# Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+#
+# This program can be distributed under the terms of the GNU LGPL.
+#
+'''
+This module provides a customized ArgumentParser class. Differences
+are:
+
+ * a --version argument is added by default
+
+ * convenience functions are available for adding --quiet,
+ --debug and --homedir options.
+
+ * instead of the usage string one can pass a usage list. The first
+ element will be prefixed with ``usage: `` as usual. Additional
+ elements will be printed on separate lines and prefixed with
+ `` or: ``.
+
+ * When used as an element of a usage list, the ``DEFAULT_USAGE`` object
+   will be replaced by the automatically generated usage message,
+   excluding any --help arguments.
+
+ * When ``DEFAULT_USAGE`` is specified on its own (not in a list), the
+   replacement will be done including any --help arguments.
+
+ * The ``usage`` and ``add_help`` settings are inherited from the
+ parent parser to the subparsers.
+
+'''
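+
+# Example (sketch) of the usage-list feature described above:
+#
+#   parser = ArgumentParser(
+#       usage=[ '%(prog)s [options] <storage-url>', DEFAULT_USAGE ])
+#   parser.add_version()
+#   parser.add_quiet()
+#   options = parser.parse_args()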
+
+# Pylint really gets confused by this module
+#pylint: disable-all
+
+from __future__ import division, print_function, absolute_import
+
+import s3ql
+import argparse
+import re
+import os
+import textwrap
+
+__all__ = [ 'ArgumentParser', 'DEFAULT_USAGE']
+
+DEFAULT_USAGE = object()
+
+class HelpFormatter(argparse.RawDescriptionHelpFormatter):
+
+ def _format_usage(self, usage, actions, groups, prefix):
+ '''Special handling for usage lists
+
+ If usage is a list object, its elements will be printed on
+ separate lines. DEFAULT_USAGE will be replaced by the
+        default usage string of the parser (but, if `usage` is a list,
+        excluding any --help arguments).
+ '''
+
+ if isinstance(usage, list):
+ # Omit help argument
+ actions = [ x for x in actions if not isinstance(x, argparse._HelpAction) ]
+ res = []
+ for s in usage:
+ if not res:
+ res.append('usage: ')
+ else:
+ res.append(' or: ')
+ if s is DEFAULT_USAGE:
+ res.append(super(HelpFormatter, self)._format_usage(None, actions, groups, '')[:-1])
+ else:
+ res.append(s % dict(prog=self._prog))
+ res.append('\n')
+
+ return '%s\n\n' % ''.join(res)
+
+ elif usage is DEFAULT_USAGE:
+ return super(HelpFormatter, self)._format_usage(None, actions, groups, prefix)
+ else:
+ return super(HelpFormatter, self)._format_usage(usage, actions, groups, prefix)
+
+ def format_help(self):
+ help = super(HelpFormatter, self).format_help()
+ if help.count('\n') > 2:
+ return help+'\n'
+ else:
+ return help
+
+
+class SubParsersAction(argparse._SubParsersAction):
+ '''A replacement for _SubParsersAction that keeps
+ track of the parent parser'''
+
+ def __init__(self, **kw):
+ self.parent = kw.pop('parent')
+ super(SubParsersAction, self).__init__(**kw)
+
+ def add_parser(self, *a, **kwargs):
+ '''Pass parent usage and add_help attributes to new parser'''
+
+ if 'usage' not in kwargs:
+ # Inherit, but preserve old progs attribute
+ usage = self.parent.usage
+ repl = dict(prog=self.parent.prog)
+ if isinstance(usage, list):
+ usage = [ (x % repl if isinstance(x, str) else x)
+ for x in usage ]
+ elif usage:
+ usage = usage % repl
+ kwargs['usage'] = usage
+
+ if 'help' in kwargs:
+ kwargs.setdefault('description', kwargs['help'].capitalize() + '.')
+
+ kwargs.setdefault('add_help', self.parent.add_help)
+ kwargs.setdefault('formatter_class', self.parent.formatter_class)
+
+ if 'parents' in kwargs:
+ for p in kwargs['parents']:
+ if p.epilog:
+ kwargs.setdefault('epilog', p.epilog % dict(prog=self.parent.prog))
+
+ return super(SubParsersAction, self).add_parser(*a, **kwargs)
+
+
+class ArgumentParser(argparse.ArgumentParser):
+
+ def __init__(self, *a, **kw):
+ if 'formatter_class' not in kw:
+ kw['formatter_class'] = HelpFormatter
+
+ super(ArgumentParser, self).__init__(*a, **kw)
+ self.register('action', 'parsers', SubParsersAction)
+
+ def add_version(self):
+ self.add_argument('--version', action='version',
+ help="just print program version and exit",
+ version='S3QL %s' % s3ql.VERSION)
+
+ def add_quiet(self):
+ self.add_argument("--quiet", action="store_true", default=False,
+ help="be really quiet")
+
+ def add_debug_modules(self):
+ self.add_argument("--debug", action="append", metavar='<module>',
+ help="activate debugging output from <module>. Use `all` "
+ "to get debug messages from all modules. This option can be "
+ "specified multiple times.")
+
+ def add_debug(self):
+ self.add_argument("--debug", action="store_const", const=['all'],
+ help="activate debugging output")
+
+ def add_homedir(self):
+ self.add_argument("--homedir", type=str, metavar='<path>',
+ default=os.path.expanduser("~/.s3ql"),
+ help='Directory for log files, cache and authentication info. '
+                          '(default: `~/.s3ql`)')
+
+ def add_storage_url(self):
+ self.add_argument("storage_url", metavar='<storage-url>',
+ type=storage_url_type,
+ help='Storage URL of the backend that contains the file system')
+
+ def add_ssl(self):
+ self.add_argument("--ssl", action="store_true", default=False,
+ help=textwrap.dedent('''\
+ Use SSL when connecting to remote servers. This option
+ is not enabled by default, because for encrypted file
+ systems, all data is already encrypted anyway, and
+ authentication data is never transmitted in plaintext
+ even for unencrypted file systems.
+ '''))
+
+
+ def add_subparsers(self, **kw):
+ '''Pass parent and set prog to default usage message'''
+ kw.setdefault('parser_class', argparse.ArgumentParser)
+
+ kw['parent'] = self
+
+ # prog defaults to the usage message of this parser, skipping
+ # optional arguments and with no "usage:" prefix
+ if kw.get('prog') is None:
+ formatter = self._get_formatter()
+ positionals = self._get_positional_actions()
+ groups = self._mutually_exclusive_groups
+ formatter.add_usage(None, positionals, groups, '')
+ kw['prog'] = formatter.format_help().strip()
+
+ return super(ArgumentParser, self).add_subparsers(**kw)
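+
+    # Illustrative usage (sketch; the add_* helpers are those defined
+    # above, the description text is made up):
+    #
+    #   parser = ArgumentParser(description='Frobnicate the bucket')
+    #   parser.add_version()
+    #   parser.add_quiet()
+    #   options = parser.parse_args()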
+
+
+def storage_url_type(s):
+ '''Validate and canonicalize storage url'''
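+
+    # For illustration, typical results (examples, not exhaustive):
+    #   'local://data/bucket'   -> 'local:///<absolute path>/data/bucket'
+    #   's3://mybucket'         -> returned unchanged
+    #   'sftp://host:22/a/path' -> returned unchanged (matches generic pattern)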
+
+ if s.startswith('local://'):
+ return 'local://%s' % os.path.abspath(s[len('local://'):])
+ elif s.startswith('s3://') or s.startswith('s3rr://'):
+ return s
+ elif re.match(r'^([a-z]+)://([a-zA-Z0-9.-]+)(?::([0-9]+))?(/[a-zA-Z0-9./_-]+)$',
+ s):
+ return s
+ else:
+ msg = '%s is not a valid storage url.' % s
+ raise argparse.ArgumentTypeError(msg) \ No newline at end of file
diff --git a/src/s3ql/thread_group.py b/src/s3ql/thread_group.py
new file mode 100644
index 0000000..db077b4
--- /dev/null
+++ b/src/s3ql/thread_group.py
@@ -0,0 +1,171 @@
+'''
+thread_group.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import threading
+import sys
+import logging
+from .common import ExceptionStoringThread
+from llfuse import lock, lock_released
+
+log = logging.getLogger("thread_group")
+
+__all__ = [ 'Thread', 'ThreadGroup' ]
+
+class Thread(ExceptionStoringThread):
+ '''
+ A thread that can be executed in a `ThreadGroup`.
+ '''
+
+ def __init__(self):
+ super(Thread, self).__init__()
+ self._group = None
+
+ def run_protected(self):
+ '''Perform task asynchronously
+
+ This method must be overridden in derived classes. It will be
+ called in a separate thread. Exceptions will be encapsulated in
+ `EmbeddedException` and re-raised when the thread is joined.
+ '''
+ pass
+
+ def start(self):
+ if not self._group:
+ raise ValueError('Must set _group attribute first')
+
+ super(Thread, self).start()
+
+ def run(self):
+ try:
+ try:
+ self.run_protected()
+ finally:
+ log.debug('thread: waiting for lock')
+ with self._group.lock:
+ log.debug('thread: calling notify()')
+ self._group.active_threads.remove(self)
+ self._group.finished_threads.append(self)
+ self._group.lock.notifyAll()
+ except:
+ self._exc_info = sys.exc_info() # This creates a circular reference chain
+
+class ThreadGroup(object):
+ '''Represents a group of threads.
+
+ This class uses the llfuse global lock. Methods which release the
+    global lock are marked as such in their docstring.
+
+ Implementation Note:
+ --------------------
+
+ ThreadGroup instances have an internal lock object that is used to
+ communicate with the started threads. These threads do not hold the global
+ lock when they start and finish. To prevent deadlocks, the instance-level
+ lock must therefore only be acquired when the global lock is not held.
+ '''
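+
+    # Illustrative usage (sketch; WorkerThread stands for a hypothetical
+    # Thread subclass that overrides run_protected):
+    #
+    #   group = ThreadGroup(max_threads=4)
+    #   for task in tasks:
+    #       group.add_thread(WorkerThread(task))
+    #   group.join_all()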
+
+ def __init__(self, max_threads):
+ '''Initialize thread group
+
+        `max_threads` specifies the maximum number of concurrently
+        running threads in the group.
+ '''
+
+ self.max_threads = max_threads
+ self.active_threads = list()
+ self.finished_threads = list()
+ self.lock = threading.Condition(threading.RLock())
+
+ def add_thread(self, t, max_threads=None):
+ '''Add new thread
+
+ `t` must be a `Thread` instance that has overridden
+ the `run_protected` and possibly `finalize` methods.
+
+        This method waits until there are fewer than `max_threads` active
+        threads, then it starts `t`.
+
+ If `max_threads` is `None`, the `max_threads` value passed
+ to the constructor is used instead.
+
+ This method may release the global lock.
+ '''
+
+ if not isinstance(t, Thread):
+ raise TypeError('Parameter must be `Thread` instance')
+ t._group = self
+
+ if max_threads is None:
+ max_threads = self.max_threads
+
+ lock.release()
+ with self.lock:
+ lock.acquire()
+ while len(self) >= max_threads:
+ self.join_one()
+
+ self.active_threads.append(t)
+
+ t.start()
+
+ def join_one(self):
+ '''Wait for any one thread to finish
+
+ If the thread terminated with an exception, the exception
+ is encapsulated in `EmbeddedException` and raised again.
+
+ If there are no active threads, the call returns without doing
+ anything.
+
+ If more than one thread has called `join_one`, a single thread
+ that finishes execution will cause all pending `join_one` calls
+ to return.
+
+ This method may release the global lock.
+ '''
+
+ lock.release()
+ with self.lock:
+ lock.acquire()
+
+            # Return immediately if there are no threads to join
+ if len(self) == 0:
+ return
+
+ try:
+ t = self.finished_threads.pop()
+ except IndexError:
+ # Wait for thread to terminate
+ log.debug('join_one: wait()')
+ with lock_released:
+ self.lock.wait()
+ try:
+ t = self.finished_threads.pop()
+ except IndexError:
+ # Already joined by other waiting thread
+ return
+
+ t.join_and_raise()
+
+ def join_all(self):
+ '''Call join_one() until all threads have terminated
+
+ This method may release the global lock.
+ '''
+
+ with self.lock:
+ while len(self) > 0:
+ self.join_one()
+
+ def __len__(self):
+ lock.release()
+ with self.lock:
+ lock.acquire()
+ return len(self.active_threads) + len(self.finished_threads)
diff --git a/src/s3ql/upload_manager.py b/src/s3ql/upload_manager.py
new file mode 100644
index 0000000..1cf374d
--- /dev/null
+++ b/src/s3ql/upload_manager.py
@@ -0,0 +1,387 @@
+'''
+upload_manager.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from .backends.common import NoSuchObject
+from .common import sha256_fh, TimeoutError
+from .thread_group import ThreadGroup, Thread
+from .database import NoSuchRowError
+import logging
+import threading
+import os
+import errno
+from llfuse import lock
+import time
+from s3ql.common import EmbeddedException
+
+__all__ = [ "UploadManager", 'retry_exc', 'RemoveThread' ]
+
+# standard logger for this module
+log = logging.getLogger("UploadManager")
+
+
+MAX_UPLOAD_THREADS = 10
+MAX_COMPRESS_THREADS = 1
+MIN_TRANSIT_SIZE = 1024 * 1024
+
+class UploadManager(object):
+ '''
+    Schedules and executes object uploads to make optimum use of
+    network bandwidth and CPU time.
+
+    Methods which release the global lock are marked as such in
+    their docstring.
+
+ Attributes:
+ -----------
+
+ :encountered_errors: This attribute is set if some non-fatal errors
+ were encountered during asynchronous operations (for
+ example, an object that was supposed to be deleted did
+ not exist).
+ '''
+
+ def __init__(self, bucket, db, removal_queue):
+ self.upload_threads = ThreadGroup(MAX_UPLOAD_THREADS)
+ self.compress_threads = ThreadGroup(MAX_COMPRESS_THREADS)
+ self.removal_queue = removal_queue
+ self.bucket = bucket
+ self.db = db
+ self.transit_size = 0
+ self.transit_size_lock = threading.Lock()
+ self.in_transit = set()
+ self.encountered_errors = False
+
+ def add(self, el):
+ '''Upload cache entry `el` asynchronously
+
+ Return (uncompressed) size of cache entry.
+
+ This method releases the global lock.
+ '''
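+
+        # Outline (for orientation): hash the block and look the hash up
+        # in the objects table. On a miss, create a new object row and
+        # schedule compression and upload; on a hit, just (re)link the
+        # block to the existing object and drop the old reference.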
+
+ log.debug('UploadManager.add(%s): start', el)
+
+ if (el.inode, el.blockno) in self.in_transit:
+ raise ValueError('Block already in transit')
+
+ old_obj_id = el.obj_id
+ size = os.fstat(el.fileno()).st_size
+ el.seek(0)
+ if log.isEnabledFor(logging.DEBUG):
+ time_ = time.time()
+ hash_ = sha256_fh(el)
+ time_ = time.time() - time_
+ if time_ != 0:
+ rate = size / (1024**2 * time_)
+ else:
+ rate = 0
+ log.debug('UploadManager(inode=%d, blockno=%d): '
+ 'hashed %d bytes in %.3f seconds, %.2f MB/s',
+ el.inode, el.blockno, size, time_, rate)
+ else:
+ hash_ = sha256_fh(el)
+
+ try:
+ el.obj_id = self.db.get_val('SELECT id FROM objects WHERE hash=?', (hash_,))
+
+ except NoSuchRowError:
+ need_upload = True
+ el.obj_id = self.db.rowid('INSERT INTO objects (refcount, hash, size) VALUES(?, ?, ?)',
+ (1, hash_, size))
+ log.debug('add(inode=%d, blockno=%d): created new object %d',
+ el.inode, el.blockno, el.obj_id)
+
+ else:
+ need_upload = False
+ if old_obj_id == el.obj_id:
+ log.debug('add(inode=%d, blockno=%d): unchanged, obj_id=%d',
+ el.inode, el.blockno, el.obj_id)
+ el.dirty = False
+ el.modified_after_upload = False
+ os.rename(el.name + '.d', el.name)
+ return size
+
+ log.debug('add(inode=%d, blockno=%d): (re)linking to %d',
+ el.inode, el.blockno, el.obj_id)
+ self.db.execute('UPDATE objects SET refcount=refcount+1 WHERE id=?',
+ (el.obj_id,))
+
+ to_delete = False
+ if old_obj_id is None:
+ log.debug('add(inode=%d, blockno=%d): no previous object',
+ el.inode, el.blockno)
+ self.db.execute('INSERT INTO blocks (obj_id, inode, blockno) VALUES(?,?,?)',
+ (el.obj_id, el.inode, el.blockno))
+ else:
+ self.db.execute('UPDATE blocks SET obj_id=? WHERE inode=? AND blockno=?',
+ (el.obj_id, el.inode, el.blockno))
+ refcount = self.db.get_val('SELECT refcount FROM objects WHERE id=?',
+ (old_obj_id,))
+ if refcount > 1:
+ log.debug('add(inode=%d, blockno=%d): '
+ 'decreased refcount for prev. obj: %d',
+ el.inode, el.blockno, old_obj_id)
+ self.db.execute('UPDATE objects SET refcount=refcount-1 WHERE id=?',
+ (old_obj_id,))
+ else:
+ log.debug('add(inode=%d, blockno=%d): '
+ 'prev. obj %d marked for removal',
+ el.inode, el.blockno, old_obj_id)
+ self.db.execute('DELETE FROM objects WHERE id=?', (old_obj_id,))
+ to_delete = True
+
+ if need_upload:
+ log.debug('add(inode=%d, blockno=%d): starting compression thread',
+ el.inode, el.blockno)
+ el.modified_after_upload = False
+ self.in_transit.add((el.inode, el.blockno))
+
+ # Create a new fd so that we don't get confused if another
+ # thread repositions the cursor (and do so before unlocking)
+ fh = open(el.name + '.d', 'rb')
+ self.compress_threads.add_thread(CompressThread(el, fh, self, size)) # Releases global lock
+
+ else:
+ el.dirty = False
+ el.modified_after_upload = False
+ os.rename(el.name + '.d', el.name)
+
+ if to_delete:
+ log.debug('add(inode=%d, blockno=%d): removing object %d',
+ el.inode, el.blockno, old_obj_id)
+
+ try:
+            # Note: the old object cannot be in transit
+ # Releases global lock
+ self.removal_queue.add_thread(RemoveThread(old_obj_id, self.bucket))
+ except EmbeddedException as exc:
+ exc = exc.exc
+ if isinstance(exc, NoSuchObject):
+ log.warn('Backend seems to have lost object %s', exc.key)
+ self.encountered_errors = True
+ else:
+ raise
+
+ log.debug('add(inode=%d, blockno=%d): end', el.inode, el.blockno)
+ return size
+
+ def join_all(self):
+ '''Wait until all blocks in transit have been uploaded
+
+ This method releases the global lock.
+ '''
+
+ self.compress_threads.join_all()
+ self.upload_threads.join_all()
+
+ def join_one(self):
+ '''Wait until one block has been uploaded
+
+ If there are no blocks in transit, return immediately.
+ This method releases the global lock.
+ '''
+
+ if len(self.upload_threads) == 0:
+ self.compress_threads.join_one()
+
+ self.upload_threads.join_one()
+
+ def upload_in_progress(self):
+ '''Return True if there are any blocks in transit'''
+
+ return len(self.compress_threads) + len(self.upload_threads) > 0
+
+
+class CompressThread(Thread):
+ '''
+ Compress a block and then pass it on for uploading.
+
+ This class uses the llfuse global lock. When calling objects
+ passed in the constructor, the global lock is acquired first.
+
+ The `size` attribute will be updated to the compressed size.
+ '''
+
+ def __init__(self, el, fh, um, size):
+ super(CompressThread, self).__init__()
+ self.el = el
+ self.fh = fh
+ self.um = um
+ self.size = size
+
+ def run_protected(self):
+ '''Compress block
+
+ After compression:
+ - the file handle is closed
+ - the compressed block size is updated in the database
+        - an UploadThread instance is started to upload the data.
+
+ In case of an exception, the block is removed from the in_transit
+ set.
+ '''
+
+ try:
+ if log.isEnabledFor(logging.DEBUG):
+ oldsize = self.size
+ time_ = time.time()
+ (self.size, fn) = self.um.bucket.prep_store_fh('s3ql_data_%d' % self.el.obj_id,
+ self.fh)
+ time_ = time.time() - time_
+ if time_ != 0:
+ rate = oldsize / (1024**2 * time_)
+ else:
+ rate = 0
+                log.debug('CompressThread(inode=%d, blockno=%d): '
+ 'compressed %d bytes in %.3f seconds, %.2f MB/s',
+ self.el.inode, self.el.blockno, oldsize,
+ time_, rate)
+ else:
+ (self.size, fn) = self.um.bucket.prep_store_fh('s3ql_data_%d' % self.el.obj_id,
+ self.fh)
+
+ self.fh.close()
+
+ with lock:
+ # If we already have the minimum transit size, do not start more
+ # than two threads
+ log.debug('CompressThread(%s): starting upload thread', self.el)
+
+ if self.um.transit_size > MIN_TRANSIT_SIZE:
+ max_threads = 2
+ else:
+ max_threads = None
+
+ self.um.transit_size += self.size
+ self.um.db.execute('UPDATE objects SET compr_size=? WHERE id=?',
+ (self.size, self.el.obj_id))
+ self.um.upload_threads.add_thread(UploadThread(fn, self.el, self.size, self.um),
+ max_threads)
+
+ except EmbeddedException:
+ raise
+ except:
+ with lock:
+ self.um.in_transit.remove((self.el.inode, self.el.blockno))
+ self.um.transit_size -= self.size
+ raise
+
+
+class UploadThread(Thread):
+ '''
+ Uploads a cache entry with the function passed in the constructor.
+
+ This class uses the llfuse global lock. When calling objects
+ passed in the constructor, the global lock is acquired first.
+ '''
+
+ def __init__(self, fn, el, size, um):
+ super(UploadThread, self).__init__()
+ self.fn = fn
+ self.el = el
+ self.size = size
+ self.um = um
+
+ def run_protected(self):
+ '''Upload block by calling self.fn()
+
+ The upload duration is timed. After the upload (or if an exception
+ occurs), the block is removed from in_transit.
+ '''
+ try:
+ if log.isEnabledFor(logging.DEBUG):
+ time_ = time.time()
+ self.fn()
+ time_ = time.time() - time_
+ if time_ != 0:
+ rate = self.size / (1024**2 * time_)
+ else:
+ rate = 0
+                log.debug('UploadThread(inode=%d, blockno=%d): '
+                          'uploaded %d bytes in %.3f seconds, %.2f MB/s',
+ self.el.inode, self.el.blockno, self.size,
+ time_, rate)
+ else:
+ self.fn()
+
+ except:
+ with lock:
+ self.um.in_transit.remove((self.el.inode, self.el.blockno))
+ self.um.transit_size -= self.size
+ raise
+
+ with lock:
+ self.um.in_transit.remove((self.el.inode, self.el.blockno))
+ self.um.transit_size -= self.size
+
+ if not self.el.modified_after_upload:
+ self.el.dirty = False
+ try:
+ os.rename(self.el.name + '.d', self.el.name)
+ except OSError as exc:
+ # Entry may have been removed while being uploaded
+ if exc.errno != errno.ENOENT:
+ raise
+
+
+def retry_exc(timeout, exc_types, fn, *a, **kw):
+ """Wait for fn(*a, **kw) to succeed
+
+ If `fn(*a, **kw)` raises an exception in `exc_types`, the function is called again.
+ If the timeout is reached, `TimeoutError` is raised.
+ """
+
+ step = 0.2
+ waited = 0
+ while waited < timeout:
+ try:
+ return fn(*a, **kw)
+ except BaseException as exc:
+ for exc_type in exc_types:
+ if isinstance(exc, exc_type):
+ log.warn('Encountered %s error when calling %s, retrying...',
+ exc.__class__.__name__, fn.__name__)
+ break
+ else:
+ raise exc
+
+ time.sleep(step)
+ waited += step
+ if step < timeout / 30:
+ step *= 2
+
+ raise TimeoutError()
+
+class RemoveThread(Thread):
+ '''
+    Remove an object from the backend. If a transit key is specified, the
+    thread first waits until the object is no longer in transit.
+
+    This class uses the llfuse global lock. When calling objects
+    passed in the constructor, the global lock is acquired first.
+ '''
+
+ def __init__(self, id_, bucket, transit_key=None, upload_manager=None):
+ super(RemoveThread, self).__init__()
+ self.id = id_
+ self.bucket = bucket
+ self.transit_key = transit_key
+ self.um = upload_manager
+
+ def run_protected(self):
+ if self.transit_key:
+ while self.transit_key in self.um.in_transit:
+ with lock:
+ self.um.join_one()
+
+ if self.bucket.read_after_create_consistent():
+ self.bucket.delete('s3ql_data_%d' % self.id)
+ else:
+ retry_exc(300, [ NoSuchObject ], self.bucket.delete,
+ 's3ql_data_%d' % self.id) \ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..06e210b
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,16 @@
+'''
+__init__.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+
+# Export all modules
+import os
+testdir = os.path.dirname(__file__)
+__all__ = [ name[:-3] for name in os.listdir(testdir) if name.endswith(".py") and
+ name != '__init__.py' ]
diff --git a/tests/_common.py b/tests/_common.py
new file mode 100644
index 0000000..66ff40f
--- /dev/null
+++ b/tests/_common.py
@@ -0,0 +1,86 @@
+'''
+_common.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+
+This module defines a new TestCase that aborts the test run as
+soon as a test fails. The module also serves as a storage container
+for authentication data that may be required for some test cases.
+
+
+Test case policy
+----------------
+
+Each test should correspond to exactly one function in the tested module. The
+test should assume that any other functions that are called by the tested
+function work perfectly. However, the test must not rely on the result of any
+other functions when checking the correctness of the tested function.
+
+Example: if a module has methods `write_file_to_disk`, `write_some_bytes` and
+`read_file_from_disk`, then the test for `write_file_to_disk` may assume that
+the `write_some_bytes` method that is called by `write_file_to_disk` works
+correctly, but it must not use the `read_file_from_disk` method to check if the
+file has been written correctly.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+import os
+import logging
+from s3ql.common import add_stdout_logging, setup_excepthook
+#from s3ql.common import LoggerFilter
+
+__all__ = [ 'TestCase' ]
+
+log = logging.getLogger()
+
+class TestCase(unittest.TestCase):
+
+ def __init__(self, *a, **kw):
+ super(TestCase, self).__init__(*a, **kw)
+
+ # Initialize logging if not yet initialized
+ root_logger = logging.getLogger()
+ if not root_logger.handlers:
+ handler = add_stdout_logging()
+ setup_excepthook()
+ handler.setLevel(logging.DEBUG)
+ root_logger.setLevel(logging.WARN)
+
+ # For debugging:
+ #root_logger.setLevel(logging.DEBUG)
+ #handler.addFilter(LoggerFilter(['UploadManager'],
+ # logging.INFO))
+
+ def run(self, result=None):
+ if result is None:
+ result = self.defaultTestResult()
+
+ super(TestCase, self).run(result)
+
+ # Abort if any test failed
+ if result.errors or result.failures:
+ result.stop()
+
+# Try to read credentials from file. Meant for developer use only,
+# so that we can run individual tests without the setup.py
+# initialization.
+def init_credentials():
+ keyfile = os.path.expanduser("~/.awssecret")
+
+ if not os.path.isfile(keyfile):
+ return None
+
+ with open(keyfile, "r") as fh:
+ key = fh.readline().rstrip()
+ pw = fh.readline().rstrip()
+
+ return (key, pw)
+
+aws_credentials = init_credentials()
+
+
+
diff --git a/tests/data.tar.bz2 b/tests/data.tar.bz2
new file mode 100644
index 0000000..0c8711f
--- /dev/null
+++ b/tests/data.tar.bz2
Binary files differ
diff --git a/tests/t1_backends.py b/tests/t1_backends.py
new file mode 100644
index 0000000..d629466
--- /dev/null
+++ b/tests/t1_backends.py
@@ -0,0 +1,192 @@
+'''
+t1_backends.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.backends import local, s3
+from s3ql.backends.common import ChecksumError, ObjectNotEncrypted, NoSuchObject
+import tempfile
+import os
+import time
+from _common import TestCase
+import _common
+from random import randrange
+
+class BackendTests(object):
+
+ def newname(self):
+ self.name_cnt += 1
+ # Include special characters
+ return "s3ql_=/_%d" % self.name_cnt
+
+ def test_store(self):
+ key = self.newname()
+ value = self.newname()
+ metadata = { 'jimmy': 'jups@42' }
+
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key)
+ self.bucket.store(key, value, metadata)
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket.fetch(key), (value, metadata))
+ self.assertEquals(self.bucket[key], value)
+
+ def test_fetch(self):
+ key = self.newname()
+ value = self.newname()
+ metadata = { 'jimmy': 'jups@42' }
+
+ self.assertRaises(NoSuchObject, self.bucket.fetch, key)
+ self.bucket.store(key, value, metadata)
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket.fetch(key), (value, metadata))
+
+ def test_lookup(self):
+ key = self.newname()
+ value = self.newname()
+ metadata = { 'jimmy': 'jups@42' }
+
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key)
+ self.bucket.store(key, value, metadata)
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket.lookup(key), metadata)
+
+ def test_contains(self):
+ key = self.newname()
+ value = self.newname()
+
+ self.assertFalse(key in self.bucket)
+ self.bucket[key] = value
+ time.sleep(self.delay)
+ self.assertTrue(key in self.bucket)
+
+ def test_delete(self):
+ key = self.newname()
+ value = self.newname()
+ self.bucket[key] = value
+ time.sleep(self.delay)
+
+ self.assertTrue(key in self.bucket)
+ del self.bucket[key]
+ time.sleep(self.delay)
+ self.assertFalse(key in self.bucket)
+
+ def test_clear(self):
+ self.bucket[self.newname()] = self.newname()
+ self.bucket[self.newname()] = self.newname()
+
+ time.sleep(self.delay)
+ self.assertEquals(len(list(self.bucket)), 2)
+ self.bucket.clear()
+ time.sleep(self.delay)
+ self.assertEquals(len(list(self.bucket)), 0)
+
+ def test_list(self):
+
+ keys = [ self.newname() for dummy in range(12) ]
+ values = [ self.newname() for dummy in range(12) ]
+ for i in range(12):
+ self.bucket[keys[i]] = values[i]
+
+ time.sleep(self.delay)
+ self.assertEquals(sorted(self.bucket.list()), sorted(keys))
+
+ def test_encryption(self):
+ bucket = self.bucket
+ bucket.passphrase = None
+ bucket['plain'] = b'foobar452'
+
+ bucket.passphrase = 'schlurp'
+ bucket.store('encrypted', 'testdata', { 'tag': True })
+ time.sleep(self.delay)
+ self.assertEquals(bucket['encrypted'], b'testdata')
+ self.assertRaises(ObjectNotEncrypted, bucket.fetch, 'plain')
+ self.assertRaises(ObjectNotEncrypted, bucket.lookup, 'plain')
+
+ bucket.passphrase = None
+ self.assertRaises(ChecksumError, bucket.fetch, 'encrypted')
+ self.assertRaises(ChecksumError, bucket.lookup, 'encrypted')
+
+ bucket.passphrase = self.passphrase
+ self.assertRaises(ChecksumError, bucket.fetch, 'encrypted')
+ self.assertRaises(ChecksumError, bucket.lookup, 'encrypted')
+ self.assertRaises(ObjectNotEncrypted, bucket.fetch, 'plain')
+ self.assertRaises(ObjectNotEncrypted, bucket.lookup, 'plain')
+
+ def test_copy(self):
+
+ key1 = self.newname()
+ key2 = self.newname()
+ value = self.newname()
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key1)
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key2)
+
+ self.bucket.store(key1, value)
+ time.sleep(self.delay)
+ self.bucket.copy(key1, key2)
+
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket[key2], value)
+
+
+# This test takes too long (we have to wait a long time to avoid
+# false errors caused by propagation delays)
+@unittest.skip('takes too long')
+@unittest.skipUnless(_common.aws_credentials, 'no AWS credentials available')
+class S3Tests(BackendTests, TestCase):
+ @staticmethod
+ def random_name(prefix=""):
+ return "s3ql-" + prefix + str(randrange(1000, 9999, 1))
+
+ def setUp(self):
+ self.name_cnt = 0
+ self.conn = s3.Connection(*_common.aws_credentials)
+
+ self.bucketname = self.random_name()
+ tries = 10
+        while self.conn.bucket_exists(self.bucketname) and tries > 0:
+ self.bucketname = self.random_name()
+ tries -= 1
+
+ if tries == 0:
+ raise RuntimeError("Failed to find an unused bucket name.")
+
+ self.passphrase = 'flurp'
+ self.bucket = self.conn.create_bucket(self.bucketname, self.passphrase)
+
+        # This is the time within which we expect S3 changes to propagate.
+        # It may take much longer for larger objects, but for tests this is usually enough.
+ self.delay = 8
+ time.sleep(self.delay)
+
+ def tearDown(self):
+ self.conn.delete_bucket(self.bucketname, recursive=True)
+
+class LocalTests(BackendTests, TestCase):
+
+ def setUp(self):
+ self.name_cnt = 0
+ self.conn = local.Connection()
+ self.bucket_dir = tempfile.mkdtemp()
+ self.bucketname = os.path.join(self.bucket_dir, 'mybucket')
+ self.passphrase = 'flurp'
+ self.bucket = self.conn.create_bucket(self.bucketname, self.passphrase)
+ self.delay = 0
+
+ def tearDown(self):
+ self.conn.delete_bucket(self.bucketname, recursive=True)
+ os.rmdir(self.bucket_dir)
+
+# Provide a suite() function, as recommended by the unittest documentation
+def suite():
+ return unittest.makeSuite(LocalTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t1_multi_lock.py b/tests/t1_multi_lock.py
new file mode 100644
index 0000000..eeaf0ee
--- /dev/null
+++ b/tests/t1_multi_lock.py
@@ -0,0 +1,93 @@
+'''
+t1_multi_lock.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.multi_lock import MultiLock
+import time
+from s3ql.common import AsyncFn
+from _common import TestCase
+
+BASE_DELAY = 1
+
+@unittest.skip('takes too long')
+class MultiLockTests(TestCase):
+
+ def test_lock(self):
+ mlock = MultiLock()
+ key = (22, 'bar')
+
+ def hold():
+ mlock.acquire(key)
+ time.sleep(2 * BASE_DELAY)
+ mlock.release(key)
+
+ t = AsyncFn(hold)
+ t.start()
+ time.sleep(BASE_DELAY)
+
+ stamp = time.time()
+ with mlock(key):
+ pass
+ self.assertTrue(time.time() - stamp > BASE_DELAY)
+
+ t.join_and_raise()
+
+ def test_nolock(self):
+ mlock = MultiLock()
+ key1 = (22, 'bar')
+ key2 = (23, 'bar')
+
+ def hold():
+ mlock.acquire(key1)
+ time.sleep(2 * BASE_DELAY)
+ mlock.release(key1)
+
+ t = AsyncFn(hold)
+ t.start()
+ time.sleep(BASE_DELAY)
+
+ stamp = time.time()
+ with mlock(key2):
+ pass
+ self.assertTrue(time.time() - stamp < BASE_DELAY)
+
+ t.join_and_raise()
+
+ def test_multi(self):
+ mlock = MultiLock()
+ key = (22, 'bar')
+
+ def lock():
+ mlock.acquire(key)
+
+ def unlock():
+ time.sleep(2 * BASE_DELAY)
+ mlock.release(key)
+
+ t1 = AsyncFn(lock)
+ t1.start()
+ t1.join_and_raise()
+
+ t2 = AsyncFn(unlock)
+ t2.start()
+
+ stamp = time.time()
+ with mlock(key):
+ pass
+ self.assertTrue(time.time() - stamp > BASE_DELAY)
+
+ t2.join_and_raise()
+
+def suite():
+ return unittest.makeSuite(MultiLockTests)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t1_ordered_dict.py b/tests/t1_ordered_dict.py
new file mode 100644
index 0000000..b25bda8
--- /dev/null
+++ b/tests/t1_ordered_dict.py
@@ -0,0 +1,142 @@
+'''
+t1_ordered_dict.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.ordered_dict import OrderedDict
+from _common import TestCase
+
+class OrderedDictTests(TestCase):
+
+ def test_1_add_del(self):
+ od = OrderedDict()
+
+ key1 = 'key1'
+ val1 = 'val1'
+
+ # Add elements
+ def add_one():
+ od[key1] = val1
+ self.assertEqual(od.get_first(), val1)
+ self.assertEquals(od.get_last(), val1)
+ self.assertEqual(od.get(key1), val1)
+ self.assertTrue(od)
+ self.assertTrue(key1 in od)
+ self.assertEqual(len(od), 1)
+
+
+ add_one()
+ del od[key1]
+ self.assertFalse(od)
+ self.assertFalse(key1 in od)
+ self.assertEqual(len(od), 0)
+ self.assertRaises(IndexError, od.get_first)
+ self.assertRaises(IndexError, od.get_last)
+
+ add_one()
+ self.assertEqual(od.pop_first(), val1)
+ self.assertFalse(od)
+ self.assertFalse(key1 in od)
+ self.assertEqual(len(od), 0)
+ self.assertRaises(IndexError, od.get_first)
+ self.assertRaises(IndexError, od.get_last)
+
+ add_one()
+ self.assertEqual(od.pop_last(), val1)
+ self.assertFalse(od)
+ self.assertFalse(key1 in od)
+ self.assertEqual(len(od), 0)
+ self.assertRaises(IndexError, od.get_first)
+ self.assertRaises(IndexError, od.get_last)
+
+
+ def test_2_order_simple(self):
+ od = OrderedDict()
+
+ key1 = 'key1'
+ val1 = 'val1'
+ key2 = 'key2'
+ val2 = 'val2'
+
+ od[key1] = val1
+ od[key2] = val2
+
+ self.assertEqual(od.get_first(), val2)
+ self.assertEquals(od.get_last(), val1)
+
+ od[key1] = val1
+ self.assertEqual(od.get_first(), val2)
+ self.assertEquals(od.get_last(), val1)
+
+ od.to_tail(key1)
+ self.assertEqual(od.get_first(), val2)
+ self.assertEquals(od.get_last(), val1)
+
+ od.to_head(key1)
+ self.assertEqual(od.get_first(), val1)
+ self.assertEquals(od.get_last(), val2)
+
+ def test_3_order_cmplx(self):
+ od = OrderedDict()
+ no = 10
+
+ keys = [ 'key number %d' % i for i in range(no) ]
+ vals = [ 'value number %d' % i for i in range(no) ]
+
+ for i in range(no):
+ od[keys[i]] = vals[i]
+ keys.reverse()
+ self._compareOrder(od, keys)
+
+ # Move around different elements
+ for i in [ 0, int((no - 1) / 2), no - 1]:
+ od.to_head(keys[i])
+ keys = [ keys[i] ] + keys[:i] + keys[i + 1:]
+ self._compareOrder(od, keys)
+
+ od.to_tail(keys[i])
+ keys = keys[:i] + keys[i + 1:] + [ keys[i] ]
+ self._compareOrder(od, keys)
+
+ remove = keys[i]
+ del od[remove]
+ keys = keys[:i] + keys[i + 1:]
+ self._compareOrder(od, keys)
+
+ od[remove] = 'something new'
+ keys.insert(0, remove)
+ self._compareOrder(od, keys)
+
+ def _compareOrder(self, od, keys):
+ od_i = iter(od)
+ keys_i = iter(keys)
+ while True:
+ try:
+ key = keys_i.next()
+ except StopIteration:
+ break
+ self.assertEquals(od_i.next(), key)
+ self.assertRaises(StopIteration, od_i.next)
+
+ od_i = reversed(od)
+ keys_i = reversed(keys)
+ while True:
+ try:
+ key = keys_i.next()
+ except StopIteration:
+ break
+ self.assertEquals(od_i.next(), key)
+ self.assertRaises(StopIteration, od_i.next)
+
+
+def suite():
+ return unittest.makeSuite(OrderedDictTests)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t2_block_cache.py b/tests/t2_block_cache.py
new file mode 100644
index 0000000..f80883a
--- /dev/null
+++ b/tests/t2_block_cache.py
@@ -0,0 +1,389 @@
+'''
+t2_block_cache.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+from s3ql.block_cache import BlockCache
+from s3ql.backends import local
+from s3ql.backends.common import NoSuchObject
+from s3ql.common import create_tables, init_tables
+from s3ql.database import Connection
+import os
+import tempfile
+from _common import TestCase
+import unittest2 as unittest
+import stat
+import time
+import llfuse
+import shutil
+
+class cache_tests(TestCase):
+
+ def setUp(self):
+
+ self.bucket_dir = tempfile.mkdtemp()
+ self.bucket = local.Connection().get_bucket(self.bucket_dir)
+
+ self.cachedir = tempfile.mkdtemp() + "/"
+ self.blocksize = 1024
+
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+
+ # Create an inode we can work with
+ self.inode = 42
+ self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?)",
+ (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))
+
+ self.cache = BlockCache(self.bucket, self.db, self.cachedir,
+ 100 * self.blocksize)
+ self.cache.init()
+
+ # Tested methods assume that they are called from
+ # file system request handler
+ llfuse.lock.acquire()
+
+ # We do not want background threads
+ self.cache.commit_thread.stop()
+
+
+ def tearDown(self):
+ self.cache.upload_manager.bucket = self.bucket
+ self.cache.destroy()
+ if os.path.exists(self.cachedir):
+ shutil.rmtree(self.cachedir)
+ shutil.rmtree(self.bucket_dir)
+
+ llfuse.lock.release()
+
+ @staticmethod
+ def random_data(len_):
+ with open("/dev/urandom", "rb") as fh:
+ return fh.read(len_)
+
+ def test_get(self):
+ inode = self.inode
+ blockno = 11
+ data = self.random_data(int(0.5 * self.blocksize))
+
+ # Case 1: Object does not exist yet
+ with self.cache.get(inode, blockno) as fh:
+ fh.seek(0)
+ fh.write(data)
+
+ # Case 2: Object is in cache
+ with self.cache.get(inode, blockno) as fh:
+ fh.seek(0)
+ self.assertEqual(data, fh.read(len(data)))
+
+ # Case 3: Object needs to be downloaded
+ self.cache.clear()
+ self.cache.upload_manager.join_all()
+ with self.cache.get(inode, blockno) as fh:
+ fh.seek(0)
+ self.assertEqual(data, fh.read(len(data)))
+
+
+ def test_expire(self):
+ inode = self.inode
+
+ # Define the 4 most recently accessed ones
+ most_recent = [7,11,10,8]
+ for i in most_recent:
+ time.sleep(0.2)
+ with self.cache.get(inode, i) as fh:
+ fh.write('%d' % i)
+
+ # And some others
+ for i in range(20):
+ if i in most_recent:
+ continue
+ with self.cache.get(inode, i) as fh:
+ fh.write('%d' % i)
+
+        # Flush 2 of the 4 most recently accessed ones
+ commit(self.cache, inode, most_recent[-2])
+ commit(self.cache, inode, most_recent[-3])
+
+ # We want to expire 4 entries, 2 of which are already flushed
+ self.cache.max_entries = 16
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=2)
+ self.cache.expire()
+ self.cache.upload_manager.join_all()
+ self.cache.upload_manager.bucket.verify()
+ self.assertEqual(len(self.cache.cache), 16)
+
+ for i in range(20):
+ if i in most_recent:
+ self.assertTrue((inode, i) not in self.cache.cache)
+ else:
+ self.assertTrue((inode, i) in self.cache.cache)
+
+ def test_upload(self):
+ inode = self.inode
+ datalen = int(0.1 * self.cache.max_size)
+ blockno1 = 21
+ blockno2 = 25
+ blockno3 = 7
+
+ data1 = self.random_data(datalen)
+ data2 = self.random_data(datalen)
+ data3 = self.random_data(datalen)
+
+ mngr = self.cache.upload_manager
+
+ # Case 1: create new object
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ el1 = fh
+ mngr.add(el1)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ # Case 2: Link new object
+ self.cache.upload_manager.bucket = TestBucket(self.bucket)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ el2 = fh
+ mngr.add(el2)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ # Case 3: Upload old object, still has references
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data2)
+ mngr.add(el1)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+
+ # Case 4: Upload old object, no references left
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1, no_store=1)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data3)
+ mngr.add(el2)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ # Case 5: Link old object, no references left
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data2)
+ mngr.add(el2)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+
+ # Case 6: Link old object, still has references
+ # (Need to create another object first)
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno3) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ el3 = fh
+ mngr.add(el3)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ self.cache.upload_manager.bucket = TestBucket(self.bucket)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ mngr.add(el1)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+
+
+ def test_remove_referenced(self):
+ inode = self.inode
+ datalen = int(0.1 * self.cache.max_size)
+ blockno1 = 21
+ blockno2 = 24
+ data = self.random_data(datalen)
+
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data)
+ self.cache.clear()
+ self.cache.upload_manager.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ self.cache.upload_manager.bucket = TestBucket(self.bucket)
+ self.cache.remove(inode, blockno1)
+ self.cache.upload_manager.bucket.verify()
+
+ def test_remove_cache(self):
+ inode = self.inode
+ data1 = self.random_data(int(0.4 * self.blocksize))
+
+ # Case 1: Elements only in cache
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ self.cache.remove(inode, 1)
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ self.assertTrue(fh.read(42) == '')
+
+ def test_remove_cache_db(self):
+ inode = self.inode
+ data1 = self.random_data(int(0.4 * self.blocksize))
+
+ # Case 2: Element in cache and db
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ commit(self.cache, inode)
+ self.cache.upload_manager.bucket.verify()
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1)
+ self.cache.remove(inode, 1)
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ self.assertTrue(fh.read(42) == '')
+
+ def test_remove_db(self):
+ inode = self.inode
+ data1 = self.random_data(int(0.4 * self.blocksize))
+
+ # Case 3: Element only in DB
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ self.cache.clear()
+ self.cache.upload_manager.join_all()
+ self.cache.upload_manager.bucket.verify()
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1)
+ self.cache.remove(inode, 1)
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ self.assertTrue(fh.read(42) == '')
+
+
+class TestBucket(object):
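+    # Test double (summary): wraps a real bucket and counts down the
+    # expected number of fetch/store/delete calls. verify() raises if the
+    # expected calls did not all happen; the wrapped methods raise if
+    # called more often than expected.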
+ def __init__(self, bucket, no_fetch=0, no_store=0, no_del=0):
+ self.no_fetch = no_fetch
+ self.no_store = no_store
+ self.no_del = no_del
+ self.bucket = bucket
+
+ def read_after_create_consistent(self):
+ return self.bucket.read_after_create_consistent()
+
+ def read_after_write_consistent(self):
+ return self.bucket.read_after_write_consistent()
+
+ def verify(self):
+ if self.no_fetch != 0:
+ raise RuntimeError('Got too few fetch calls')
+ if self.no_store != 0:
+ raise RuntimeError('Got too few store calls')
+ if self.no_del != 0:
+ raise RuntimeError('Got too few delete calls')
+
+ def prep_store_fh(self, *a, **kw):
+ (size, fn) = self.bucket.prep_store_fh(*a, **kw)
+ def fn2():
+ self.no_store -= 1
+ if self.no_store < 0:
+ raise RuntimeError('Got too many store calls')
+ return fn()
+
+ return (size, fn2)
+
+ def store_fh(self, *a, **kw):
+ self.no_store -= 1
+
+ if self.no_store < 0:
+ raise RuntimeError('Got too many store calls')
+
+ return self.bucket.store_fh(*a, **kw)
+
+ def fetch_fh(self, *a, **kw):
+ self.no_fetch -= 1
+
+ if self.no_fetch < 0:
+ raise RuntimeError('Got too many fetch calls')
+
+ return self.bucket.fetch_fh(*a, **kw)
+
+ def delete(self, *a, **kw):
+ self.no_del -= 1
+
+ if self.no_del < 0:
+ raise RuntimeError('Got too many delete calls')
+
+ try:
+ return self.bucket.delete(*a, **kw)
+ except NoSuchObject:
+ # Don't count key errors
+ self.no_del += 1
+ raise
+
+
+ def __delitem__(self, key):
+ self.delete(key)
+
+ def __iter__(self):
+ return self.bucket.list()
+
+ def __contains__(self, key):
+ return self.bucket.contains(key)
+
+
+def commit(self, inode, block=None):
+ """Upload data for `inode`
+
+ This is only for testing purposes, since the method blocks
+ until all current uploads have been completed.
+ """
+
+ for el in self.cache.itervalues():
+ if el.inode != inode:
+ continue
+ if not el.dirty:
+ continue
+
+ if block is not None and el.blockno != block:
+ continue
+
+ self.upload_manager.add(el)
+
+ self.upload_manager.join_all()
+
+
+def suite():
+ return unittest.makeSuite(cache_tests)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t3_fs_api.py b/tests/t3_fs_api.py
new file mode 100644
index 0000000..04a2249
--- /dev/null
+++ b/tests/t3_fs_api.py
@@ -0,0 +1,777 @@
+'''
+t3_fs_api.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+from random import randint
+from s3ql.fsck import Fsck
+from s3ql import fs
+from s3ql.backends import local
+from s3ql.common import ROOT_INODE, create_tables, init_tables
+from llfuse import FUSEError
+from s3ql.database import Connection
+from _common import TestCase
+import os
+import stat
+import time
+import llfuse
+import unittest2 as unittest
+import errno
+import shutil
+import tempfile
+
+# We need access to protected members
+#pylint: disable=W0212
+
+class Ctx(object):
+ def __init__(self):
+ self.uid = randint(0, 2 ** 32)
+ self.gid = randint(0, 2 ** 32)
+
+# Determine system clock granularity
+stamp1 = time.time()
+stamp2 = stamp1
+while stamp1 == stamp2:
+ stamp2 = time.time()
+CLOCK_GRANULARITY = 2 * (stamp2 - stamp1)
+del stamp1
+del stamp2
+
+class fs_api_tests(TestCase):
+
+ def setUp(self):
+ self.bucket_dir = tempfile.mkdtemp()
+ self.bucket = local.Connection().get_bucket(self.bucket_dir)
+ self.cachedir = tempfile.mkdtemp() + "/"
+ self.blocksize = 1024
+
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+
+ self.server = fs.Operations(self.bucket, self.db, self.cachedir,
+ self.blocksize, cache_size=self.blocksize * 5)
+
+ # Tested methods assume that they are called from
+ # file system request handler
+ llfuse.lock.acquire()
+
+ self.server.init()
+
+ # We don't want background flushing
+ self.server.cache.commit_thread.stop()
+ self.server.inode_flush_thread.stop()
+
+ # Keep track of unused filenames
+ self.name_cnt = 0
+
+ def tearDown(self):
+ self.server.destroy()
+ if os.path.exists(self.cachedir):
+ shutil.rmtree(self.cachedir)
+ shutil.rmtree(self.bucket_dir)
+ llfuse.lock.release()
+
+ @staticmethod
+ def random_data(len_):
+ with open("/dev/urandom", "rb") as fd:
+ return fd.read(len_)
+
+ def fsck(self):
+ self.server.cache.clear()
+ self.server.cache.upload_manager.join_all()
+ self.server.inodes.flush()
+ fsck = Fsck(self.cachedir, self.bucket,
+ { 'blocksize': self.blocksize }, self.db)
+ fsck.check()
+ self.assertFalse(fsck.found_errors)
+
+ def newname(self):
+ self.name_cnt += 1
+ return "s3ql_%d" % self.name_cnt
+
+ def test_getattr_root(self):
+ self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode))
+ self.fsck()
+
+ def test_create(self):
+ ctx = Ctx()
+ mode = self.dir_mode()
+ name = self.newname()
+
+ inode_p_old = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server._create(ROOT_INODE, name, mode, ctx)
+
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE))
+
+ inode = self.server.getattr(id_)
+
+ self.assertEqual(inode.mode, mode)
+ self.assertEqual(inode.uid, ctx.uid)
+ self.assertEqual(inode.gid, ctx.gid)
+ self.assertEqual(inode.refcount, 1)
+ self.assertEqual(inode.size, 0)
+
+ inode_p_new = self.server.getattr(ROOT_INODE)
+
+ self.assertGreater(inode_p_new.mtime, inode_p_old.mtime)
+ self.assertGreater(inode_p_new.ctime, inode_p_old.ctime)
+
+ self.fsck()
+
+ def test_extstat(self):
+ # Test with zero contents
+ self.server.extstat()
+
+ # Test with empty file
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+ self.server.extstat()
+
+ # Test with data in file
+ fh = self.server.open(inode.id, os.O_RDWR)
+ self.server.write(fh, 0, 'foobar')
+ self.server.release(fh)
+
+ self.server.extstat()
+
+ self.fsck()
+
+ @staticmethod
+ def dir_mode():
+ return (randint(0, 07777) & ~stat.S_IFDIR) | stat.S_IFDIR
+
+ @staticmethod
+ def file_mode():
+ return (randint(0, 07777) & ~stat.S_IFREG) | stat.S_IFREG
+
+ def test_getxattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+        self.assertRaises(FUSEError, self.server.getxattr, inode.id, 'nonexistent-attr')
+
+ self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
+ self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')
+
+ self.fsck()
+
+ def test_link(self):
+ name = self.newname()
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
+ self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+ time.sleep(CLOCK_GRANULARITY)
+
+ inode_before = self.server.getattr(inode.id).copy()
+ self.server.link(inode.id, inode_p_new.id, name)
+
+ inode_after = self.server.lookup(inode_p_new.id, name)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, inode_p_new.id))
+
+ self.assertEqual(inode_before.id, id_)
+ self.assertEqual(inode_after.refcount, 2)
+ self.assertGreater(inode_after.ctime, inode_before.ctime)
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+
+ self.fsck()
+
+ def test_listxattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.assertListEqual([], self.server.listxattr(inode.id))
+
+ self.server.setxattr(inode.id, 'key1', 'blub')
+ self.assertListEqual(['key1'], self.server.listxattr(inode.id))
+
+ self.server.setxattr(inode.id, 'key2', 'blub')
+ self.assertListEqual(sorted(['key1', 'key2']),
+ sorted(self.server.listxattr(inode.id)))
+
+ self.fsck()
+
+ def test_read(self):
+
+ len_ = self.blocksize
+ data = self.random_data(len_)
+ off = self.blocksize // 2
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+
+ self.server.write(fh, off, data)
+ inode_before = self.server.getattr(inode.id).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.assertTrue(self.server.read(fh, off, len_) == data)
+ inode_after = self.server.getattr(inode.id)
+ self.assertGreater(inode_after.atime, inode_before.atime)
+ self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
+ self.assertTrue(self.server.read(fh, self.blocksize, len_) == data[off:])
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_readdir(self):
+
+ # Create a few entries
+ names = [ 'entry_%2d' % i for i in range(20) ]
+ for name in names:
+ (fh, _) = self.server.create(ROOT_INODE, name,
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+        # Delete some to make sure that we don't have continuous rowids
+ remove_no = [0, 2, 3, 5, 9]
+ for i in remove_no:
+ self.server.unlink(ROOT_INODE, names[i])
+ del names[i]
+
+ # Read all
+ fh = self.server.opendir(ROOT_INODE)
+ self.assertListEqual(sorted(names + ['lost+found']) ,
+ sorted(x[0] for x in self.server.readdir(fh, 0)))
+ self.server.releasedir(fh)
+
+ # Read in parts
+ fh = self.server.opendir(ROOT_INODE)
+ entries = list()
+ try:
+ next_ = 0
+ while True:
+ gen = self.server.readdir(fh, next_)
+ for _ in range(3):
+ (name, _, next_) = next(gen)
+ entries.append(name)
+
+ except StopIteration:
+ pass
+
+ self.assertListEqual(sorted(names + ['lost+found']) ,
+ sorted(entries))
+ self.server.releasedir(fh)
+
+ self.fsck()
+
+ def test_release(self):
+ name = self.newname()
+
+ # Test that entries are deleted when they're no longer referenced
+ (fh, inode) = self.server.create(ROOT_INODE, name,
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'foobar')
+ self.server.unlink(ROOT_INODE, name)
+ self.assertFalse(self.db.has_val('SELECT 1 FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertTrue(self.server.getattr(inode.id).id)
+ self.server.release(fh)
+
+ self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
+
+ self.fsck()
+
+ def test_removexattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.assertRaises(FUSEError, self.server.removexattr, inode.id, 'some name')
+ self.server.setxattr(inode.id, 'key1', 'blub')
+ self.server.removexattr(inode.id, 'key1')
+ self.assertListEqual([], self.server.listxattr(inode.id))
+
+ self.fsck()
+
+ def test_rename(self):
+ oldname = self.newname()
+ newname = self.newname()
+
+ inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+ inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+
+ self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
+
+ inode_p_old_after = self.server.getattr(ROOT_INODE)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (oldname, ROOT_INODE)))
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (newname, inode_p_new.id))
+ self.assertEqual(inode.id, id_)
+
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+ self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
+ self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
+
+
+ self.fsck()
+
+ def test_replace_file(self):
+ oldname = self.newname()
+ newname = self.newname()
+
+ (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'some data to deal with')
+ self.server.release(fh)
+ self.server.setxattr(inode.id, 'test_xattr', '42*8')
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+ inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
+
+ (fh, inode2) = self.server.create(inode_p_new.id, newname, self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'even more data to deal with')
+ self.server.release(fh)
+ self.server.setxattr(inode2.id, 'test_xattr', '42*8')
+
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
+
+ inode_p_old_after = self.server.getattr(ROOT_INODE)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (oldname, ROOT_INODE)))
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (newname, inode_p_new.id))
+ self.assertEqual(inode.id, id_)
+
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+ self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
+ self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
+
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
+
+ self.fsck()
+
+ def test_replace_dir(self):
+ oldname = self.newname()
+ newname = self.newname()
+
+ inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+ inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
+
+ inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())
+
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
+
+ inode_p_old_after = self.server.getattr(ROOT_INODE)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (oldname, ROOT_INODE)))
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (newname, inode_p_new.id))
+ self.assertEqual(inode.id, id_)
+
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+ self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
+ self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
+
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
+
+ self.fsck()
+
+ def test_setattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0641, Ctx())
+ self.server.release(fh)
+ inode_old = self.server.getattr(inode.id).copy()
+
+ attr = llfuse.EntryAttributes()
+ attr.st_mode = self.file_mode()
+ attr.st_uid = randint(0, 2 ** 32)
+ attr.st_gid = randint(0, 2 ** 32)
+ attr.st_rdev = randint(0, 2 ** 32)
+ attr.st_atime = time.timezone + randint(0, 2 ** 32) / 10 ** 6
+ attr.st_mtime = time.timezone + randint(0, 2 ** 32) / 10 ** 6
+
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.setattr(inode.id, attr)
+ inode_new = self.server.getattr(inode.id)
+ self.assertGreater(inode_new.ctime, inode_old.ctime)
+
+ for key in attr.__slots__:
+ if getattr(attr, key) is not None:
+ self.assertEquals(getattr(attr, key),
+ getattr(inode_new, key))
+
+
+ def test_truncate(self):
+ len_ = int(2.7 * self.blocksize)
+ data = self.random_data(len_)
+ attr = llfuse.EntryAttributes()
+
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx())
+ self.server.write(fh, 0, data)
+
+ attr.st_size = len_ // 2
+ self.server.setattr(inode.id, attr)
+ self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
+ attr.st_size = len_
+ self.server.setattr(inode.id, attr)
+ self.assertTrue(self.server.read(fh, 0, len_)
+ == data[:len_ // 2] + b'\0' * (len_ // 2))
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_truncate_0(self):
+ len1 = 158
+ len2 = 133
+ attr = llfuse.EntryAttributes()
+
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, self.random_data(len1))
+ self.server.release(fh)
+ self.server.inodes.flush()
+
+ fh = self.server.open(inode.id, os.O_RDWR)
+ attr.st_size = 0
+ self.server.setattr(inode.id, attr)
+ self.server.write(fh, 0, self.random_data(len2))
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_setxattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
+ self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')
+
+ self.fsck()
+
+ def test_statfs(self):
+ # Test with zero contents
+ self.server.statfs()
+
+ # Test with empty file
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+ self.server.statfs()
+
+ # Test with data in file
+ fh = self.server.open(inode.id, None)
+ self.server.write(fh, 0, 'foobar')
+ self.server.release(fh)
+
+ self.server.statfs()
+
+ def test_symlink(self):
+ target = self.newname()
+ name = self.newname()
+
+ inode_p_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
+ inode_p_after = self.server.getattr(ROOT_INODE)
+
+ self.assertEqual(target, self.server.readlink(inode.id))
+
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE))
+
+ self.assertEqual(inode.id, id_)
+ self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
+ self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
+
+
+ def test_unlink(self):
+ name = self.newname()
+
+ (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'some data to deal with')
+ self.server.release(fh)
+
+ # Add extended attributes
+ self.server.setxattr(inode.id, 'test_xattr', '42*8')
+
+ inode_p_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.unlink(ROOT_INODE, name)
+ inode_p_after = self.server.getattr(ROOT_INODE)
+
+ self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
+ self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
+
+ self.fsck()
+
+ def test_rmdir(self):
+ name = self.newname()
+ inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
+ inode_p_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.rmdir(ROOT_INODE, name)
+ inode_p_after = self.server.getattr(ROOT_INODE)
+
+ self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
+ self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
+
+ self.fsck()
+
+ def test_relink(self):
+ name = self.newname()
+ name2 = self.newname()
+ data = 'some data to deal with'
+
+ (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
+ self.server.write(fh, 0, data)
+ self.server.unlink(ROOT_INODE, name)
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
+
+ self.server.link(inode.id, ROOT_INODE, name2)
+ self.server.release(fh)
+
+ fh = self.server.open(inode.id, os.O_RDONLY)
+ self.assertTrue(self.server.read(fh, 0, len(data)) == data)
+ self.server.release(fh)
+ self.fsck()
+
+ def test_write(self):
+ len_ = self.blocksize
+ data = self.random_data(len_)
+ off = self.blocksize // 2
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ inode_before = self.server.getattr(inode.id).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.write(fh, off, data)
+ inode_after = self.server.getattr(inode.id)
+
+ self.assertGreater(inode_after.mtime, inode_before.mtime)
+ self.assertGreater(inode_after.ctime, inode_before.ctime)
+ self.assertEqual(inode_after.size, off + len_)
+
+ self.server.write(fh, 0, data)
+ inode_after = self.server.getattr(inode.id)
+ self.assertEqual(inode_after.size, off + len_)
+
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_copy_tree(self):
+
+ src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
+ dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())
+
+ # Create file
+ (fh, f1_inode) = self.server.create(src_inode.id, 'file1',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file1 contents')
+ self.server.release(fh)
+
+ # Create hardlink
+ (fh, f2_inode) = self.server.create(src_inode.id, 'file2',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file2 contents')
+ self.server.release(fh)
+ f2_inode = self.server.link(f2_inode.id, src_inode.id, 'file2_hardlink')
+
+ # Create subdirectory
+ d1_inode = self.server.mkdir(src_inode.id, 'dir1', self.dir_mode(), Ctx())
+ d2_inode = self.server.mkdir(d1_inode.id, 'dir2', self.dir_mode(), Ctx())
+
+ # ... with a third hardlink of file2
+ f2_inode = self.server.link(f2_inode.id, d1_inode.id, 'file2_hardlink')
+
+ # Replicate
+ self.server.copy_tree(src_inode.id, dst_inode.id)
+
+ # Change files
+ fh = self.server.open(f1_inode.id, os.O_RDWR)
+ self.server.write(fh, 0, 'new file1 contents')
+ self.server.release(fh)
+
+ fh = self.server.open(f2_inode.id, os.O_RDWR)
+ self.server.write(fh, 0, 'new file2 contents')
+ self.server.release(fh)
+
+ # Get copy properties
+ f1_inode_c = self.server.lookup(dst_inode.id, 'file1')
+ f2_inode_c = self.server.lookup(dst_inode.id, 'file2')
+ f2h_inode_c = self.server.lookup(dst_inode.id, 'file2_hardlink')
+ d1_inode_c = self.server.lookup(dst_inode.id, 'dir1')
+ d2_inode_c = self.server.lookup(d1_inode_c.id, 'dir2')
+ f2_h_inode_c = self.server.lookup(d1_inode_c.id, 'file2_hardlink')
+
+ # Check file1
+ fh = self.server.open(f1_inode_c.id, os.O_RDWR)
+ self.assertEqual(self.server.read(fh, 0, 42), 'file1 contents')
+ self.server.release(fh)
+ self.assertNotEqual(f1_inode.id, f1_inode_c.id)
+
+ # Check file2
+ fh = self.server.open(f2_inode_c.id, os.O_RDWR)
+ self.assertTrue(self.server.read(fh, 0, 42) == 'file2 contents')
+ self.server.release(fh)
+ self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
+ self.assertEqual(f2_inode_c.refcount, 3)
+ self.assertNotEqual(f2_inode.id, f2_inode_c.id)
+ self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)
+
+ # Check subdir1
+ self.assertNotEqual(d1_inode.id, d1_inode_c.id)
+ self.assertNotEqual(d2_inode.id, d2_inode_c.id)
+
+ self.fsck()
+
+ def test_lock_tree(self):
+
+ inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
+
+ # Create file
+ (fh, inode1a) = self.server.create(inode1.id, 'file1',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file1 contents')
+ self.server.release(fh)
+
+ # Create subdirectory
+ inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
+ (fh, inode2a) = self.server.create(inode2.id, 'file2',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file2 contents')
+ self.server.release(fh)
+
+ # Another file
+ (fh, inode3) = self.server.create(ROOT_INODE, 'file1',
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ # Lock
+ self.server.lock_tree(inode1.id)
+
+ for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
+ self.assertTrue(self.server.inodes[i].locked)
+
+ # Remove
+ with self.assertRaises(FUSEError) as cm:
+ self.server._remove(inode1.id, 'file1', inode1a.id)
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # Rename / Replace
+ with self.assertRaises(FUSEError) as cm:
+ self.server.rename(ROOT_INODE, 'file1', inode1.id, 'file2')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.rename(inode1.id, 'file1', ROOT_INODE, 'file2')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # Open
+ with self.assertRaises(FUSEError) as cm:
+ self.server.open(inode2a.id, os.O_RDWR)
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.open(inode2a.id, os.O_WRONLY)
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ self.server.release(self.server.open(inode3.id, os.O_WRONLY))
+
+ # Write
+ fh = self.server.open(inode2a.id, os.O_RDONLY)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.write(fh, 0, 'foo')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ self.server.release(fh)
+
+ # Create
+ with self.assertRaises(FUSEError) as cm:
+ self.server._create(inode2.id, 'dir1', self.dir_mode(), Ctx())
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # Setattr
+ with self.assertRaises(FUSEError) as cm:
+ self.server.setattr(inode2a.id, dict())
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # xattr
+ with self.assertRaises(FUSEError) as cm:
+ self.server.setxattr(inode2.id, 'name', 'value')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.removexattr(inode2.id, 'name')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ self.fsck()
+
+ def test_remove_tree(self):
+
+ inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
+
+ # Create file
+ (fh, inode1a) = self.server.create(inode1.id, 'file1',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file1 contents')
+ self.server.release(fh)
+
+ # Create subdirectory
+ inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
+ (fh, inode2a) = self.server.create(inode2.id, 'file2',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file2 contents')
+ self.server.release(fh)
+
+ # Remove
+ self.server.remove_tree(ROOT_INODE, 'source')
+
+ for (id_p, name) in ((ROOT_INODE, 'source'),
+ (inode1.id, 'file1'),
+ (inode1.id, 'dir1'),
+ (inode2.id, 'file2')):
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, id_p)))
+
+ for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,)))
+
+ self.fsck()
+
+
+def suite():
+ return unittest.makeSuite(fs_api_tests)
+
+if __name__ == "__main__":
+ unittest.main()
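
The tests above call the llfuse request handlers directly instead of going
through the kernel, and almost every test ends with self.fsck() to verify
that the operation left the file system metadata consistent. As a minimal
sketch of that pattern, a hypothetical additional test (not part of this
commit, assuming the same fixtures: self.server, ROOT_INODE, Ctx(),
self.newname() and self.file_mode()) would look like:

    def test_append(self):
        # Create a file, append past the current EOF, verify the size
        # that getattr() reports, then check overall consistency.
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, 'abc')
        self.server.write(fh, 3, 'def')   # extends the file to 6 bytes
        self.assertEqual(self.server.getattr(inode.id).size, 6)
        self.server.release(fh)
        self.fsck()
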
diff --git a/tests/t3_fsck.py b/tests/t3_fsck.py
new file mode 100644
index 0000000..e111320
--- /dev/null
+++ b/tests/t3_fsck.py
@@ -0,0 +1,329 @@
+'''
+t3_fsck.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.fsck import Fsck
+from s3ql.backends import local
+from s3ql.database import Connection
+from s3ql.common import ROOT_INODE, create_tables, init_tables
+from _common import TestCase
+import os
+import stat
+import tempfile
+import time
+import shutil
+
+class fsck_tests(TestCase):
+
+ def setUp(self):
+ self.bucket_dir = tempfile.mkdtemp()
+ self.passphrase = 'schnupp'
+ self.bucket = local.Connection().get_bucket(self.bucket_dir, self.passphrase)
+ self.cachedir = tempfile.mkdtemp() + "/"
+ self.blocksize = 1024
+
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+
+ self.fsck = Fsck(self.cachedir, self.bucket,
+ { 'blocksize': self.blocksize }, self.db)
+ self.fsck.expect_errors = True
+
+ def tearDown(self):
+ shutil.rmtree(self.cachedir)
+ shutil.rmtree(self.bucket_dir)
+
+ def assert_fsck(self, fn):
+ '''Check that fn detects and corrects an error'''
+
+ self.fsck.found_errors = False
+ fn()
+ self.assertTrue(self.fsck.found_errors)
+ self.fsck.found_errors = False
+ fn()
+ self.assertFalse(self.fsck.found_errors)
+
+ def test_cache(self):
+ inode = 6
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ fh = open(self.cachedir + 'inode_%d_block_1.d' % inode, 'wb')
+ fh.write('somedata')
+ fh.close()
+
+ self.assert_fsck(self.fsck.check_cache)
+ self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')
+
+ fh = open(self.cachedir + 'inode_%d_block_1' % inode, 'wb')
+ fh.write('otherdata')
+ fh.close()
+
+ self.assert_fsck(self.fsck.check_cache)
+ self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')
+
+
+ def test_lof1(self):
+
+ # Make lost+found a file
+ inode = self.db.get_val("SELECT inode FROM contents WHERE name=? AND parent_inode=?",
+ (b"lost+found", ROOT_INODE))
+ self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
+ self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))
+
+ self.assert_fsck(self.fsck.check_lof)
+
+ def test_lof2(self):
+ # Remove lost+found
+ self.db.execute('DELETE FROM contents WHERE name=? and parent_inode=?',
+ (b'lost+found', ROOT_INODE))
+
+ self.assert_fsck(self.fsck.check_lof)
+
+ def test_inode_refcount(self):
+
+ # Create an orphaned inode
+ self.db.execute("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 2, 0))
+
+ self.assert_fsck(self.fsck.check_inode_refcount)
+
+ # Create an inode with wrong refcount
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1, 0))
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
+ (b'name1', inode, ROOT_INODE))
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
+ (b'name2', inode, ROOT_INODE))
+
+ self.assert_fsck(self.fsck.check_inode_refcount)
+
+ def test_inode_sizes(self):
+
+ id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 2, 0))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', id_, ROOT_INODE))
+
+ # Create a block
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(?, ?)',
+ (1, 500))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (id_, 0, obj_id))
+
+ self.assert_fsck(self.fsck.check_inode_sizes)
+
+ def test_keylist(self):
+ # Create an object that only exists in the bucket
+ self.bucket['s3ql_data_4364'] = 'Testdata'
+ self.assert_fsck(self.fsck.check_keylist)
+
+ # Create an object that does not exist in the bucket
+ self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
+ (34, 1, 0))
+ self.assert_fsck(self.fsck.check_keylist)
+
+ @staticmethod
+ def random_data(len_):
+ with open("/dev/urandom", "rb") as fd:
+ return fd.read(len_)
+
+ def test_loops(self):
+
+ # Create some directory inodes
+ inodes = [ self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1))
+ for dummy in range(3) ]
+
+ inodes.append(inodes[0])
+ last = inodes[0]
+ for inode in inodes[1:]:
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
+ (bytes(inode), inode, last))
+ last = inode
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_refcount()
+ self.assertFalse(self.fsck.found_errors)
+ self.fsck.check_loops()
+ self.assertTrue(self.fsck.found_errors)
+ # We can't fix loops yet
+
+ def test_obj_refcounts(self):
+
+ obj_id = 42
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 0))
+
+ self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
+ (obj_id, 2, 0))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 1, obj_id))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 2, obj_id))
+
+ self.fsck.found_errors = False
+ self.fsck.check_obj_refcounts()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 3, obj_id))
+ self.assert_fsck(self.fsck.check_obj_refcounts)
+
+ self.db.execute('DELETE FROM blocks WHERE obj_id=?', (obj_id,))
+ self.assert_fsck(self.fsck.check_obj_refcounts)
+
+ def test_unix_size(self):
+
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 0))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+
+ def test_unix_size_symlink(self):
+
+ inode = 42
+ target = 'some funny random string'
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,target,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1,
+ target, len(target)))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_target(self):
+
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('UPDATE inodes SET target = ? WHERE id=?', ('foo', inode))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_rdev(self):
+
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_child(self):
+
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('foo', ROOT_INODE, inode))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_blocks(self):
+
+ obj_id = 87
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
+ (obj_id, 2, 0))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 1, obj_id))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(fsck_tests)
+
+if __name__ == "__main__":
+ unittest.main()
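
The assert_fsck() helper above encodes the contract that every checker has
to satisfy: a first run over damaged metadata must report (and repair) the
problem, and a second run must find nothing left to fix. A hypothetical
further test would follow the same two-step recipe, for example:

    def test_refcount_too_high(self):
        # Sketch only: create an inode whose stored refcount (42) does
        # not match its single directory entry, then let assert_fsck()
        # verify that check_inode_refcount detects and repairs it.
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 42, 0))
        self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
                        (b'entry', inode, ROOT_INODE))
        self.assert_fsck(self.fsck.check_inode_refcount)
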
diff --git a/tests/t3_inode_cache.py b/tests/t3_inode_cache.py
new file mode 100644
index 0000000..b1d8814
--- /dev/null
+++ b/tests/t3_inode_cache.py
@@ -0,0 +1,99 @@
+'''
+t3_inode_cache.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+
+from s3ql import inode_cache
+from s3ql.common import create_tables, init_tables
+from s3ql.database import Connection
+from _common import TestCase
+import unittest2 as unittest
+import time
+import tempfile
+
+class cache_tests(TestCase):
+
+ def setUp(self):
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+ self.cache = inode_cache.InodeCache(self.db)
+
+ def tearDown(self):
+ self.cache.destroy()
+
+ def test_create(self):
+ attrs = {'mode': 784,
+ 'refcount': 3,
+ 'uid': 7,
+ 'gid': 2,
+ 'size': 34674,
+ 'target': 'foobar',
+ 'rdev': 11,
+ 'atime': time.time(),
+ 'ctime': time.time(),
+ 'mtime': time.time() }
+
+ inode = self.cache.create_inode(**attrs)
+
+ for key in attrs.keys():
+ self.assertEqual(attrs[key], getattr(inode, key))
+
+ self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?',
+ (inode.id,)))
+
+
+ def test_del(self):
+ attrs = {'mode': 784,
+ 'refcount': 3,
+ 'uid': 7,
+ 'target': 'foobar',
+ 'gid': 2,
+ 'size': 34674,
+ 'rdev': 11,
+ 'atime': time.time(),
+ 'ctime': time.time(),
+ 'mtime': time.time() }
+ inode = self.cache.create_inode(**attrs)
+ del self.cache[inode.id]
+ self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
+ self.assertRaises(KeyError, self.cache.__delitem__, inode.id)
+
+ def test_get(self):
+ attrs = {'mode': 784,
+ 'refcount': 3,
+ 'uid': 7,
+ 'gid': 2,
+ 'target': 'foobar',
+ 'size': 34674,
+ 'rdev': 11,
+ 'atime': time.time(),
+ 'ctime': time.time(),
+ 'mtime': time.time() }
+ inode = self.cache.create_inode(**attrs)
+ self.assertEqual(inode, self.cache[inode.id])
+
+ self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
+ # Entry should still be in cache
+ self.assertEqual(inode, self.cache[inode.id])
+
+ # Create more than CACHE_SIZE new inodes so that the entry is evicted
+ for _ in xrange(inode_cache.CACHE_SIZE + 1):
+ dummy = self.cache[self.cache.create_inode(**attrs).id]
+
+ self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
+
+
+
+def suite():
+ return unittest.makeSuite(cache_tests)
+
+if __name__ == "__main__":
+ unittest.main()
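
test_get() relies on two properties of InodeCache: an entry that is already
cached keeps serving lookups even after its database row is deleted, and
creating more than CACHE_SIZE further entries evicts it, so later lookups
fall through to the (now empty) database and raise KeyError. A toy model of
that bounded-cache discipline (illustrative only; the real InodeCache also
writes attribute changes back to the database when an entry is flushed):

    from collections import OrderedDict

    class BoundedCache(object):
        '''Toy model of the eviction behaviour exercised by test_get().'''

        def __init__(self, size):
            self.size = size
            self.entries = OrderedDict()

        def __getitem__(self, key):
            return self.entries[key]            # KeyError once evicted

        def add(self, key, value):
            if len(self.entries) >= self.size:
                self.entries.popitem(last=False)  # drop the oldest entry
            self.entries[key] = value
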
diff --git a/tests/t4_adm.py b/tests/t4_adm.py
new file mode 100644
index 0000000..c033fd3
--- /dev/null
+++ b/tests/t4_adm.py
@@ -0,0 +1,67 @@
+'''
+t4_adm.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+from _common import TestCase
+import unittest2 as unittest
+import tempfile
+import sys
+import os
+from cStringIO import StringIO
+import shutil
+import s3ql.cli.mkfs
+import s3ql.cli.adm
+from s3ql.backends import local
+
+class AdmTests(TestCase):
+
+ def setUp(self):
+ self.cache_dir = tempfile.mkdtemp()
+ self.bucket_dir = tempfile.mkdtemp()
+
+ self.bucketname = 'local://' + os.path.join(self.bucket_dir, 'mybucket')
+ self.passphrase = 'oeut3d'
+
+ def tearDown(self):
+ shutil.rmtree(self.cache_dir)
+ shutil.rmtree(self.bucket_dir)
+
+ def mkfs(self):
+ sys.stdin = StringIO('%s\n%s\n' % (self.passphrase, self.passphrase))
+ try:
+ s3ql.cli.mkfs.main(['--homedir', self.cache_dir, self.bucketname ])
+ except BaseException as exc:
+ self.fail("mkfs.s3ql failed: %s" % exc)
+
+ def test_passphrase(self):
+ self.mkfs()
+
+ passphrase_new = 'sd982jhd'
+ sys.stdin = StringIO('%s\n%s\n%s\n' % (self.passphrase,
+ passphrase_new, passphrase_new))
+ try:
+ s3ql.cli.adm.main(['passphrase', self.bucketname ])
+ except BaseException as exc:
+ self.fail("s3qladm failed: %s" % exc)
+
+
+ bucket = local.Connection().get_bucket(os.path.join(self.bucket_dir, 'mybucket'))
+ bucket.passphrase = passphrase_new
+
+ bucket.passphrase = bucket['s3ql_passphrase']
+ self.assertTrue(isinstance(bucket['s3ql_seq_no_0'], str))
+
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(AdmTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
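
test_passphrase() works because S3QL encrypts the data with a randomly
generated master key and stores that key, wrapped with the user passphrase,
in the s3ql_passphrase object; changing the passphrase only rewraps the
master key, the data objects stay untouched. A toy model of the rewrapping
step (XOR stands in for real authenticated encryption):

    def xor(key, data):
        return ''.join(chr(ord(a) ^ ord(b))
                       for (a, b) in zip(key * len(data), data))

    master_key = 'M' * 8                    # chosen once, at mkfs time
    wrapped = xor('oeut3d', master_key)     # models the s3ql_passphrase object
    # s3qladm passphrase: unwrap with the old passphrase, rewrap with the new
    wrapped = xor('sd982jhd', xor('oeut3d', wrapped))
    assert xor('sd982jhd', wrapped) == master_key   # data key is unchanged
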
diff --git a/tests/t4_fuse.py b/tests/t4_fuse.py
new file mode 100644
index 0000000..8c5daa1
--- /dev/null
+++ b/tests/t4_fuse.py
@@ -0,0 +1,301 @@
+'''
+t4_fuse.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+from _common import TestCase
+from cStringIO import StringIO
+from os.path import basename
+from s3ql.common import retry, AsyncFn
+import filecmp
+import os.path
+import s3ql.cli.fsck
+import s3ql.cli.mkfs
+import s3ql.cli.mount
+import s3ql.cli.umount
+import shutil
+import stat
+import llfuse
+import subprocess
+import sys
+import tempfile
+import time
+import unittest2 as unittest
+
+# For debugging
+USE_VALGRIND = False
+
+class fuse_tests(TestCase):
+
+ def setUp(self):
+ # We need this to test multi block operations
+ self.src = __file__
+ if os.path.getsize(self.src) < 1048:
+ raise RuntimeError("test file %s should be bigger than 1 kb" % self.src)
+
+ self.mnt_dir = tempfile.mkdtemp()
+ self.cache_dir = tempfile.mkdtemp()
+ self.bucket_dir = tempfile.mkdtemp()
+
+ self.bucketname = 'local://' + os.path.join(self.bucket_dir, 'mybucket')
+ self.passphrase = 'oeut3d'
+
+ self.mount_thread = None
+ self.name_cnt = 0
+
+ def tearDown(self):
+ # Umount if still mounted
+ if os.path.ismount(self.mnt_dir):
+ subprocess.call(['fusermount', '-z', '-u', self.mnt_dir])
+
+ # Try to wait for mount thread to prevent spurious errors
+ # because the db file is being removed
+ if self.mount_thread and USE_VALGRIND:
+ retry(60, lambda: self.mount_thread.poll() is not None)
+ elif self.mount_thread:
+ self.mount_thread.join(60)
+
+ shutil.rmtree(self.mnt_dir)
+ shutil.rmtree(self.cache_dir)
+ shutil.rmtree(self.bucket_dir)
+
+ if not USE_VALGRIND and self.mount_thread and not self.mount_thread.is_alive():
+ self.mount_thread.join_and_raise()
+
+ def mount(self):
+
+ sys.stdin = StringIO('%s\n%s\n' % (self.passphrase, self.passphrase))
+ try:
+ s3ql.cli.mkfs.main(['-L', 'test fs', '--blocksize', '500',
+ '--homedir', self.cache_dir, self.bucketname ])
+ except BaseException as exc:
+ self.fail("mkfs.s3ql failed: %s" % exc)
+
+
+ # Note: When running inside test suite, we have less available
+ # file descriptors
+ if USE_VALGRIND:
+ if __name__ == '__main__':
+ mypath = sys.argv[0]
+ else:
+ mypath = __file__
+ basedir = os.path.abspath(os.path.join(os.path.dirname(mypath), '..'))
+ self.mount_thread = subprocess.Popen(['valgrind', 'python-dbg',
+ os.path.join(basedir, 'bin', 'mount.s3ql'),
+ "--fg", '--homedir', self.cache_dir,
+ '--max-cache-entries', '500',
+ self.bucketname, self.mnt_dir],
+ stdin=subprocess.PIPE)
+ print(self.passphrase, file=self.mount_thread.stdin)
+ retry(30, os.path.ismount, self.mnt_dir)
+ else:
+ sys.stdin = StringIO('%s\n' % self.passphrase)
+ self.mount_thread = AsyncFn(s3ql.cli.mount.main,
+ ["--fg", '--homedir', self.cache_dir,
+ '--max-cache-entries', '500',
+ self.bucketname, self.mnt_dir])
+ self.mount_thread.start()
+
+ # Wait for mountpoint to come up
+ try:
+ retry(3, os.path.ismount, self.mnt_dir)
+ except:
+ self.mount_thread.join_and_raise()
+
+ def umount(self):
+ time.sleep(0.5)
+ devnull = open('/dev/null', 'wb')
+ retry(5, lambda: subprocess.call(['fuser', '-m', self.mnt_dir],
+ stdout=devnull, stderr=devnull) == 1)
+ s3ql.cli.umount.DONTWAIT = True
+ try:
+ s3ql.cli.umount.main([self.mnt_dir])
+ except BaseException as exc:
+ self.fail("Umount failed: %s" % exc)
+
+ # Now wait for server process
+ if USE_VALGRIND:
+ self.assertEqual(self.mount_thread.wait(), 0)
+ else:
+ exc = self.mount_thread.join_get_exc()
+ self.assertIsNone(exc)
+ self.assertFalse(os.path.ismount(self.mnt_dir))
+
+ # Now run an fsck
+ sys.stdin = StringIO('%s\n' % self.passphrase)
+ try:
+ s3ql.cli.fsck.main(['--force', '--homedir', self.cache_dir,
+ self.bucketname])
+ except BaseException as exc:
+ self.fail("fsck failed: %s" % exc)
+
+ def runTest(self):
+ # Run all tests in same environment, mounting and umounting
+ # just takes too long otherwise
+
+ self.mount()
+ self.tst_chown()
+ self.tst_link()
+ self.tst_mkdir()
+ self.tst_mknod()
+ self.tst_readdir()
+ self.tst_statvfs()
+ self.tst_symlink()
+ self.tst_truncate()
+ self.tst_write()
+ self.umount()
+
+ def newname(self):
+ self.name_cnt += 1
+ return "s3ql_%d" % self.name_cnt
+
+ def tst_mkdir(self):
+ dirname = self.newname()
+ fullname = self.mnt_dir + "/" + dirname
+ os.mkdir(fullname)
+ fstat = os.stat(fullname)
+ self.assertTrue(stat.S_ISDIR(fstat.st_mode))
+ self.assertEquals(llfuse.listdir(fullname), [])
+ self.assertEquals(fstat.st_nlink, 1)
+ self.assertTrue(dirname in llfuse.listdir(self.mnt_dir))
+ os.rmdir(fullname)
+ self.assertRaises(OSError, os.stat, fullname)
+ self.assertTrue(dirname not in llfuse.listdir(self.mnt_dir))
+
+ def tst_symlink(self):
+ linkname = self.newname()
+ fullname = self.mnt_dir + "/" + linkname
+ os.symlink("/imaginary/dest", fullname)
+ fstat = os.lstat(fullname)
+ self.assertTrue(stat.S_ISLNK(fstat.st_mode))
+ self.assertEquals(os.readlink(fullname), "/imaginary/dest")
+ self.assertEquals(fstat.st_nlink, 1)
+ self.assertTrue(linkname in llfuse.listdir(self.mnt_dir))
+ os.unlink(fullname)
+ self.assertRaises(OSError, os.lstat, fullname)
+ self.assertTrue(linkname not in llfuse.listdir(self.mnt_dir))
+
+ def tst_mknod(self):
+ filename = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, filename)
+ fstat = os.lstat(filename)
+ self.assertTrue(stat.S_ISREG(fstat.st_mode))
+ self.assertEquals(fstat.st_nlink, 1)
+ self.assertTrue(basename(filename) in llfuse.listdir(self.mnt_dir))
+ self.assertTrue(filecmp.cmp(src, filename, False))
+ os.unlink(filename)
+ self.assertRaises(OSError, os.stat, filename)
+ self.assertTrue(basename(filename) not in llfuse.listdir(self.mnt_dir))
+
+ def tst_chown(self):
+ filename = os.path.join(self.mnt_dir, self.newname())
+ os.mkdir(filename)
+ fstat = os.lstat(filename)
+ uid = fstat.st_uid
+ gid = fstat.st_gid
+
+ uid_new = uid + 1
+ os.chown(filename, uid_new, -1)
+ fstat = os.lstat(filename)
+ self.assertEquals(fstat.st_uid, uid_new)
+ self.assertEquals(fstat.st_gid, gid)
+
+ gid_new = gid + 1
+ os.chown(filename, -1, gid_new)
+ fstat = os.lstat(filename)
+ self.assertEquals(fstat.st_uid, uid_new)
+ self.assertEquals(fstat.st_gid, gid_new)
+
+ os.rmdir(filename)
+ self.assertRaises(OSError, os.stat, filename)
+ self.assertTrue(basename(filename) not in llfuse.listdir(self.mnt_dir))
+
+
+ def tst_write(self):
+ name = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, name)
+ self.assertTrue(filecmp.cmp(name, src, False))
+
+ # Don't unlink file, we want to see if cache flushing
+ # works
+
+ def tst_statvfs(self):
+ os.statvfs(self.mnt_dir)
+
+ def tst_link(self):
+ name1 = os.path.join(self.mnt_dir, self.newname())
+ name2 = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, name1)
+ self.assertTrue(filecmp.cmp(name1, src, False))
+ os.link(name1, name2)
+
+ fstat1 = os.lstat(name1)
+ fstat2 = os.lstat(name2)
+
+ self.assertEquals(fstat1, fstat2)
+ self.assertEquals(fstat1.st_nlink, 2)
+
+ self.assertTrue(basename(name2) in llfuse.listdir(self.mnt_dir))
+ self.assertTrue(filecmp.cmp(name1, name2, False))
+ os.unlink(name2)
+ fstat1 = os.lstat(name1)
+ self.assertEquals(fstat1.st_nlink, 1)
+ os.unlink(name1)
+
+ def tst_readdir(self):
+ dir_ = os.path.join(self.mnt_dir, self.newname())
+ file_ = dir_ + "/" + self.newname()
+ subdir = dir_ + "/" + self.newname()
+ subfile = subdir + "/" + self.newname()
+ src = self.src
+
+ os.mkdir(dir_)
+ shutil.copyfile(src, file_)
+ os.mkdir(subdir)
+ shutil.copyfile(src, subfile)
+
+ listdir_is = llfuse.listdir(dir_)
+ listdir_is.sort()
+ listdir_should = [ basename(file_), basename(subdir) ]
+ listdir_should.sort()
+ self.assertEquals(listdir_is, listdir_should)
+
+ os.unlink(file_)
+ os.unlink(subfile)
+ os.rmdir(subdir)
+ os.rmdir(dir_)
+
+ def tst_truncate(self):
+ filename = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, filename)
+ self.assertTrue(filecmp.cmp(filename, src, False))
+ fstat = os.stat(filename)
+ size = fstat.st_size
+ fd = os.open(filename, os.O_RDWR)
+
+ os.ftruncate(fd, size + 1024) # add > 1 block
+ self.assertEquals(os.stat(filename).st_size, size + 1024)
+
+ os.ftruncate(fd, size - 1024) # Truncate > 1 block
+ self.assertEquals(os.stat(filename).st_size, size - 1024)
+
+ os.close(fd)
+ os.unlink(filename)
+
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(fuse_tests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
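
Both mount() and umount() synchronize on external state by polling:
retry(timeout, fn, *args) from s3ql.common is called until the predicate
(os.path.ismount, a fuser check, ...) returns a true value or the timeout
expires. A sketch of such a helper, assuming those semantics:

    import time

    def retry(timeout, fn, *args):
        '''Poll fn(*args) until it returns a true value or `timeout`
        seconds have passed (sketch; the real s3ql.common.retry may
        differ in detail).'''
        waited = 0
        interval = 0.1
        while waited < timeout:
            if fn(*args):
                return
            time.sleep(interval)
            waited += interval
            interval = min(2 * interval, 1)
        raise RuntimeError('timed out waiting for %r' % (fn,))
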
diff --git a/tests/t5_cli.py b/tests/t5_cli.py
new file mode 100644
index 0000000..8e6bf68
--- /dev/null
+++ b/tests/t5_cli.py
@@ -0,0 +1,74 @@
+'''
+t5_cli.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+import os.path
+import errno
+import s3ql.cli.ctrl
+import s3ql.cli.lock
+import s3ql.cli.remove
+import llfuse
+import unittest2 as unittest
+import t4_fuse
+
+class cliTests(t4_fuse.fuse_tests):
+
+ def runTest(self):
+ self.mount()
+ self.tst_lock_rm()
+ self.tst_ctrl_flush()
+ self.umount()
+
+ def tst_ctrl_flush(self):
+
+ try:
+ s3ql.cli.ctrl.main(['flushcache', self.mnt_dir])
+ except BaseException as exc:
+ self.fail("s3qladm failed: %s" % exc)
+
+ def tst_lock_rm(self):
+
+ # Create a directory with a file in it
+ tempdir = os.path.join(self.mnt_dir, 'lock_dir')
+ filename = os.path.join(tempdir, 'myfile')
+ os.mkdir(tempdir)
+ with open(filename, 'w') as fh:
+ fh.write('Hello, world')
+
+ # Lock the directory tree
+ try:
+ s3ql.cli.lock.main([tempdir])
+ except BaseException as exc:
+ self.fail("s3qllock failed: %s" % exc)
+
+ # Try to delete
+ with self.assertRaises(OSError) as cm:
+ os.unlink(filename)
+ self.assertEqual(cm.exception[0], errno.EPERM)
+
+ # Try to write
+ with self.assertRaises(IOError) as cm:
+ open(filename, 'w+').write('Hello')
+ self.assertEqual(cm.exception[0], errno.EPERM)
+
+ # delete properly
+ try:
+ s3ql.cli.remove.main([tempdir])
+ except BaseException as exc:
+ self.fail("s3qlrm failed: %s" % exc)
+
+ self.assertTrue('lock_dir' not in llfuse.listdir(self.mnt_dir))
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(cliTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t5_cp.py b/tests/t5_cp.py
new file mode 100644
index 0000000..f7f758c
--- /dev/null
+++ b/tests/t5_cp.py
@@ -0,0 +1,75 @@
+'''
+t5_cp.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+import os.path
+from s3ql.cli.cp import main as s3qlcp
+import subprocess
+import tarfile
+import tempfile
+import errno
+import unittest2 as unittest
+import t4_fuse
+
+
+class cpTests(t4_fuse.fuse_tests):
+
+ def runTest(self):
+ try:
+ subprocess.call(['rsync', '--version'],
+ stderr=subprocess.STDOUT,
+ stdout=open('/dev/null', 'wb'))
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ raise unittest.SkipTest('rsync not installed')
+ raise
+
+ self.mount()
+ self.tst_cp()
+
+ self.umount()
+
+ def tst_cp(self):
+
+ # Extract tar
+ data_file = os.path.join(os.path.dirname(__file__), 'data.tar.bz2')
+ tempdir = tempfile.mkdtemp()
+ tarfile.open(data_file).extractall(tempdir)
+
+ # Rsync
+ subprocess.check_call(['rsync', '-aHAX', tempdir + '/',
+ os.path.join(self.mnt_dir, 'orig') + '/'])
+
+ # copy
+ try:
+ s3qlcp([os.path.join(self.mnt_dir, 'orig'),
+ os.path.join(self.mnt_dir, 'copy')])
+ except BaseException as exc:
+ self.fail("s3qlcp failed: %s" % exc)
+
+ # compare
+ rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
+ tempdir + '/',
+ os.path.join(self.mnt_dir, 'copy') + '/'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out = rsync.communicate()[0]
+ if out:
+ self.fail('Copy not equal to original, rsync says:\n' + out)
+ elif rsync.returncode != 0:
+ self.fail('rsync failed with ' + out)
+
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(cpTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
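
The comparison at the end exploits rsync's dry-run itemize mode: with
-anciHAX --delete, rsync prints one line for every difference between the
two trees (including hardlinks, ACLs and extended attributes), so empty
output together with exit code 0 means the copy is exact. Wrapped as a
reusable predicate, the trick looks like this (a sketch under that
assumption):

    import subprocess

    def trees_equal(src, dst):
        # Dry-run (-n) plus itemize (-i): any output line means the
        # trees differ; no output and exit code 0 means they match.
        proc = subprocess.Popen(['rsync', '-anciHAX', '--delete',
                                 src + '/', dst + '/'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out = proc.communicate()[0]
        return proc.returncode == 0 and not out
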
diff --git a/util/cmdline_lexer.py b/util/cmdline_lexer.py
new file mode 100644
index 0000000..ac6df12
--- /dev/null
+++ b/util/cmdline_lexer.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+'''
+cmdline_lexer.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from pygments.token import Comment, Name, Generic, Literal
+from pygments.lexer import RegexLexer
+
+__all__ = [ 'CommandLineLexer' ]
+
+class CommandLineLexer(RegexLexer):
+ """
+ A lexer that highlights a command line with variable parts
+ """
+
+ name = 'CommandLine'
+ aliases = ['commandline']
+ mimetypes = []
+
+ tokens = {
+ 'root': [
+ (r'#.*\n', Comment),
+ (r'[^[<]+', Literal),
+ (r'\[[^[\]]+\]', Generic.Emph),
+ (r'<[^>]+>', Name.Variable),
+ ],
+
+ }
+
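
In the token table, plain text is matched as Literal, [...] spans come out
as Generic.Emph (optional parts) and <...> spans as Name.Variable
(placeholders). A hypothetical usage sketch, highlighting one of the
command lines from the documentation:

    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from cmdline_lexer import CommandLineLexer

    print(highlight('mount.s3ql [options] <storage url> <mount point>',
                    CommandLineLexer(), HtmlFormatter()))
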
diff --git a/util/distribute_setup.py b/util/distribute_setup.py
new file mode 100644
index 0000000..697f7ab
--- /dev/null
+++ b/util/distribute_setup.py
@@ -0,0 +1,485 @@
+#!python
+"""Bootstrap distribute installation
+
+If you want to use setuptools in your package's setup.py, just include this
+file in the same directory with it, and add this to the top of your setup.py::
+
+ from distribute_setup import use_setuptools
+ use_setuptools()
+
+If you want to require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, you can do so by supplying
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+
+#pylint: disable-all
+#@PydevCodeAnalysisIgnore
+
+import os
+import sys
+import time
+import fnmatch
+import tempfile
+import tarfile
+from distutils import log
+
+try:
+ from site import USER_SITE
+except ImportError:
+ USER_SITE = None
+
+try:
+ import subprocess
+
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ return subprocess.call(args) == 0
+
+except ImportError:
+ # will be used for python 2.3
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ # quoting arguments if windows
+ if sys.platform == 'win32':
+ def quote(arg):
+ if ' ' in arg:
+ return '"%s"' % arg
+ return arg
+ args = [quote(arg) for arg in args]
+ return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
+
+DEFAULT_VERSION = "0.6.12"
+DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
+SETUPTOOLS_FAKED_VERSION = "0.6c11"
+
+SETUPTOOLS_PKG_INFO = """\
+Metadata-Version: 1.0
+Name: setuptools
+Version: %s
+Summary: xxxx
+Home-page: xxx
+Author: xxx
+Author-email: xxx
+License: xxx
+Description: xxx
+""" % SETUPTOOLS_FAKED_VERSION
+
+
+def _install(tarball):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # installing
+ log.warn('Installing Distribute')
+ if not _python_cmd('setup.py', 'install'):
+ log.warn('Something went wrong during the installation.')
+ log.warn('See the error message above.')
+ finally:
+ os.chdir(old_wd)
+
+
+def _build_egg(egg, tarball, to_dir):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # building an egg
+ log.warn('Building a Distribute egg in %s', to_dir)
+ _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+ finally:
+ os.chdir(old_wd)
+ # returning the result
+ log.warn(egg)
+ if not os.path.exists(egg):
+ raise IOError('Could not build the egg.')
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+ egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
+ % (version, sys.version_info[0], sys.version_info[1]))
+ if not os.path.exists(egg):
+ tarball = download_setuptools(version, download_base,
+ to_dir, download_delay)
+ _build_egg(egg, tarball, to_dir)
+ sys.path.insert(0, egg)
+ import setuptools
+ setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, download_delay=15, no_fake=True):
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ was_imported = 'pkg_resources' in sys.modules or \
+ 'setuptools' in sys.modules
+ try:
+ try:
+ import pkg_resources
+ if not hasattr(pkg_resources, '_distribute'):
+ if not no_fake:
+ _fake_setuptools()
+ raise ImportError
+ except ImportError:
+ return _do_download(version, download_base, to_dir, download_delay)
+ try:
+ pkg_resources.require("distribute>="+version)
+ return
+ except pkg_resources.VersionConflict:
+ e = sys.exc_info()[1]
+ if was_imported:
+ sys.stderr.write(
+ "The required version of distribute (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U distribute'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
+ sys.exit(2)
+ else:
+ del pkg_resources, sys.modules['pkg_resources'] # reload ok
+ return _do_download(version, download_base, to_dir,
+ download_delay)
+ except pkg_resources.DistributionNotFound:
+ return _do_download(version, download_base, to_dir,
+ download_delay)
+ finally:
+ if not no_fake:
+ _create_fake_setuptools_pkg_info(to_dir)
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, delay=15):
+ """Download distribute from a specified location and return its filename
+
+ `version` should be a valid distribute version number that is available
+ as an egg for download under the `download_base` URL (which should end
+ with a '/'). `to_dir` is the directory where the egg will be downloaded.
+ `delay` is the number of seconds to pause before an actual download
+ attempt.
+ """
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ try:
+ from urllib.request import urlopen
+ except ImportError:
+ from urllib2 import urlopen
+ tgz_name = "distribute-%s.tar.gz" % version
+ url = download_base + tgz_name
+ saveto = os.path.join(to_dir, tgz_name)
+ src = dst = None
+ if not os.path.exists(saveto): # Avoid repeated downloads
+ try:
+ log.warn("Downloading %s", url)
+ src = urlopen(url)
+ # Read/write all in one block, so we don't create a corrupt file
+ # if the download is interrupted.
+ data = src.read()
+ dst = open(saveto, "wb")
+ dst.write(data)
+ finally:
+ if src:
+ src.close()
+ if dst:
+ dst.close()
+ return os.path.realpath(saveto)
+
+def _no_sandbox(function):
+ def __no_sandbox(*args, **kw):
+ try:
+ from setuptools.sandbox import DirectorySandbox
+ if not hasattr(DirectorySandbox, '_old'):
+ def violation(*args):
+ pass
+ DirectorySandbox._old = DirectorySandbox._violation
+ DirectorySandbox._violation = violation
+ patched = True
+ else:
+ patched = False
+ except ImportError:
+ patched = False
+
+ try:
+ return function(*args, **kw)
+ finally:
+ if patched:
+ DirectorySandbox._violation = DirectorySandbox._old
+ del DirectorySandbox._old
+
+ return __no_sandbox
+
+@_no_sandbox
+def _patch_file(path, content):
+ """Will backup the file then patch it"""
+ existing_content = open(path).read()
+ if existing_content == content:
+ # already patched
+ log.warn('Already patched.')
+ return False
+ log.warn('Patching...')
+ _rename_path(path)
+ f = open(path, 'w')
+ try:
+ f.write(content)
+ finally:
+ f.close()
+ return True
+
+
+def _same_content(path, content):
+ return open(path).read() == content
+
+def _rename_path(path):
+ new_name = path + '.OLD.%s' % time.time()
+ log.warn('Renaming %s into %s', path, new_name)
+ os.rename(path, new_name)
+ return new_name
+
+@_no_sandbox
+def _remove_flat_installation(placeholder):
+ if not os.path.isdir(placeholder):
+ log.warn('Unknown installation at %s', placeholder)
+ return False
+ found = False
+ for file in os.listdir(placeholder):
+ if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
+ found = True
+ break
+ if not found:
+ log.warn('Could not locate setuptools*.egg-info')
+ return
+
+ log.warn('Removing elements out of the way...')
+ pkg_info = os.path.join(placeholder, file)
+ if os.path.isdir(pkg_info):
+ patched = _patch_egg_dir(pkg_info)
+ else:
+ patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
+
+ if not patched:
+ log.warn('%s already patched.', pkg_info)
+ return False
+ # now let's move the files out of the way
+ for element in ('setuptools', 'pkg_resources.py', 'site.py'):
+ element = os.path.join(placeholder, element)
+ if os.path.exists(element):
+ _rename_path(element)
+ else:
+ log.warn('Could not find the %s element of the '
+ 'Setuptools distribution', element)
+ return True
+
+
+def _after_install(dist):
+ log.warn('After install bootstrap.')
+ placeholder = dist.get_command_obj('install').install_purelib
+ _create_fake_setuptools_pkg_info(placeholder)
+
+@_no_sandbox
+def _create_fake_setuptools_pkg_info(placeholder):
+ if not placeholder or not os.path.exists(placeholder):
+ log.warn('Could not find the install location')
+ return
+ pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
+ setuptools_file = 'setuptools-%s-py%s.egg-info' % \
+ (SETUPTOOLS_FAKED_VERSION, pyver)
+ pkg_info = os.path.join(placeholder, setuptools_file)
+ if os.path.exists(pkg_info):
+ log.warn('%s already exists', pkg_info)
+ return
+
+ log.warn('Creating %s', pkg_info)
+ f = open(pkg_info, 'w')
+ try:
+ f.write(SETUPTOOLS_PKG_INFO)
+ finally:
+ f.close()
+
+ pth_file = os.path.join(placeholder, 'setuptools.pth')
+ log.warn('Creating %s', pth_file)
+ f = open(pth_file, 'w')
+ try:
+ f.write(os.path.join(os.curdir, setuptools_file))
+ finally:
+ f.close()
+
+@_no_sandbox
+def _patch_egg_dir(path):
+ # let's check if it's already patched
+ pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+ if os.path.exists(pkg_info):
+ if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
+ log.warn('%s already patched.', pkg_info)
+ return False
+ _rename_path(path)
+ os.mkdir(path)
+ os.mkdir(os.path.join(path, 'EGG-INFO'))
+ pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+ f = open(pkg_info, 'w')
+ try:
+ f.write(SETUPTOOLS_PKG_INFO)
+ finally:
+ f.close()
+ return True
+
+
+def _before_install():
+ log.warn('Before install bootstrap.')
+ _fake_setuptools()
+
+
+def _under_prefix(location):
+ if 'install' not in sys.argv:
+ return True
+ args = sys.argv[sys.argv.index('install')+1:]
+ for index, arg in enumerate(args):
+ for option in ('--root', '--prefix'):
+ if arg.startswith('%s=' % option):
+ top_dir = arg.split('root=')[-1]
+ return location.startswith(top_dir)
+ elif arg == option:
+ if len(args) > index:
+ top_dir = args[index+1]
+ return location.startswith(top_dir)
+ elif option == '--user' and USER_SITE is not None:
+ return location.startswith(USER_SITE)
+ return True
+
+
+def _fake_setuptools():
+ log.warn('Scanning installed packages')
+ try:
+ import pkg_resources
+ except ImportError:
+ # we're cool
+ log.warn('Setuptools or Distribute does not seem to be installed.')
+ return
+ ws = pkg_resources.working_set
+ try:
+ setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
+ replacement=False))
+ except TypeError:
+ # old distribute API
+ setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+
+ if setuptools_dist is None:
+ log.warn('No setuptools distribution found')
+ return
+ # detecting if it was already faked
+ setuptools_location = setuptools_dist.location
+ log.warn('Setuptools installation detected at %s', setuptools_location)
+
+ # if --root or --prefix was provided, and if
+ # setuptools is not located in them, we don't patch it
+ if not _under_prefix(setuptools_location):
+ log.warn('Not patching, --root or --prefix is installing Distribute'
+ ' in another location')
+ return
+
+ # let's see if its an egg
+ if not setuptools_location.endswith('.egg'):
+ log.warn('Non-egg installation')
+ res = _remove_flat_installation(setuptools_location)
+ if not res:
+ return
+ else:
+ log.warn('Egg installation')
+ pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
+ if (os.path.exists(pkg_info) and
+ _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+ log.warn('Already patched.')
+ return
+ log.warn('Patching...')
+ # let's create a fake egg replacing setuptools one
+ res = _patch_egg_dir(setuptools_location)
+ if not res:
+ return
+ log.warn('Patching done.')
+ _relaunch()
+
+
+def _relaunch():
+ log.warn('Relaunching...')
+ # we have to relaunch the process
+ args = [sys.executable] + sys.argv
+ sys.exit(subprocess.call(args))
+
+
+def _extractall(self, path=".", members=None):
+ """Extract all members from the archive to the current working
+ directory and set owner, modification time and permissions on
+ directories afterwards. `path' specifies a different directory
+ to extract to. `members' is optional and must be a subset of the
+ list returned by getmembers().
+ """
+ import copy
+ import operator
+ from tarfile import ExtractError
+ directories = []
+
+ if members is None:
+ members = self
+
+ for tarinfo in members:
+ if tarinfo.isdir():
+ # Extract directories with a safe mode.
+ directories.append(tarinfo)
+ tarinfo = copy.copy(tarinfo)
+ tarinfo.mode = 448 # decimal for oct 0700
+ self.extract(tarinfo, path)
+
+ # Reverse sort directories.
+ if sys.version_info < (2, 4):
+ def sorter(dir1, dir2):
+ return cmp(dir1.name, dir2.name)
+ directories.sort(sorter)
+ directories.reverse()
+ else:
+ directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+ # Set correct owner, mtime and filemode on directories.
+ for tarinfo in directories:
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ self.chown(tarinfo, dirpath)
+ self.utime(tarinfo, dirpath)
+ self.chmod(tarinfo, dirpath)
+ except ExtractError:
+ e = sys.exc_info()[1]
+ if self.errorlevel > 1:
+ raise
+ else:
+ self._dbg(1, "tarfile: %s" % e)
+
+
+def main(argv, version=DEFAULT_VERSION):
+ """Install or upgrade setuptools and EasyInstall"""
+ tarball = download_setuptools()
+ _install(tarball)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
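
The "appropriate options" that the module docstring mentions map directly
onto the use_setuptools() signature above. A sketch of a setup.py that pins
a minimum distribute version and keeps the downloaded tarball in a local
directory (paths hypothetical):

    from distribute_setup import use_setuptools
    # Require distribute >= 0.6.12; download into ./vendor if missing
    use_setuptools(version='0.6.12', to_dir='./vendor', download_delay=0)

    from setuptools import setup
    setup(name='example', version='0.1')
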
diff --git a/util/sphinx_pipeinclude.py b/util/sphinx_pipeinclude.py
new file mode 100644
index 0000000..3434825
--- /dev/null
+++ b/util/sphinx_pipeinclude.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+'''
+sphinx_pipeinclude.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Implements a Sphinx extension that provides a `pipeinclude` directive
+to include the output of a program.
+
+
+Copyright (C) 2008-2011 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from docutils.parsers.rst.directives.misc import Include
+import subprocess
+import shlex
+from docutils import io, nodes, statemachine
+import os.path
+
+class PipeInclude(Include):
+ """
+ Include program output as ReST source.
+ """
+
+ def run(self):
+ source = self.state_machine.input_lines.source(
+ self.lineno - self.state_machine.input_offset - 1)
+ source_dir = os.path.dirname(os.path.abspath(source))
+
+ command = self.arguments[0].encode('UTF-8')
+ encoding = self.options.get(
+ 'encoding', self.state.document.settings.input_encoding)
+ tab_width = self.options.get(
+ 'tab-width', self.state.document.settings.tab_width)
+
+ try:
+ child = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
+ cwd=source_dir)
+ include_file = io.FileInput(
+ source=child.stdout, encoding=encoding,
+ error_handler=(self.state.document.settings.\
+ input_encoding_error_handler),
+ handle_io_errors=None)
+ except IOError, error:
+ raise self.severe('Problems with "%s" directive path:\n%s: %s.' %
+ (self.name, error.__class__.__name__, str(error)))
+ # Hack: Since Python 2.6, the string interpolation returns a
+ # unicode object if one of the supplied %s replacements is a
+ # unicode object. IOError has no `__unicode__` method and the
+ # fallback `__repr__` does not report the file name. Explicitly
+ # converting to str fixes this for now::
+ # print '%s\n%s\n%s\n' %(error, str(error), repr(error))
+ startline = self.options.get('start-line', None)
+ endline = self.options.get('end-line', None)
+ try:
+ if startline or (endline is not None):
+ include_lines = include_file.readlines()
+ include_text = ''.join(include_lines[startline:endline])
+ else:
+ include_text = include_file.read()
+ except UnicodeError, error:
+ raise self.severe(
+ 'Problem with "%s" directive:\n%s: %s'
+ % (self.name, error.__class__.__name__, error))
+ # start-after/end-before: no restrictions on newlines in match-text,
+ # and no restrictions on matching inside lines vs. line boundaries
+ after_text = self.options.get('start-after', None)
+ if after_text:
+ # skip content in include_text before *and incl.* a matching text
+ after_index = include_text.find(after_text)
+ if after_index < 0:
+ raise self.severe('Problem with "start-after" option of "%s" '
+ 'directive:\nText not found.' % self.name)
+ include_text = include_text[after_index + len(after_text):]
+ before_text = self.options.get('end-before', None)
+ if before_text:
+ # skip content in include_text after *and incl.* a matching text
+ before_index = include_text.find(before_text)
+ if before_index < 0:
+ raise self.severe('Problem with "end-before" option of "%s" '
+ 'directive:\nText not found.' % self.name)
+ include_text = include_text[:before_index]
+ if 'literal' in self.options:
+ # Convert tabs to spaces, if `tab_width` is positive.
+ if tab_width >= 0:
+ text = include_text.expandtabs(tab_width)
+ else:
+ text = include_text
+ literal_block = nodes.literal_block(include_text, text,
+ source=command)
+ literal_block.line = 1
+ return [literal_block]
+ else:
+ include_lines = statemachine.string2lines(
+ include_text, tab_width, convert_whitespace=1)
+ self.state_machine.insert_input(include_lines, command)
+ return []
+
+
+def setup(app):
+ app.add_directive('pipeinclude', PipeInclude)
+
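
To use the directive, the module has to be importable by Sphinx and listed
as an extension; setup() then registers pipeinclude, whose argument is the
command whose output is included in place of the directive. A hypothetical
conf.py fragment, with the .rst syntax shown in a comment:

    # conf.py (sketch): make util/ importable and enable the extension
    import os, sys
    sys.path.insert(0, os.path.abspath('../util'))
    extensions = ['sphinx_pipeinclude']

    # A .rst source can then include program output in place, e.g.:
    #
    #   .. pipeinclude:: python ../bin/mkfs.s3ql --help
    #      :literal: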