summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Changes.txt30
-rw-r--r--PKG-INFO2
-rwxr-xr-xbin/fsck.s3ql4
-rwxr-xr-xbin/mkfs.s3ql4
-rwxr-xr-xbin/mount.s3ql4
-rwxr-xr-xbin/s3qladm4
-rwxr-xr-xbin/s3qlcp4
-rwxr-xr-xbin/s3qlctrl4
-rwxr-xr-xbin/s3qllock4
-rwxr-xr-xbin/s3qlrm4
-rwxr-xr-xbin/s3qlstat4
-rwxr-xr-xbin/umount.s3ql4
-rwxr-xr-xcontrib/benchmark.py19
-rw-r--r--contrib/expire_backups.12
-rwxr-xr-xcontrib/expire_backups.py2
-rwxr-xr-xcontrib/fsck_db.py97
-rw-r--r--contrib/pcp.12
-rw-r--r--doc/html/.buildinfo2
-rw-r--r--doc/html/_sources/backends.txt11
-rw-r--r--doc/html/_sources/installation.txt21
-rw-r--r--doc/html/_sources/issues.txt22
-rw-r--r--doc/html/about.html10
-rw-r--r--doc/html/adm.html10
-rw-r--r--doc/html/backends.html20
-rw-r--r--doc/html/contrib.html10
-rw-r--r--doc/html/fsck.html14
-rw-r--r--doc/html/general.html10
-rw-r--r--doc/html/index.html10
-rw-r--r--doc/html/installation.html29
-rw-r--r--doc/html/issues.html30
-rw-r--r--doc/html/man/adm.html10
-rw-r--r--doc/html/man/cp.html10
-rw-r--r--doc/html/man/ctrl.html10
-rw-r--r--doc/html/man/expire_backups.html10
-rw-r--r--doc/html/man/fsck.html14
-rw-r--r--doc/html/man/index.html10
-rw-r--r--doc/html/man/lock.html10
-rw-r--r--doc/html/man/mkfs.html10
-rw-r--r--doc/html/man/mount.html10
-rw-r--r--doc/html/man/pcp.html10
-rw-r--r--doc/html/man/rm.html10
-rw-r--r--doc/html/man/stat.html10
-rw-r--r--doc/html/man/umount.html10
-rw-r--r--doc/html/mkfs.html10
-rw-r--r--doc/html/mount.html10
-rw-r--r--doc/html/objects.inv2
-rw-r--r--doc/html/resources.html10
-rw-r--r--doc/html/search.html10
-rw-r--r--doc/html/searchindex.js2
-rw-r--r--doc/html/special.html10
-rw-r--r--doc/html/tips.html10
-rw-r--r--doc/html/umount.html10
-rw-r--r--doc/latex/manual.aux1
-rw-r--r--doc/latex/manual.log44
-rw-r--r--doc/latex/manual.tex75
-rw-r--r--doc/man/fsck.s3ql.15
-rw-r--r--doc/man/mkfs.s3ql.12
-rw-r--r--doc/man/mount.s3ql.12
-rw-r--r--doc/man/s3qladm.12
-rw-r--r--doc/man/s3qlcp.12
-rw-r--r--doc/man/s3qlctrl.12
-rw-r--r--doc/man/s3qllock.12
-rw-r--r--doc/man/s3qlrm.12
-rw-r--r--doc/man/s3qlstat.12
-rw-r--r--doc/man/umount.s3ql.12
-rw-r--r--doc/manual.pdfbin287015 -> 286415 bytes
-rw-r--r--rst/backends.rst11
-rw-r--r--rst/installation.rst21
-rw-r--r--rst/issues.rst22
-rwxr-xr-xsetup.py83
-rw-r--r--src/s3ql.egg-info/PKG-INFO2
-rw-r--r--src/s3ql.egg-info/SOURCES.txt31
-rw-r--r--src/s3ql.egg-info/entry_points.txt20
-rw-r--r--src/s3ql.egg-info/requires.txt1
-rw-r--r--src/s3ql/__init__.py19
-rw-r--r--src/s3ql/_deltadump.c6278
-rw-r--r--src/s3ql/_deltadump.pyx448
-rw-r--r--src/s3ql/adm.py507
-rw-r--r--src/s3ql/backends/common.py14
-rw-r--r--src/s3ql/backends/local.py5
-rw-r--r--src/s3ql/backends/s3.py25
-rw-r--r--src/s3ql/block_cache.py14
-rw-r--r--src/s3ql/cleanup_manager.py40
-rw-r--r--src/s3ql/cli/__init__.py12
-rw-r--r--src/s3ql/cli/adm.py945
-rw-r--r--src/s3ql/cli/fsck.py245
-rw-r--r--src/s3ql/common.py308
-rw-r--r--src/s3ql/cp.py (renamed from src/s3ql/cli/cp.py)13
-rw-r--r--src/s3ql/ctrl.py (renamed from src/s3ql/cli/ctrl.py)11
-rw-r--r--src/s3ql/daemonize.py4
-rw-r--r--src/s3ql/database.py2
-rw-r--r--src/s3ql/deltadump.py66
-rw-r--r--src/s3ql/fs.py50
-rw-r--r--src/s3ql/fsck.py875
-rw-r--r--src/s3ql/inode_cache.py42
-rw-r--r--src/s3ql/lock.py (renamed from src/s3ql/cli/lock.py)11
-rw-r--r--src/s3ql/metadata.py199
-rw-r--r--src/s3ql/mkfs.py (renamed from src/s3ql/cli/mkfs.py)64
-rw-r--r--src/s3ql/mount.py (renamed from src/s3ql/cli/mount.py)102
-rw-r--r--src/s3ql/ordered_dict.py3
-rw-r--r--src/s3ql/parse_args.py10
-rw-r--r--src/s3ql/remove.py (renamed from src/s3ql/cli/remove.py)11
-rw-r--r--src/s3ql/statfs.py (renamed from src/s3ql/cli/statfs.py)8
-rw-r--r--src/s3ql/umount.py (renamed from src/s3ql/cli/umount.py)11
-rw-r--r--tests/t1_dump.py213
-rw-r--r--tests/t2_block_cache.py9
-rw-r--r--tests/t3_fs_api.py51
-rw-r--r--tests/t3_fsck.py187
-rw-r--r--tests/t3_inode_cache.py5
-rw-r--r--tests/t4_fuse.py38
-rw-r--r--tests/t5_ctrl.py40
-rw-r--r--tests/t5_fsck.py89
-rw-r--r--tests/t5_lock_rm.py (renamed from tests/t5_cli.py)27
113 files changed, 9726 insertions, 2210 deletions
diff --git a/Changes.txt b/Changes.txt
index 4c112de..fa13250 100644
--- a/Changes.txt
+++ b/Changes.txt
@@ -1,3 +1,33 @@
+2011-11-27, S3QL 1.7
+
+ * Extended attribute names are now de-duplicated.
+
+ * Metadata is now stored in a custom, delta-encoded binary format and
+ then BZIP2 compressed, resulting in a 5-fold speedup when dumping
+ and compressing.
+
+ * Inodes are now assigned sequentially rather than randomly, and old
+ inodes are not reused. This makes S3QL fully NFS compatible and
+ allows metadata to be stored much more efficiently, resulting in
+ a 4 to 8 fold decrease in compressed metadata size.
+
+ * s3qlcp now also copies extended attributes.
+
+ * s3qlcp no longer generates incorrect block reference counts when
+ copying a file that has identical blocks (i.e., that can be
+ de-duplicated within itself).
+
+ * Eliminated a race condition in s3qlcp. When copying a file with
+ s3qlcp immediately after it was modified or created, it was
+ possible that s3qlcp would copy the new file attributes (size,
+ modification time, etc.) but the old data blocks.
+
+ A copy of a newly created file would then contain only zero bytes,
+ while a copy of a modified file would look like the original but
+ contain data from before the modification.
+
+ * "mkfs.s3ql --force" and "s3qladm clear" are working again.
+
2011-11-20, S3QL 1.6
* fsck: gracefully recover if temporary indices already exist
diff --git a/PKG-INFO b/PKG-INFO
index fc583e4..cc2f675 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: s3ql
-Version: 1.6
+Version: 1.7
Summary: a full-featured file system for online data storage
Home-page: http://code.google.com/p/s3ql/
Author: Nikolaus Rath
diff --git a/bin/fsck.s3ql b/bin/fsck.s3ql
index f0dffb6..3c69f69 100755
--- a/bin/fsck.s3ql
+++ b/bin/fsck.s3ql
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.fsck
-s3ql.cli.fsck.main(sys.argv[1:]) \ No newline at end of file
+import s3ql.fsck
+s3ql.fsck.main(sys.argv[1:]) \ No newline at end of file
diff --git a/bin/mkfs.s3ql b/bin/mkfs.s3ql
index 47b3131..94f3443 100755
--- a/bin/mkfs.s3ql
+++ b/bin/mkfs.s3ql
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.mkfs
-s3ql.cli.mkfs.main(sys.argv[1:]) \ No newline at end of file
+import s3ql.mkfs
+s3ql.mkfs.main(sys.argv[1:]) \ No newline at end of file
diff --git a/bin/mount.s3ql b/bin/mount.s3ql
index 94fc5f8..8e5e7b1 100755
--- a/bin/mount.s3ql
+++ b/bin/mount.s3ql
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.mount
-s3ql.cli.mount.main(sys.argv[1:]) \ No newline at end of file
+import s3ql.mount
+s3ql.mount.main(sys.argv[1:]) \ No newline at end of file
diff --git a/bin/s3qladm b/bin/s3qladm
index f4667d3..3baa85e 100755
--- a/bin/s3qladm
+++ b/bin/s3qladm
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.adm
-s3ql.cli.adm.main(sys.argv[1:])
+import s3ql.adm
+s3ql.adm.main(sys.argv[1:])
diff --git a/bin/s3qlcp b/bin/s3qlcp
index c62ef24..cbf7355 100755
--- a/bin/s3qlcp
+++ b/bin/s3qlcp
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.cp
-s3ql.cli.cp.main(sys.argv[1:])
+import s3ql.cp
+s3ql.cp.main(sys.argv[1:])
diff --git a/bin/s3qlctrl b/bin/s3qlctrl
index 27dbd7c..9a44906 100755
--- a/bin/s3qlctrl
+++ b/bin/s3qlctrl
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.ctrl
-s3ql.cli.ctrl.main(sys.argv[1:])
+import s3ql.ctrl
+s3ql.ctrl.main(sys.argv[1:])
diff --git a/bin/s3qllock b/bin/s3qllock
index b13c730..2873229 100755
--- a/bin/s3qllock
+++ b/bin/s3qllock
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.lock
-s3ql.cli.lock.main(sys.argv[1:])
+import s3ql.lock
+s3ql.lock.main(sys.argv[1:])
diff --git a/bin/s3qlrm b/bin/s3qlrm
index a98b3c5..5b9ed34 100755
--- a/bin/s3qlrm
+++ b/bin/s3qlrm
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.remove
-s3ql.cli.remove.main(sys.argv[1:])
+import s3ql.remove
+s3ql.remove.main(sys.argv[1:])
diff --git a/bin/s3qlstat b/bin/s3qlstat
index 1650c6f..748af6e 100755
--- a/bin/s3qlstat
+++ b/bin/s3qlstat
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.statfs
-s3ql.cli.statfs.main(sys.argv[1:])
+import s3ql.statfs
+s3ql.statfs.main(sys.argv[1:])
diff --git a/bin/umount.s3ql b/bin/umount.s3ql
index 48f014b..db082a8 100755
--- a/bin/umount.s3ql
+++ b/bin/umount.s3ql
@@ -19,5 +19,5 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-import s3ql.cli.umount
-s3ql.cli.umount.main(sys.argv[1:]) \ No newline at end of file
+import s3ql.umount
+s3ql.umount.main(sys.argv[1:]) \ No newline at end of file
diff --git a/contrib/benchmark.py b/contrib/benchmark.py
index e001586..0737a86 100755
--- a/contrib/benchmark.py
+++ b/contrib/benchmark.py
@@ -12,16 +12,20 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
+from s3ql.backends.common import get_bucket, BetterBucket, NoSuchBucket
+from s3ql.backends.local import Bucket
+from s3ql.common import setup_logging, BUFSIZE, QuietError
+from s3ql.parse_args import ArgumentParser
import argparse
import atexit
import logging
+import math
import os
+import shutil
+import subprocess
import sys
import tempfile
-import math
import time
-import subprocess
-import shutil
# We are running from the S3QL source directory, make sure
# that we use modules from this directory
@@ -30,10 +34,6 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
sys.path = [os.path.join(basedir, 'src')] + sys.path
-from s3ql.backends.common import get_bucket, BetterBucket
-from s3ql.backends.local import Bucket
-from s3ql.common import setup_logging, BUFSIZE
-from s3ql.parse_args import ArgumentParser
log = logging.getLogger('benchmark')
@@ -92,7 +92,10 @@ def main(args=None):
# Upload random data to prevent effects of compression
# on the network layer
log.info('Measuring raw backend throughput..')
- bucket = get_bucket(options, plain=True)
+ try:
+ bucket = get_bucket(options, plain=True)
+ except NoSuchBucket as exc:
+ raise QuietError(str(exc))
with bucket.open_write('s3ql_testdata') as dst:
with open('/dev/urandom', 'rb', 0) as src:
stamp = time.time()
diff --git a/contrib/expire_backups.1 b/contrib/expire_backups.1
index 21bca4f..7763c13 100644
--- a/contrib/expire_backups.1
+++ b/contrib/expire_backups.1
@@ -1,4 +1,4 @@
-.TH "EXPIRE_BACKUPS" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "EXPIRE_BACKUPS" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
expire_backups \- Intelligently expire old backups
.
diff --git a/contrib/expire_backups.py b/contrib/expire_backups.py
index 7a49d9b..8130431 100755
--- a/contrib/expire_backups.py
+++ b/contrib/expire_backups.py
@@ -29,7 +29,7 @@ if (os.path.exists(os.path.join(basedir, 'setup.py')) and
from s3ql.common import setup_logging, QuietError
from s3ql.parse_args import ArgumentParser
-from s3ql.cli.remove import main as s3qlrm
+from s3ql.remove import main as s3qlrm
log = logging.getLogger('expire_backups')
diff --git a/contrib/fsck_db.py b/contrib/fsck_db.py
new file mode 100755
index 0000000..81fe6cb
--- /dev/null
+++ b/contrib/fsck_db.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+'''
+fsck_db.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+from __future__ import division, print_function, absolute_import
+from argparse import ArgumentTypeError
+import cPickle as pickle
+import logging
+import os
+import sys
+
+# We are running from the S3QL source directory, make sure
+# that we use modules from this directory
+basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
+if (os.path.exists(os.path.join(basedir, 'setup.py')) and
+ os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
+ sys.path = [os.path.join(basedir, 'src')] + sys.path
+
+from s3ql import CURRENT_FS_REV
+from s3ql.common import setup_logging
+from s3ql.fsck import ROFsck
+from s3ql.parse_args import ArgumentParser
+from s3ql.adm import _add_name
+from s3ql.database import Connection
+
+log = logging.getLogger("fsck")
+
+def parse_args(args):
+
+ parser = ArgumentParser(
+ description="Checks S3QL file system metadata")
+
+ parser.add_log('~/.s3ql/fsck_db.log')
+ parser.add_debug_modules()
+ parser.add_quiet()
+ parser.add_version()
+
+ def db_path(s):
+ s = os.path.splitext(s)[0]
+ if not os.path.exists(s + '.db'):
+ raise ArgumentTypeError('Unable to read %s.db' % s)
+ if not os.path.exists(s + '.params'):
+ raise ArgumentTypeError('Unable to read %s.params' % s)
+ return s
+
+ parser.add_argument("path", metavar='<path>', type=db_path,
+ help='Database to be checked')
+
+ options = parser.parse_args(args)
+
+ return options
+
+def main(args=None):
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ # Temporary hack to allow checking previous revision with most recent fsck
+ param = pickle.load(open(options.path + '.params', 'rb'))
+ if param['revision'] == CURRENT_FS_REV-1:
+ log.info('Upgrading...')
+ db = Connection(options.path + '.db')
+ db.execute("""
+ CREATE TABLE ext_attributes_new (
+ inode INTEGER NOT NULL REFERENCES inodes(id),
+ name_id INTEGER NOT NULL REFERENCES names(id),
+ value BLOB NOT NULL,
+
+ PRIMARY KEY (inode, name_id)
+ )""")
+ for (inode, name, val) in db.query('SELECT inode, name, value FROM ext_attributes'):
+ db.execute('INSERT INTO ext_attributes_new (inode, name_id, value) VALUES(?,?,?)',
+ (inode, _add_name(db, name), val))
+ db.execute('DROP TABLE ext_attributes')
+ db.execute('ALTER TABLE ext_attributes_new RENAME TO ext_attributes')
+ db.execute("""
+ CREATE VIEW ext_attributes_v AS
+ SELECT * FROM ext_attributes JOIN names ON names.id = name_id
+ """)
+ db.close()
+ param['revision'] = CURRENT_FS_REV
+ pickle.dump(param, open(options.path + '.params', 'wb'), 2)
+
+ fsck = ROFsck(options.path)
+ fsck.check()
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
+
diff --git a/contrib/pcp.1 b/contrib/pcp.1
index d555916..7a5d815 100644
--- a/contrib/pcp.1
+++ b/contrib/pcp.1
@@ -1,4 +1,4 @@
-.TH "PCP" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "PCP" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
pcp \- Recursive, parallel copy of directory trees
.
diff --git a/doc/html/.buildinfo b/doc/html/.buildinfo
index 5d20c5c..f1dbcbc 100644
--- a/doc/html/.buildinfo
+++ b/doc/html/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 9abdf7b77d18c0453e942734b36f33dc
+config: 2e0d0afe97b3c3ef21d13e52ee19ae74
tags: fbb0d17656682115ca4d033fb2f83ba1
diff --git a/doc/html/_sources/backends.txt b/doc/html/_sources/backends.txt
index 5bc9521..002793f 100644
--- a/doc/html/_sources/backends.txt
+++ b/doc/html/_sources/backends.txt
@@ -154,11 +154,11 @@ problems.
S3 compatible
=============
-S3QL is also able to access other, S3 compatible storage services for
-which no specific backend exists. Note that when accessing such
-services, only the lowest common denominator of available features can
-be used, so it is generally recommended to use a service specific
-backend instead.
+S3QL is also able to access other, S3 compatible storage services like
+OpenStack_ for which no specific backend exists. Note that when
+accessing such services, only the lowest common denominator of
+available features can be used, so it is generally recommended to use
+a service specific backend instead.
The storage URL for accessing an arbitrary S3 compatible storage
service is ::
@@ -173,6 +173,7 @@ to use HTTPS connections. Note, however, that at this point S3QL does
not verify the server certificate (cf. `issue 267
<http://code.google.com/p/s3ql/issues/detail?id=267>`_).
+.. _OpenStack: http://www.openstack.org/
Local
=====
diff --git a/doc/html/_sources/installation.txt b/doc/html/_sources/installation.txt
index 0cfa670..6056906 100644
--- a/doc/html/_sources/installation.txt
+++ b/doc/html/_sources/installation.txt
@@ -34,7 +34,7 @@ that is not the case.
possible*.
* `Python <http://www.python.org/>`_, version 2.6.6 or newer, but not
- Python 3.x.
+ Python 3.x. Make sure to also install the development headers.
* The `PyCrypto++ Python Module
<http://pypi.python.org/pypi/pycryptopp>`_. To check if this module
@@ -46,13 +46,19 @@ that is not the case.
argparse.__version__'`. If argparse is installed, this will print
the version number. You need version 1.1 or later.
+* `SQLite <http://www.sqlite.org/>`_ version 3.7.0 or newer. SQLite
+ has to be installed as a *shared library* with development headers.
+
* The `APSW Python Module <http://code.google.com/p/apsw/>`_. To check
which (if any) version of APWS is installed, run the command ::
- python -c 'import apsw; print apsw.apswversion(), apsw.sqlitelibversion()'
+ python -c 'import apsw; print apsw.apswversion()'
- If APSW is installed, this should print two version numbers which
- both have to be at least 3.7.0.
+ The printed version number should be at least 3.7.0. Note that APSW
+ must be linked *dynamically* against SQLite, so you can *not* use
+ the Ubuntu PPA at
+ https://launchpad.net/~ubuntu-rogerbinns/+archive/apsw (these
+ packages are statically linked).
* The `PyLibLZMA Python module
<http://pypi.python.org/pypi/pyliblzma>`_. To check if this module
@@ -64,9 +70,9 @@ that is not the case.
<http://code.google.com/p/python-llfuse/>`_. To check if this module
is installed, execute `python -c 'import llfuse; print
llfuse.__version__'`. This should print a version number. You need at
- least version 0.29.
+ least version 0.36.
- Note that earlier S3QL versions shipped with a builtin version of
+ Note that early S3QL versions shipped with a built-in version of
this module. If you are upgrading from such a version, make sure to
completely remove the old S3QL version first.
@@ -79,7 +85,8 @@ To install S3QL itself, proceed as follows:
1. Download S3QL from http://code.google.com/p/s3ql/downloads/list
2. Unpack it into a folder of your choice
-3. Run `python setup.py test` to run a self-test. If this fails, ask
+3. Run `python setup.py build` to build S3QL.
+4. Run `python setup.py test` to run a self-test. If this fails, ask
for help on the `mailing list
<http://groups.google.com/group/s3ql>`_ or report a bug in the
`issue tracker <http://code.google.com/p/s3ql/issues/list>`_.
diff --git a/doc/html/_sources/issues.txt b/doc/html/_sources/issues.txt
index ac2cb8c..e79b2e1 100644
--- a/doc/html/_sources/issues.txt
+++ b/doc/html/_sources/issues.txt
@@ -45,28 +45,6 @@ Known Issues
the `--noleaf` option to work correctly on S3QL file systems. This
bug has already been fixed in recent find versions.
-
-* In theory, S3QL is not fully compatible with NFS. Since S3QL does
- not support *inode generation numbers*, NFS clients may (once again,
- in theory) accidentally read or write the wrong file in the
- following situation:
-
- #. An S3QL file system is exported over NFS
- #. NFS client 1 opens a file A
- #. Another NFS client 2 (or the server itself) deletes file A (without
- client 1 knowing about this)
- #. A new file B is created by either of the clients or the server
- #. NFS client 1 tries to read or write file A (which has actually already been deleted).
-
- In this situation it is possible that NFS client 1 actually writes
- or reads the newly created file B instead. The chances of this are 1
- to (2^32 - *n*) where *n* is the total number of directory entries
- in the S3QL file system (as displayed by `s3qlstat`).
-
- Luckily enough, as long as you have less than about 2 thousand
- million directory entries (2^31), the chances for this are totally
- irrelevant and you don't have to worry about it.
-
* The `umount` and `fusermount -u` commands will *not* block until all
data has been uploaded to the backend. (this is a FUSE limitation
that will hopefully be removed in the future, see `issue 159
diff --git a/doc/html/about.html b/doc/html/about.html
index 646ebb1..bd2bf49 100644
--- a/doc/html/about.html
+++ b/doc/html/about.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>About S3QL &mdash; S3QL v1.6 documentation</title>
+ <title>About S3QL &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="#" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Installation" href="installation.html" />
<link rel="prev" title="S3QL User’s Guide" href="index.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="index.html" title="S3QL User’s Guide"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -182,7 +182,7 @@ will, although being inconvenient, not endanger any stored data.</p>
<li class="right" >
<a href="index.html" title="S3QL User’s Guide"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/adm.html b/doc/html/adm.html
index e9d19e6..f488b1a 100644
--- a/doc/html/adm.html
+++ b/doc/html/adm.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Managing Buckets &mdash; S3QL v1.6 documentation</title>
+ <title>Managing Buckets &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Mounting" href="mount.html" />
<link rel="prev" title="File System Creation" href="mkfs.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="mkfs.html" title="File System Creation"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -207,7 +207,7 @@ for help on the mailing list first (see <a class="reference internal" href="reso
<li class="right" >
<a href="mkfs.html" title="File System Creation"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/backends.html b/doc/html/backends.html
index 61fc8bb..5d55173 100644
--- a/doc/html/backends.html
+++ b/doc/html/backends.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Storage Backends &mdash; S3QL v1.6 documentation</title>
+ <title>Storage Backends &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="File System Creation" href="mkfs.html" />
<link rel="prev" title="General Information" href="general.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="general.html" title="General Information"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -210,11 +210,11 @@ problems.</p>
</div>
<div class="section" id="s3-compatible">
<h2>S3 compatible<a class="headerlink" href="#s3-compatible" title="Permalink to this headline">¶</a></h2>
-<p>S3QL is also able to access other, S3 compatible storage services for
-which no specific backend exists. Note that when accessing such
-services, only the lowest common denominator of available features can
-be used, so it is generally recommended to use a service specific
-backend instead.</p>
+<p>S3QL is also able to access other, S3 compatible storage services like
+<a class="reference external" href="http://www.openstack.org/">OpenStack</a> for which no specific backend exists. Note that when
+accessing such services, only the lowest common denominator of
+available features can be used, so it is generally recommended to use
+a service specific backend instead.</p>
<p>The storage URL for accessing an arbitrary S3 compatible storage
service is</p>
<div class="highlight-commandline"><div class="highlight"><pre><span class="l">s3c://</span><span class="nv">&lt;hostname&gt;</span><span class="l">:</span><span class="nv">&lt;port&gt;</span><span class="l">/</span><span class="nv">&lt;bucketname&gt;</span><span class="l">/</span><span class="nv">&lt;prefix&gt;</span><span class="l"></span>
@@ -270,7 +270,7 @@ with <a class="reference external" href="http://fuse.sourceforge.net/sshfs.html"
<li class="right" >
<a href="general.html" title="General Information"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/contrib.html b/doc/html/contrib.html
index 3ea1cd5..2ba8abe 100644
--- a/doc/html/contrib.html
+++ b/doc/html/contrib.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Contributed Programs &mdash; S3QL v1.6 documentation</title>
+ <title>Contributed Programs &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Tips &amp; Tricks" href="tips.html" />
<link rel="prev" title="Checking for Errors" href="fsck.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="fsck.html" title="Checking for Errors"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -226,7 +226,7 @@ properly unmounts it when the system is shut down.</p>
<li class="right" >
<a href="fsck.html" title="Checking for Errors"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/fsck.html b/doc/html/fsck.html
index fdd8837..a1c4142 100644
--- a/doc/html/fsck.html
+++ b/doc/html/fsck.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Checking for Errors &mdash; S3QL v1.6 documentation</title>
+ <title>Checking for Errors &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Contributed Programs" href="contrib.html" />
<link rel="prev" title="Unmounting" href="umount.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="umount.html" title="Unmounting"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -130,10 +130,6 @@ specified multiple times.</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--force</span></kbd></td>
<td>Force checking even if file system is marked clean.</td></tr>
-<tr><td class="option-group" colspan="2">
-<kbd><span class="option">--renumber-inodes</span></kbd></td>
-</tr>
-<tr><td>&nbsp;</td><td>Renumber inodes to be stricly sequential starting from 3</td></tr>
</tbody>
</table>
</div></blockquote>
@@ -154,7 +150,7 @@ specified multiple times.</td></tr>
<li class="right" >
<a href="umount.html" title="Unmounting"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/general.html b/doc/html/general.html
index a551de0..ed2aa35 100644
--- a/doc/html/general.html
+++ b/doc/html/general.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>General Information &mdash; S3QL v1.6 documentation</title>
+ <title>General Information &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Storage Backends" href="backends.html" />
<link rel="prev" title="Installation" href="installation.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="installation.html" title="Installation"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -237,7 +237,7 @@ will ever be a concern.</p>
<li class="right" >
<a href="installation.html" title="Installation"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/index.html b/doc/html/index.html
index f6c928f..35fcf23 100644
--- a/doc/html/index.html
+++ b/doc/html/index.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>S3QL User’s Guide &mdash; S3QL v1.6 documentation</title>
+ <title>S3QL User’s Guide &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="#" />
+ <link rel="top" title="S3QL v1.7 documentation" href="#" />
<link rel="next" title="About S3QL" href="about.html" />
</head>
<body>
@@ -33,7 +33,7 @@
<li class="right" style="margin-right: 10px">
<a href="about.html" title="About S3QL"
accesskey="N">next</a></li>
- <li><a href="#">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="#">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -180,7 +180,7 @@
<li class="right" style="margin-right: 10px">
<a href="about.html" title="About S3QL"
>next</a></li>
- <li><a href="#">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="#">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/installation.html b/doc/html/installation.html
index 47b2b40..7f8e3d6 100644
--- a/doc/html/installation.html
+++ b/doc/html/installation.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Installation &mdash; S3QL v1.6 documentation</title>
+ <title>Installation &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="General Information" href="general.html" />
<link rel="prev" title="About S3QL" href="about.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="about.html" title="About S3QL"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -112,7 +112,7 @@ so under Linux you should actually use <em>2.6.26 or newer whenever
possible</em>.</p>
</li>
<li><p class="first"><a class="reference external" href="http://www.python.org/">Python</a>, version 2.6.6 or newer, but not
-Python 3.x.</p>
+Python 3.x. Make sure to also install the development headers.</p>
</li>
<li><p class="first">The <a class="reference external" href="http://pypi.python.org/pypi/pycryptopp">PyCrypto++ Python Module</a>. To check if this module
is installed, try to execute <tt class="docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span> <span class="pre">pycryptopp'</span></tt>.</p>
@@ -122,13 +122,19 @@ installed, try to execute <tt class="docutils literal"><span class="pre">python<
<span class="pre">argparse.__version__'</span></tt>. If argparse is installed, this will print
the version number. You need version 1.1 or later.</p>
</li>
+<li><p class="first"><a class="reference external" href="http://www.sqlite.org/">SQLite</a> version 3.7.0 or newer. SQLite
+has to be installed as a <em>shared library</em> with development headers.</p>
+</li>
<li><p class="first">The <a class="reference external" href="http://code.google.com/p/apsw/">APSW Python Module</a>. To check
which (if any) version of APWS is installed, run the command</p>
-<div class="highlight-commandline"><div class="highlight"><pre><span class="l">python -c &#39;import apsw; print apsw.apswversion(), apsw.sqlitelibversion()&#39;</span>
+<div class="highlight-commandline"><div class="highlight"><pre><span class="l">python -c &#39;import apsw; print apsw.apswversion()&#39;</span>
</pre></div>
</div>
-<p>If APSW is installed, this should print two version numbers which
-both have to be at least 3.7.0.</p>
+<p>The printed version number should be at least 3.7.0. Note that APSW
+must be linked <em>dynamically</em> against SQLite, so you can <em>not</em> use
+the Ubuntu PPA at
+<a class="reference external" href="https://launchpad.net/~ubuntu-rogerbinns/+archive/apsw">https://launchpad.net/~ubuntu-rogerbinns/+archive/apsw</a> (these
+packages are statically linked).</p>
</li>
<li><p class="first">The <a class="reference external" href="http://pypi.python.org/pypi/pyliblzma">PyLibLZMA Python module</a>. To check if this module
is installed, execute <tt class="docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span> <span class="pre">lzma;</span> <span class="pre">print</span>
@@ -138,8 +144,8 @@ least version 0.5.3.</p>
<li><p class="first">The <a class="reference external" href="http://code.google.com/p/python-llfuse/">Python LLFUSE module</a>. To check if this module
is installed, execute <tt class="docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">'import</span> <span class="pre">llfuse;</span> <span class="pre">print</span>
<span class="pre">llfuse.__version__'</span></tt>. This should print a version number. You need at
-least version 0.29.</p>
-<p>Note that earlier S3QL versions shipped with a builtin version of
+least version 0.36.</p>
+<p>Note that early S3QL versions shipped with a built-in version of
this module. If you are upgrading from such a version, make sure to
completely remove the old S3QL version first.</p>
</li>
@@ -151,6 +157,7 @@ completely remove the old S3QL version first.</p>
<ol class="arabic simple">
<li>Download S3QL from <a class="reference external" href="http://code.google.com/p/s3ql/downloads/list">http://code.google.com/p/s3ql/downloads/list</a></li>
<li>Unpack it into a folder of your choice</li>
+<li>Run <tt class="docutils literal"><span class="pre">python</span> <span class="pre">setup.py</span> <span class="pre">build</span></tt> to build S3QL.</li>
<li>Run <tt class="docutils literal"><span class="pre">python</span> <span class="pre">setup.py</span> <span class="pre">test</span></tt> to run a self-test. If this fails, ask
for help on the <a class="reference external" href="http://groups.google.com/group/s3ql">mailing list</a> or report a bug in the
<a class="reference external" href="http://code.google.com/p/s3ql/issues/list">issue tracker</a>.</li>
@@ -182,7 +189,7 @@ have to run <tt class="docutils literal"><span class="pre">sudo</span> <span cla
<li class="right" >
<a href="about.html" title="About S3QL"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/issues.html b/doc/html/issues.html
index 91333ee..e2ac0ae 100644
--- a/doc/html/issues.html
+++ b/doc/html/issues.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Known Issues &mdash; S3QL v1.6 documentation</title>
+ <title>Known Issues &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Manpages" href="man/index.html" />
<link rel="prev" title="Tips &amp; Tricks" href="tips.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="tips.html" title="Tips &amp; Tricks"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -121,26 +121,6 @@ being followed, then this is a bug in the application.</p>
the <tt class="docutils literal"><span class="pre">--noleaf</span></tt> option to work correctly on S3QL file systems. This
bug has already been fixed in recent find versions.</p>
</li>
-<li><p class="first">In theory, S3QL is not fully compatible with NFS. Since S3QL does
-not support <em>inode generation numbers</em>, NFS clients may (once again,
-in theory) accidentally read or write the wrong file in the
-following situation:</p>
-<ol class="arabic simple">
-<li>An S3QL file system is exported over NFS</li>
-<li>NFS client 1 opens a file A</li>
-<li>Another NFS client 2 (or the server itself) deletes file A (without
-client 1 knowing about this)</li>
-<li>A new file B is created by either of the clients or the server</li>
-<li>NFS client 1 tries to read or write file A (which has actually already been deleted).</li>
-</ol>
-<p>In this situation it is possible that NFS client 1 actually writes
-or reads the newly created file B instead. The chances of this are 1
-to (2^32 - <em>n</em>) where <em>n</em> is the total number of directory entries
-in the S3QL file system (as displayed by <tt class="docutils literal"><span class="pre">s3qlstat</span></tt>).</p>
-<p>Luckily enough, as long as you have less than about 2 thousand
-million directory entries (2^31), the chances for this are totally
-irrelevant and you don&#8217;t have to worry about it.</p>
-</li>
<li><p class="first">The <tt class="docutils literal"><span class="pre">umount</span></tt> and <tt class="docutils literal"><span class="pre">fusermount</span> <span class="pre">-u</span></tt> commands will <em>not</em> block until all
data has been uploaded to the backend. (this is a FUSE limitation
that will hopefully be removed in the future, see <a class="reference external" href="http://code.google.com/p/s3ql/issues/detail?id=159">issue 159</a>). If you use
@@ -182,7 +162,7 @@ the backend.</p>
<li class="right" >
<a href="tips.html" title="Tips &amp; Tricks"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/man/adm.html b/doc/html/man/adm.html
index ad97564..c0d893d 100644
--- a/doc/html/man/adm.html
+++ b/doc/html/man/adm.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The s3qladm command &mdash; S3QL v1.6 documentation</title>
+ <title>The s3qladm command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The mount.s3ql command" href="mount.html" />
<link rel="prev" title="The mkfs.s3ql command" href="mkfs.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="mkfs.html" title="The mkfs.s3ql command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -201,7 +201,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="mkfs.html" title="The mkfs.s3ql command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/cp.html b/doc/html/man/cp.html
index 2a243e7..053e439 100644
--- a/doc/html/man/cp.html
+++ b/doc/html/man/cp.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The s3qlcp command &mdash; S3QL v1.6 documentation</title>
+ <title>The s3qlcp command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The s3qlrm command" href="rm.html" />
<link rel="prev" title="The s3qlctrl command" href="ctrl.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="ctrl.html" title="The s3qlctrl command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -207,7 +207,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="ctrl.html" title="The s3qlctrl command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/ctrl.html b/doc/html/man/ctrl.html
index 98a1477..2f27d56 100644
--- a/doc/html/man/ctrl.html
+++ b/doc/html/man/ctrl.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The s3qlctrl command &mdash; S3QL v1.6 documentation</title>
+ <title>The s3qlctrl command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The s3qlcp command" href="cp.html" />
<link rel="prev" title="The s3qlstat command" href="stat.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="stat.html" title="The s3qlstat command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -198,7 +198,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="stat.html" title="The s3qlstat command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/expire_backups.html b/doc/html/man/expire_backups.html
index 2c93e3d..eda45a1 100644
--- a/doc/html/man/expire_backups.html
+++ b/doc/html/man/expire_backups.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The expire_backups command &mdash; S3QL v1.6 documentation</title>
+ <title>The expire_backups command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="Further Resources / Getting Help" href="../resources.html" />
<link rel="prev" title="The pcp command" href="pcp.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="pcp.html" title="The pcp command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -228,7 +228,7 @@ error occured.</p>
<li class="right" >
<a href="pcp.html" title="The pcp command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/fsck.html b/doc/html/man/fsck.html
index ab675fb..ce8932c 100644
--- a/doc/html/man/fsck.html
+++ b/doc/html/man/fsck.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The fsck.s3ql command &mdash; S3QL v1.6 documentation</title>
+ <title>The fsck.s3ql command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The pcp command" href="pcp.html" />
<link rel="prev" title="The umount.s3ql command" href="umount.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="umount.html" title="The umount.s3ql command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -156,10 +156,6 @@ specified multiple times.</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--force</span></kbd></td>
<td>Force checking even if file system is marked clean.</td></tr>
-<tr><td class="option-group" colspan="2">
-<kbd><span class="option">--renumber-inodes</span></kbd></td>
-</tr>
-<tr><td>&nbsp;</td><td>Renumber inodes to be stricly sequential starting from 3</td></tr>
</tbody>
</table>
</div></blockquote>
@@ -193,7 +189,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="umount.html" title="The umount.s3ql command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/index.html b/doc/html/man/index.html
index 88e59ba..8a3cf42 100644
--- a/doc/html/man/index.html
+++ b/doc/html/man/index.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Manpages &mdash; S3QL v1.6 documentation</title>
+ <title>Manpages &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="next" title="The mkfs.s3ql command" href="mkfs.html" />
<link rel="prev" title="Known Issues" href="../issues.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="../issues.html" title="Known Issues"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -135,7 +135,7 @@ here in the User&#8217;s Guide.</p>
<li class="right" >
<a href="../issues.html" title="Known Issues"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/man/lock.html b/doc/html/man/lock.html
index fa5e34c..f7619fc 100644
--- a/doc/html/man/lock.html
+++ b/doc/html/man/lock.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The s3qllock command &mdash; S3QL v1.6 documentation</title>
+ <title>The s3qllock command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The umount.s3ql command" href="umount.html" />
<link rel="prev" title="The s3qlrm command" href="rm.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="rm.html" title="The s3qlrm command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -198,7 +198,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="rm.html" title="The s3qlrm command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/mkfs.html b/doc/html/man/mkfs.html
index d461de2..f127fe9 100644
--- a/doc/html/man/mkfs.html
+++ b/doc/html/man/mkfs.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The mkfs.s3ql command &mdash; S3QL v1.6 documentation</title>
+ <title>The mkfs.s3ql command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The s3qladm command" href="adm.html" />
<link rel="prev" title="Manpages" href="index.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="index.html" title="Manpages"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -193,7 +193,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="index.html" title="Manpages"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/mount.html b/doc/html/man/mount.html
index 6e25971..0b8fa8d 100644
--- a/doc/html/man/mount.html
+++ b/doc/html/man/mount.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The mount.s3ql command &mdash; S3QL v1.6 documentation</title>
+ <title>The mount.s3ql command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The s3qlstat command" href="stat.html" />
<link rel="prev" title="The s3qladm command" href="adm.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="adm.html" title="The s3qladm command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -247,7 +247,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="adm.html" title="The s3qladm command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/pcp.html b/doc/html/man/pcp.html
index 4d8e0d8..b40dddc 100644
--- a/doc/html/man/pcp.html
+++ b/doc/html/man/pcp.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The pcp command &mdash; S3QL v1.6 documentation</title>
+ <title>The pcp command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The expire_backups command" href="expire_backups.html" />
<link rel="prev" title="The fsck.s3ql command" href="fsck.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="fsck.html" title="The fsck.s3ql command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -171,7 +171,7 @@ error occured.</p>
<li class="right" >
<a href="fsck.html" title="The fsck.s3ql command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/rm.html b/doc/html/man/rm.html
index 1498e40..72c4fd9 100644
--- a/doc/html/man/rm.html
+++ b/doc/html/man/rm.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The s3qlrm command &mdash; S3QL v1.6 documentation</title>
+ <title>The s3qlrm command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The s3qllock command" href="lock.html" />
<link rel="prev" title="The s3qlcp command" href="cp.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="cp.html" title="The s3qlcp command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -171,7 +171,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="cp.html" title="The s3qlcp command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/stat.html b/doc/html/man/stat.html
index 0648c64..4a6ce8f 100644
--- a/doc/html/man/stat.html
+++ b/doc/html/man/stat.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The s3qlstat command &mdash; S3QL v1.6 documentation</title>
+ <title>The s3qlstat command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The s3qlctrl command" href="ctrl.html" />
<link rel="prev" title="The mount.s3ql command" href="mount.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="mount.html" title="The mount.s3ql command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -166,7 +166,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="mount.html" title="The mount.s3ql command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/man/umount.html b/doc/html/man/umount.html
index 7d69961..3021f6c 100644
--- a/doc/html/man/umount.html
+++ b/doc/html/man/umount.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>The umount.s3ql command &mdash; S3QL v1.6 documentation</title>
+ <title>The umount.s3ql command &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="../_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="../index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="../index.html" />
<link rel="up" title="Manpages" href="index.html" />
<link rel="next" title="The fsck.s3ql command" href="fsck.html" />
<link rel="prev" title="The s3qllock command" href="lock.html" />
@@ -38,7 +38,7 @@
<li class="right" >
<a href="lock.html" title="The s3qllock command"
accesskey="P">previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" accesskey="U">Manpages</a> &raquo;</li>
</ul>
</div>
@@ -176,7 +176,7 @@ system, conventional locations are <tt class="file docutils literal"><span class
<li class="right" >
<a href="lock.html" title="The s3qllock command"
>previous</a> |</li>
- <li><a href="../index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="../index.html">S3QL v1.7 documentation</a> &raquo;</li>
<li><a href="index.html" >Manpages</a> &raquo;</li>
</ul>
</div>
diff --git a/doc/html/mkfs.html b/doc/html/mkfs.html
index d262fd2..509e98e 100644
--- a/doc/html/mkfs.html
+++ b/doc/html/mkfs.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>File System Creation &mdash; S3QL v1.6 documentation</title>
+ <title>File System Creation &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Managing Buckets" href="adm.html" />
<link rel="prev" title="Storage Backends" href="backends.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="backends.html" title="Storage Backends"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -154,7 +154,7 @@ option to prevent accidental creation of an encrypted bucket.</p>
<li class="right" >
<a href="backends.html" title="Storage Backends"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/mount.html b/doc/html/mount.html
index f650aa6..4c835dc 100644
--- a/doc/html/mount.html
+++ b/doc/html/mount.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Mounting &mdash; S3QL v1.6 documentation</title>
+ <title>Mounting &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Advanced S3QL Features" href="special.html" />
<link rel="prev" title="Managing Buckets" href="adm.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="adm.html" title="Managing Buckets"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -375,7 +375,7 @@ mounted.</li>
<li class="right" >
<a href="adm.html" title="Managing Buckets"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/objects.inv b/doc/html/objects.inv
index 3159050..7e0717f 100644
--- a/doc/html/objects.inv
+++ b/doc/html/objects.inv
@@ -1,6 +1,6 @@
# Sphinx inventory version 2
# Project: S3QL
-# Version: 1.6
+# Version: 1.7
# The remainder of this file is compressed using zlib.
xÚ}RMÕ0 ¼çWD‚k¡½q$Ø•@À‰c•¦Þ×hó…í<xÿ·i«×z‹Æ3ã±c›ò¥Ë€ ƒ‰4ñðÆ›¼n^kv™nFþ…^sÍ»ÓÙÅ“žJúª¤\$nèö§ß›M°ñÞ°K±š¾Ô÷›lÚÛoŸÔ ¢‹üÞ+W´ªD$oÕûÜå_q¯ÔÊi„Ór©ËÛÂ#DvvŽ!Vsêé­BþÑ[Æ^ KûÏi(–Ùæ=ߦÈèúu@©ßä‹š¶á“}Ús)ƒukVYF…M/Þ߀A;>“ÌØ’¤­„¯æsËè;<=€~Ÿâ£;¬ƒ“ìF,ºÞȶâ@{‹­ ·Ù¸mô»PDc'w“ÿwIRoæzÛÞ­ªš>fo£É4&æéMdù››è¸ÎáÐáƒ!Ö` ’;ƒ¼B:?+å
ùPûjãVˆŽä~H-+è¼3½óŽ/G§XÙÍ5ûK\ a6X!P*háÙGlð6OA9fíʵ弟ե=`ô \ No newline at end of file
diff --git a/doc/html/resources.html b/doc/html/resources.html
index fbd8899..e674dc0 100644
--- a/doc/html/resources.html
+++ b/doc/html/resources.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Further Resources / Getting Help &mdash; S3QL v1.6 documentation</title>
+ <title>Further Resources / Getting Help &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="prev" title="The expire_backups command" href="man/expire_backups.html" />
</head>
<body>
@@ -33,7 +33,7 @@
<li class="right" style="margin-right: 10px">
<a href="man/expire_backups.html" title="The expire_backups command"
accesskey="P">previous</a></li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -105,7 +105,7 @@ can subscribe by sending a mail to
<li class="right" style="margin-right: 10px">
<a href="man/expire_backups.html" title="The expire_backups command"
>previous</a></li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/search.html b/doc/html/search.html
index 635ab9c..99f8f4d 100644
--- a/doc/html/search.html
+++ b/doc/html/search.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Search &mdash; S3QL v1.6 documentation</title>
+ <title>Search &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -24,7 +24,7 @@
<script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/searchtools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<script type="text/javascript">
jQuery(function() { Search.loadIndex("searchindex.js"); });
</script>
@@ -35,7 +35,7 @@
<div class="related">
<h3>Navigation</h3>
<ul>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -100,7 +100,7 @@
<div class="related">
<h3>Navigation</h3>
<ul>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/searchindex.js b/doc/html/searchindex.js
index e3bc6da..6991227 100644
--- a/doc/html/searchindex.js
+++ b/doc/html/searchindex.js
@@ -1 +1 @@
-Search.setIndex({objects:{},terms:{suffici:1,all:[0,1,2,3,5,7,8,11,12,13,14,15,17,19,20,21,22,23,24,25,26,27],code:[8,17,0,22,11,23,10,25,21,3,4,26,6,20],partial:12,global:[],mnt:[5,14,27],month:[5,25],prefix:[12,1],stumbl:7,notquitesecret:1,follow:[0,1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17,19,20,22,23,24,25,26,27],disk:[14,21],whose:12,decid:[14,1],middl:7,depend:[8,9,22,23,1,25,3,5,14,20,27],million:7,ulimit:[27,20],readabl:21,send:16,rel:[12,4],init:[27,7],program:[0,2,3,4,5,6,7,8,9,10,11,13,15,17,19,20,22,23,24,25,26,27],those:12,under:[12,22],aris:12,sha256:21,neglig:[],worth:7,introduc:[17,13],sourc:[17,21,22,4,5,13],everi:[17,0,27,1,25,12,5,14,13],risk:[],mkf:[8,2,23,18,9],far:14,faq:16,account:[20,12,27],util:[14,19],pycryptopp:22,failur:1,veri:[21,12,7],affect:[17,27,7,13,1],tri:[15,7],administr:[0,13],level:[26,13],did:21,who:[11,27,20,24],list:[21,22,25,26,12,13,5,15,16],upload:[21,11,27,24,26,12,13,5,14,20,7],"try":[5,25,12,22,1],larg:[17,14,7,13],stderr:27,small:[14,7],blocksiz:[2,23,20,27],mount:[17,9,0,20,18,11,27,10,24,1,26,12,7,3,5,6,14,15,13],dir:[17,27],pleas:[21,16],upper:12,smaller:[14,21,7],slower:[17,27,13],ifac:27,ten:12,whitespac:[],compens:1,sync:4,sign:12,consequ:1,hors:[0,13],design:[0,21,13,1],pass:[27,4],download:[15,22,3],further:[9,15,16],correspond:12,port:12,rath:27,even:[8,0,11,24,1,12,19,13],what:[5,25,15,26,1],bzip2:[21,20,27],sub:7,entir:[21,0,10,13],descriptor:[27,20],section:[12,1],abl:[0,11,24,1,12,13,16],weren:16,asia:[],find:[0,12,27,7,13],access:[21,20,27,1,12,15,7],delet:[9,0,10,25,12,7,3,5,15,13],version:[8,17,0,20,22,11,23,10,24,19,25,2,12,21,3,4,26,6,15,27,7],suspect:19,consecut:12,"new":[8,0,20,23,1,25,26,12,7,5,15,27,13],net:27,ever:1,"public":[],contrast:[0,13],metadata:[9,20,27,3,26,14,15,13],elimin:12,full:[8,17,0,11,23,10,26,21,3,5,6,20,27,13],themselv:1,absolut:[5,25,12],pacif:[],gener:[9,0,22,1,25,12,7,5,15,13],never:[],privat:[],here:[26,12,18],satisfi:22,explicitli:7,modif:7,address:7,path:[8,20,22,23,19,2,1
2,3,15,27],becom:[5,25,14,12,1],modifi:[17,21,13],sinc:[27,1,25,12,5,7],valu:[27,20,7,1],wait:[11,14,27,24,7],dry:25,convert:12,joes_bucket:[],checksum:21,larger:27,step:5,amount:[26,14,12,21],throughput:[],action:[26,15,13,3],implement:1,magnitud:[17,13],chanc:7,control:13,fstab:[27,7],appli:[14,12,1],modul:[8,20,1,23,19,2,3,26,22,15,27],apw:22,filenam:14,unix:[14,21,7],visibl:12,home:[5,14,12],instal:[8,9,0,20,18,22,17,11,23,10,26,3,5,6,15],total:[27,7,13],establish:1,from:[8,17,0,20,22,19,23,21,1,25,2,12,3,4,5,14,15,27,13],describ:[12,1],would:[17,1,25,12,5,13],apswvers:22,upgrad:[9,15,22,3],few:[5,12,22],concret:1,call:[17,0,27,10,1,25,26,12,5,6,14,20,13],usr:[8,17,0,11,23,10,26,3,5,6,20],recommend:[5,27,12,7,1],taken:[17,21,20,27,13],tape:[0,13],bill:1,type:[],until:[11,27,7,13,24],more:[21,22,1,13,4,26,7],sort:[5,25],desir:26,st_nlink:7,src:[17,13],peopl:[],hundr:[],relat:12,benchmark:[5,9,27],"19283712_yourname_s3ql":[],notic:[14,12],enhanc:5,warn:[26,10,15,13,1],sqlitelibvers:22,sens:14,known:[9,21,7],rare:[],hold:[12,1],unpack:22,cach:[8,9,21,20,23,19,2,13,3,26,15,27,7],must:[17,15,12,13,3],worst:[0,13],none:[8,20,1,27,19,3,15],word:[],sometim:15,restor:[17,9,14,15,13],dest:17,setup:22,work:[21,22,24,25,5,14,7],uniqu:1,conceptu:21,remain:27,wors:12,obvious:27,can:[0,1,2,3,5,6,7,8,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27],mktemp:27,about:[9,21,27,13,6,14,7],root:[17,0,11,27,10,24,26,6,20,13],fetch:[17,13],overrid:12,prompt:[8,19],tar:14,give:[5,25,14,15],process:[11,27,24,12,4,5,14,20,7],lock:[0,13],sudo:22,share:[8,17,0,11,23,10,26,3,5,6,20],accept:[8,17,0,20,19,11,23,10,24,1,25,2,3,4,26,6,15,27],high:[21,20,27,4],critic:21,minimum:[14,21],want:[11,27,24,25,5,14,16],onlin:[0,12,21,13],unfortun:[14,12,7],occur:[8,17,0,11,23,10,1,25,3,4,26,6,20],ratio:27,alwai:[17,27,25,13,5,7],end:[27,15],turn:[5,25,27,7],rather:[25,12,13,5,14,7],anoth:[7,1],ordinari:[17,11,24,13],write:[8,9,21,20,22,17,27,19,12,7,3,5,14,15,13],how:[5,25,14],manpag:[9,18],env:27,we
bpag:12,verifi:[12,7],simpl:[5,25,13],updat:[5,21,12,7],product:[],resourc:[9,15,16],max:[27,20],earlier:[22,7,1],pyliblzma:22,badli:[15,3],wrong:[15,7,3],endang:21,mai:[17,0,20,22,27,16,21,24,1,26,12,7,3,5,14,15,13],multipl:[8,15,23,19,2,3,20,27],redund:[5,25,21,12],secondari:[],data:[8,17,21,20,19,11,23,27,24,1,2,12,7,3,5,14,15,26,13],grow:[21,27],physic:[5,17,0,15,13],man:[18,7],indistinguish:21,handl:[17,21,27,25,12,13,5,14,7],"short":[0,12,13],attempt:[8,27,7,1],practic:[14,12],third:14,neitheristhi:1,author:[27,7],favorit:[],apsw:22,element:27,issu:[17,9,0,22,27,16,10,21,12,7,26,6,13],inform:[9,27,1,25,26,20,13],maintain:27,combin:[14,12,27,1],allow:[17,0,20,27,10,25,26,12,4,5,6,14,15,13],enter:[2,23],exclus:14,volum:27,order:[17,13,1],talk:12,oper:[8,17,0,20,11,23,10,1,25,21,3,4,26,6,15,27],help:[9,22,16,26,13,3,5,15,7],over:[17,21,27,12,13,14,20,7],move:[21,15,1],soon:[],topsecret:[],increas:[14,20,27],appar:12,effici:7,still:[17,21,11,27,24,1,12,13],dynam:21,paramet:1,overwrit:[2,23],fix:[0,25,12,13,5,7],inadvert:[17,13],better:[27,22,4],window:12,html:[],restart:[27,7],persist:1,mail:[21,15,22,16],main:[10,13],might:[17,0,27,16,10,19,13,26,6,7],documents_januari:[17,13],them:[17,11,24,1,12,15,13],good:[0,25,13,5,14,7],synopsi:[8,17,0,11,23,10,25,3,4,26,6,20],thei:[17,0,18,21,25,12,5,14,13],python:[27,22,1],promin:7,safe:[17,14,13],fuse4bsd:22,dai:[5,25],initi:[],dat:[5,25],terminolog:[9,1],therebi:12,instead:[17,11,24,1,12,13,7],interrupt:15,potenti:12,now:[0,22,25,12,5,13],bigger:[],choic:22,term:[5,25,1],"__version__":22,somewher:[8,17,0,11,23,10,3,26,6,20,13],name:[17,23,1,25,2,12,5,14,20,27,13],joesdomain:[],authent:[8,9,20,19,23,1,2,12,3,15,27],achiev:[5,17,21,27,13],mode:[27,20],each:[5,25,27,20,1],debug:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],found:[14,1],beneath:13,confid:[],side:1,mean:[21,12,7],compil:27,s3ql:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27],due:[12,7],mkfifo:27,chunk:7,hard:[14,21],idea:[5,25,0,7
,13],procedur:[17,13],realli:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,14,15,27,7],contrib:[5,14,27],meta:[26,14,13],significantli:[5,22,1],year:[5,25,21,12],s3_copi:[5,9],happen:[0,12,13,1],todo:[],special:[17,13],out:[21,12,7],lzma:[21,20,22,27],ftp:[],shown:12,network:[17,14,27,21,13],space:[17,21,7,13],open:[11,27,24,7],newli:7,log_fifo:27,content:[17,21,27,0,1,3,13],suitabl:[21,22],rational:0,internet:[21,27],print:[8,17,0,20,22,11,23,10,24,19,25,2,3,4,26,6,15,27],eth0:27,bla:27,correct:[5,25],common:[17,0,12,13],foreground:[27,20],shut:[5,11,24,7],after:[17,0,21,1,12,14,15,13],insid:17,advanc:[9,13],migrat:5,documents_janurai:[],manipul:21,situat:[14,7],given:7,free:[21,12],standard:[5,25,21,12,27],inod:[8,7,19],reason:[10,27,24,1,25,12,13,5,14,19,7],base:27,theori:[12,7],usual:27,ask:[22,23,2,12,15,7],org:27,"byte":[],likelihood:12,afterward:14,bash:14,care:[17,21,7,13],her:[0,13],thread:[5,27,20],befor:[17,21,11,27,24,1,12,13,7],could:[17,13],success:[],refus:[5,25],keep:[0,27,1,25,5,13],recov:[0,13],thing:[15,3],length:12,rais:[27,20],place:[27,12],perman:[9,0,27,12,14,13],pycrypto:22,principl:27,confus:7,neglect:1,first:[21,22,27,1,12,14,15,7],origin:[],softwar:27,rang:[5,25],becaus:[17,21,27,1,12,5,13],directli:12,malici:[0,13],carri:21,onc:[0,11,27,21,24,12,13,14,20,7],clariti:[],s3q:[],number:[21,22,27,25,13,4,5,14,20,7],capac:21,restrict:[26,27,20],date:[5,25],instruct:[27,22],alreadi:[17,22,7,13],done:[27,12],wrapper:[5,4],llfuse:22,stabl:21,miss:[25,1],s3c:12,size:[21,23,2,13,26,14,20,27,7],differ:[17,21,22,27,1,26,3,5,14,15,13],convent:[8,17,0,11,23,10,3,26,6,20,7],script:[5,14,27,7],profil:[27,20],unknown:21,interact:3,s3qllock:[9,0,18,10,5,14,13],system:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,27],least:[22,27,25,12,5,20],parallel:[9,27,4,5,14,20],checkpoint:1,attack:[0,7,13],necessarili:12,demonstr:5,s3qlrm:[9,0,18,10,25,5,13],termin:[11,24,7,1],lost:[12,1],john:12,"final":13,store:[8,9,21,20,19,23,1,25,2,12,3,5,14,15,27
,7],low:[21,12],servi:7,luckili:7,consol:12,option:[0,2,3,4,5,6,7,8,10,11,13,14,15,17,19,20,22,23,24,25,26,27],especi:[14,21],shelf:[0,13],tool:12,copi:[17,9,21,1,13,4,5,14,15,7],specifi:[8,20,19,23,27,1,25,2,12,3,5,15,26,7],arbitrari:[14,12],part:[21,27,1,25,4,5],pars:1,mostli:[],consult:[8,23,20,3],exactli:[5,17,25,13],than:[10,27,1,25,13,5,14,7],wide:[27,22],target:[8,17,20,27,19,3,14,15,13],cachedir:[8,15,23,19,2,3,20,27],whenev:[14,22,27,1],prevent:[2,0,23,13],remov:[17,9,0,22,27,10,25,26,7,5,6,13],eleg:21,tree:[17,9,0,10,4,5,21,13],second:[27,1,25,12,5,14,20],structur:[21,15],exampl:[21,1,25,13,5,14,7],matter:[26,14,15],temporarili:27,friend:14,video:27,minut:[5,25],led:1,pre:27,unencrypt:[2,23],sai:[17,13],comput:[5,25,21,27],entail:[],januari:[17,13],plaintext:[],explicit:14,ram:27,mind:1,argument:[26,15,3],peculiar:12,"13th":21,packag:[5,22],expir:27,increment:[],disadvantag:[17,13],need:[17,20,22,27,1,25,12,5,15,13],seem:14,exclud:[0,13],paramiko:[],caus:[12,1],equival:[5,25],irrepar:15,destroi:[0,13],moreov:13,blindli:7,atim:7,accuraci:[5,25],note:[17,9,21,22,11,27,24,25,12,7,4,5,14,15,13],also:[8,17,0,18,11,23,10,24,1,25,26,12,21,3,4,5,6,14,20,27,13],builtin:22,denomin:12,take:[17,21,13,3,26,14,15,7],which:[17,10,22,21,1,25,12,13,5,7],discrep:[],transmit:[],environ:22,uplink:5,singl:[14,20,27],swordfish:1,compat:[9,12,7],begin:1,sure:[20,22,27,12,14,15,7],unless:[2,0,23,13],distribut:[5,27,22],plenti:[],normal:[27,20,1],buffer:7,previou:[5,15,12],compress:[9,21,27,5,20,13],most:[8,17,20,19,27,1,25,12,7,3,5,15,13],beta:21,said:[],rigor:[5,25],plan:[],stricli:[8,19],choos:[],homepag:[8,17,0,11,23,10,3,26,6,20],"class":[8,17,0,11,23,10,25,26,3,4,5,6,20],independ:[21,20,27],simplic:21,renam:21,correctli:[7,19],ship:[25,27,22,4],url:[8,20,19,23,1,2,12,3,15,27],doc:[8,17,0,11,23,10,26,3,5,6,20],clear:[17,15,13],later:[17,0,22,1,12,13,7],cover:14,drive:[0,13],destruct:[0,13],doe:[21,27,24,1,12,7],declar:21,snapshot:[17,9,21,26,13],runtim:[9,13],determin:[5,27,1
],sourceforg:[],occasion:12,region:[5,12],hmac:21,gracefulli:[5,25],myawssecretaccesskei:[],theoret:14,show:25,carefulli:1,random:[],syntax:[27,19,2,26,15,13],connect:[14,12,27,21],permiss:[5,21,20,27],bucketnam:12,newest:[14,3],anywai:[],rotat:[8,15,27,19,3,20],redirect:27,current:[5,15,12,25,7],onli:[17,25,0,20,22,11,27,10,24,21,26,12,7,4,5,6,14,15,13],slow:7,locat:[8,17,0,11,23,10,1,3,26,6,20,13],execut:[5,15,22,13],transact:21,configur:[9,27,13],activ:[8,17,0,20,19,11,23,10,24,1,25,2,12,3,4,26,6,15,27,13],state:[5,25,21,7],haven:[],authinfo:[],latenc:[14,21,4],suppos:12,rich:12,factor:[5,12],folder:[17,22,13],local:[8,9,0,22,17,11,23,10,12,21,3,26,6,14,20,27],defin:[5,25,27,1],contribut:[5,9],variou:[26,15,3],get:[8,9,0,20,22,19,23,27,1,25,2,16,3,5,15,26,13],googlegroup:16,nasti:[0,13],stop:27,autom:[5,21],regularli:[17,13],ssl:7,s3rr:[],cannot:[5,25],ssh:[9,14,12],report:[21,22,16,1],reconstruct:[5,25,1],requir:[8,21,1,27,19,25,26,5,22,20,7],myawsaccesskeyid:[],reveal:21,enabl:[],dramat:14,intrins:14,method:[27,22],provid:[21,22,27,1,12,7],bad:14,statist:[6,13,9],though:[27,12,1],contain:[22,27,25,3,5,15],userspac:[],nowher:[],where:[26,15,7,3,1],caches:[26,27,20,13],wiki:[22,16],kernel:22,set:[5,14,20,27],bucket1:1,bucket3:1,bucket2:1,startup:27,maximum:[5,2,23,20,27],see:[8,17,0,20,11,23,10,1,25,26,12,7,3,4,5,6,14,15,27,13],num:[27,20],s3qlcp:[5,9,17,18,13],fail:[12,22,7],close:[11,24],optimum:14,whatsoev:[0,21,13],best:[14,12,22,27],concern:1,infinit:21,awar:1,statu:[8,9,0,17,11,23,10,25,12,21,3,4,26,6,20,7],detect:[21,12,1],extend:21,inconveni:21,hopefulli:7,databas:21,boundari:[5,25],label:[2,23],favor:21,enough:[14,20,27,7],between:[5,25,27,20],"import":[5,22,1],neither:[0,13],across:21,attribut:21,amazon:[9,21,1,12,5,7],august:[],kei:[21,12],weak:[],inconsist:7,southeast:[],lazi:[11,24],hardlink:[17,21,13],joe:[5,1],expire_backup:[5,9,18,25],solv:14,come:[5,14,21],local0:27,addit:[17,10,27,21,1,3,26,15,13],both:[17,21,22,13,1],protect:[0,21,13],accident:
[2,0,23,7,13],last:1,irregular:[5,25],extens:21,someth:[17,13],howev:[17,0,27,21,25,12,13,5,14,7],alon:1,job:[5,27],against:[0,21,13],configpars:1,etc:[17,27,7,13],instanc:14,freeli:27,corrupt:1,com:[8,17,0,22,11,23,10,25,3,4,26,6,14,20,16],pcp:[5,9,14,18,4],load:27,simpli:[0,13],figur:1,inspir:[17,13],buti:[],period:[0,13],insist:14,batch:[8,27,19],written:[21,20,27,1],littl:[27,12,22],shutdown:27,linux:[27,22],averag:12,guid:[8,9,18,23,3,20],assum:27,damag:[5,25,15,1],quit:22,worthless:[0,13],strong:[8,17,0,11,23,10,25,3,4,26,6,20],nikolau:27,west:[],devic:27,three:[27,12,22],been:[0,19,11,27,21,24,1,25,12,7,5,14,15,13],mark:[8,19],compon:21,secret:12,much:4,interpret:27,interest:13,subscrib:16,monthli:[17,13],immedi:[10,11,27,24,12,13],strategi:[5,25],legaci:12,infect:[0,13],upstart:[5,27,20],great:[0,13],ani:[8,17,0,22,23,21,1,25,2,12,16,5,13],rsync_arg:14,zero:14,understand:[27,20],togeth:1,els:15,tradition:[0,13],s3qlstat:[6,18,7,13,9],present:12,"case":[17,0,22,21,1,25,5,14,15,13],replic:[5,17,21,13],trojan:[0,13],ident:[17,21,13,1],look:14,gnu:7,solid:21,plain:[2,23],servic:[21,12,1],zlib:[27,20],histor:1,trick:[9,14],documents_februari:[17,13],invok:[26,15],abov:[5,25,1],error:[8,9,0,20,17,11,23,10,1,25,21,3,4,26,6,19,7],login:[12,1],invoc:[5,25],ahax:4,loos:[12,1],jibbadup:[],earli:7,runlevel:27,argpars:22,have:[17,0,20,22,11,23,16,21,24,1,25,2,12,7,4,5,14,15,27,13],advantag:[5,25,0,13],stdout:27,almost:14,therefor:[5,27,12,7,1],remount:12,worri:7,destin:[17,14,13,4],exit:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],gsutil:[],conf:[5,9],incom:[],revis:[15,3],dedic:[27,7],sever:[21,22,27,1,12,4,5,14,20],tamper:[5,25],unmount:[9,11,27,24,1,12,5,14,19,7],develop:[9,21,12],minim:21,perform:[9,21,22,27,1,26,12,3,4,5,14,15,7],media:12,make:[17,0,20,22,27,21,1,25,12,13,5,14,15,7],flushcach:[26,13],same:[17,27,1,25,12,5,14,15,13],"while":[0,22,27,21,24,12,26,13],read:[8,17,21,20,19,23,1,2,12,7,3,14,15,27,13],renumb:[8,19],unexpectedli:7,split:21,auto:[27,20],pai
:12,document:[8,17,0,11,23,10,3,26,6,20,13],infer:[5,25],complet:[17,22,27,1,25,26,12,5,20,13],week:[5,25],geograph:12,archiv:[21,12],hostnam:12,closest:12,lie:17,optim:27,keyr:[],confidenti:[],upon:7,effect:[17,21,13,1],cycl:[5,25],solut:5,remot:[14,12,21],fuse:[27,7],temporari:12,user:[8,9,0,18,22,17,11,23,10,24,19,12,21,3,26,6,14,20,27,13],mani:[5,25,21,1],extern:[0,12,13],encrypt:[21,27,1,2,3,23],typic:[17,13],recent:[5,25,27,15,7],gss:12,appropri:27,kept:[8,15,27,19,3,20],older:15,nevertheless:[5,1],entri:[27,20,7,1],thu:1,irrelev:7,well:[0,20,27,10,1,25,5,15,13],without:[8,17,0,21,19,12,13,14,15,7],command:[0,1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19,20,22,23,24,25,26,27],thi:[0,1,2,3,4,5,6,7,8,10,12,13,14,15,16,17,19,20,21,22,23,24,25,26,27],filesystem:[27,19,2,14,20,23],gzip:21,credenti:[8,20,1,23,19,2,3,15,27],spend:14,left:7,compromis:14,identifi:[12,1],just:[8,17,0,20,11,23,10,24,19,25,2,12,3,4,26,6,14,15,27,13],less:[7,1],conform:21,tip:[9,14],lowest:12,obtain:[],rest:14,bandwidth:[5,21],touch:[17,13],openstack:21,passphras:[9,15,3,1],roughli:27,speed:[5,27],yet:[12,7],web:12,viru:[0,13],detach:[11,24],homedir:[],easi:14,hint:[26,15,3],trigger:[12,13],point:[21,11,27,1,12,20],had:[5,17,25,13],except:1,param:15,thousand:[27,12,7],add:[0,13],valid:[12,13],nor:[0,13],versa:[17,13],input:[8,19],logger:27,subsequ:[5,25,12],match:12,bin:[14,22],applic:[7,1],transpar:21,preserv:[5,21],big:[14,7],regard:[17,13],exception:[],traffic:12,know:[5,17,25,7,13],background:[11,24],amp:14,bit:21,password:[2,23,12,1],recurs:[5,9,10,13],presum:1,like:[17,0,27,21,12,13,4,20,7],loss:[12,1],daemon:[27,20],ctime:7,specif:[0,22,1,12,13,26,15,7],header:1,should:[8,17,0,20,22,19,11,23,10,24,1,7,3,26,6,14,15,27,13],anyth:[],manual:[11,22,24,16],resolv:[12,16],noth:[0,13],princip:7,necessari:15,either:[12,13,3,26,15,7],output:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],per:27,page:18,yyyi:[5,25],imagin:[5,25,0,13],right:14,old:[8,0,20,22,27,19,25,12,3,5,15,13],often:[14,1],deal:[],
ll23bi:1,interv:[5,25,14,20,27],creation:[2,23,9],some:[8,17,0,20,22,11,23,10,24,1,25,26,12,21,3,4,5,6,19,7],umount:[9,18,11,27,24,7],self:22,certain:[],strongest:[],"export":[27,20,7],flush:[27,13],guarante:[5,25,12],server:[14,12,7],librari:[22,7],"24h":[27,20],rsync:[17,0,13,4,5,14,7],backend:[8,9,17,11,23,24,1,12,7,3,26,14,20,27,13],confirm:[10,13],stronger:12,freebsd:22,avoid:[5,25],exec:27,definit:[5,25],februari:[17,13],protocol:1,usernam:1,equal:27,leav:7,slash:12,cif:27,duplic:[17,21,1,12,5,13],creep:21,refer:[18,1],machin:[],core:[5,27],plu:1,object:[20,12,27],run:[0,20,22,19,27,1,25,26,13,3,5,15,7],itself:[12,22,7,1],power:5,certif:[12,7],reach:[8,15,27,19,3,20],intellig:[5,25,21],view:18,usag:[5,17,27,25,13],symlink:21,speak:27,host:[],unreason:7,although:[21,10,7,13],eventu:12,bi23ll:1,immut:[9,0,10,5,21,14,13],impos:[],stage:[],sshf:[14,12],comparison:[],deflat:21,actual:[15,14,12,22,7],proce:22,memori:27,http:[8,17,0,22,11,23,10,25,12,3,4,26,6,20],storebackup:[17,13],acl:21,messag:[8,15,23,19,2,3,20,27],fals:[27,20],commit:[24,7],backup:[17,9,0,27,21,1,25,12,3,5,14,15,13],disabl:[8,15,27,19,3,20],block:[21,11,23,24,1,2,13,26,27,7],repair:8,client:7,real:12,encount:[16,7],xyz:[],within:[13,1],encod:27,automat:[5,9,27,12,15],two:[17,21,15,22,13],down:[5,11,24,7],ahv:14,authinfo2:[8,15,23,19,2,3,20,27],ensur:[5,25,14,21],chang:[17,9,0,27,21,12,7,3,26,15,13],insuffici:7,storag:[8,9,0,20,19,17,11,23,21,24,1,2,12,7,3,5,14,15,27,13],your:[8,17,0,18,22,11,23,10,25,26,12,3,5,6,14,20,27,13],durabl:1,manag:[9,12,15],east:[],fast:[9,21,27,13],fusermount:[11,24,7],prepar:26,wai:[0,27,21,1,12,13,14,7],transfer:[5,14,21],support:[17,21,27,12,13,20,7],question:16,s3_backup:[5,9],"long":[5,25,14,7,1],avail:[8,0,20,23,25,12,7,3,5,15,27,13],start:[8,22,27,1,12,4,5,14,19],reli:[7,1],quiet:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],includ:[22,21,12,18,7],lot:14,"var":12,succeed:[8,17,0,11,23,10,25,3,4,26,6,20],individu:[27,20,4],"function":15,properli:5,tracker:[21,22
,16],form:[5,25,12,1],offer:[17,12,13],forc:[8,2,23,19],basic:[],continu:24,sigstop:[27,20],satur:27,measur:5,newer:[12,22],don:[27,20,7],line:[26,21],bug:[21,22,16,7],faster:[10,12,13],info:[8,15,27,19,3,26,20],commun:1,made:[17,21,0,14,13],furthermor:[21,1],consist:[12,1],possibl:[0,22,27,21,12,13,14,15,7],"default":[8,20,23,19,25,2,3,4,5,15,27],bucket:[9,23,1,2,12,3,5,15,27,7],displai:7,tell:[5,25,27],asynchron:21,authfil:[8,20,1,23,19,2,3,15,27],below:[12,13],limit:[17,0,27,10,1,26,12,7,5,6,20,13],unnot:12,problem:[8,17,0,16,21,1,25,12,7,5,14,13],similar:27,expect:[27,12,1],featur:[17,9,0,21,1,12,13],creat:[17,0,20,23,10,25,2,12,7,5,21,15,27,13],classic:[],retriev:[14,12,27,20,4],dure:[15,20,12,27],day_hour:[5,25],decrypt:1,s3qlctrl:[26,9,14,18,13],strongli:[5,25],workaround:14,decreas:4,file:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,19,20,21,23,24,25,26,27],mtime:7,request:12,exist:[17,0,23,10,1,25,2,12,5,13],improv:[5,9,14,4],mybucket:14,check:[8,9,22,27,19,12,15,7],probabl:[21,25,5,14,15,13],otherwis:[27,20],again:[0,12,7,13,24],readi:12,relatim:7,umask:27,googl:[8,9,0,22,17,11,23,10,25,12,21,3,4,26,6,20],sequenti:[8,19],when:[8,0,20,19,27,21,1,25,12,7,3,4,5,14,15,13],detail:[5,14,12,7],prepend:12,field:[],other:[17,0,22,27,10,21,12,16,26,6,20,13],futur:[17,0,10,13,26,6,7],rememb:[],test:[21,22],you:[17,0,20,22,19,11,23,16,10,24,1,25,2,12,7,5,14,15,27,13],shrink:21,rid:[0,13],particular:1,variabl:22,intend:[],clean:[8,19],fulli:7,mountpoint:[27,24,12,26,6,14,20,13],"return":[8,17,0,11,23,10,24,25,3,4,26,6,20,7],fsck:[8,9,18,19,27,1,12,15],briefli:[],releas:21,track:[5,25],log:[8,20,19,27,1,12,3,26,15,13],consid:[12,16,1],sql:24,noleaf:7,dd_hh:[5,25],pool:[],stai:[27,20],reduc:[5,12],infrequ:1,longer:[21,0,24,25,13,5,15,7],algorithm:[5,9,21,20,27],vice:[17,13],directori:[0,1,2,3,4,5,7,8,10,11,12,13,14,15,17,19,20,21,22,23,25,27],reliabl:[9,12,1],descript:[8,17,0,11,23,10,25,3,4,26,6,14,20,27],save:[25,14,21,1],rule:14,sftp:[9,12],depth:[],ignor:[],back:[0,13],
time:[8,0,20,19,23,21,1,25,2,12,7,3,5,14,15,27,13],backward:[],s3qladm:[9,15,18,3],daili:[]},objtypes:{},titles:["The <strong class=\"program\">s3qllock</strong> command","General Information","File System Creation","The <strong class=\"program\">s3qladm</strong> command","The <strong class=\"program\">pcp</strong> command","Contributed Programs","The <strong class=\"program\">s3qlstat</strong> command","Known Issues","The <strong class=\"program\">fsck.s3ql</strong> command","S3QL User&#8217;s Guide","The <strong class=\"program\">s3qlrm</strong> command","The <strong class=\"program\">umount.s3ql</strong> command","Storage Backends","Advanced S3QL Features","Tips &amp; Tricks","Managing Buckets","Further Resources / Getting Help","The <strong class=\"program\">s3qlcp</strong> command","Manpages","Checking for Errors","The <strong class=\"program\">mount.s3ql</strong> command","About S3QL","Installation","The <strong class=\"program\">mkfs.s3ql</strong> command","Unmounting","The <strong class=\"program\">expire_backups</strong> command","The <strong class=\"program\">s3qlctrl</strong> command","Mounting"],objnames:{},filenames:["man/lock","general","mkfs","man/adm","man/pcp","contrib","man/stat","issues","man/fsck","index","man/rm","man/umount","backends","special","tips","adm","resources","man/cp","man/index","fsck","man/mount","about","installation","man/mkfs","umount","man/expire_backups","man/ctrl","mount"]}) \ No newline at end of file
+Search.setIndex({objects:{},terms:{suffici:1,all:[0,1,2,3,5,7,8,11,12,13,14,15,17,19,20,21,22,23,24,25,26,27],code:[8,17,0,22,11,23,10,25,21,3,4,26,6,20],partial:12,global:[],mnt:[5,14,27],month:[5,25],prefix:[12,1],stumbl:7,notquitesecret:1,follow:[0,1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17,19,20,22,23,24,25,26,27],disk:[14,21],whose:12,decid:[14,1],middl:7,depend:[8,9,22,23,1,25,3,5,14,20,27],million:[],ulimit:[27,20],readabl:21,send:16,rel:[12,4],init:[27,7],program:[0,2,3,4,5,6,7,8,9,10,11,13,15,17,19,20,22,23,24,25,26,27],those:12,under:[12,22],aris:12,sha256:21,neglig:[],worth:7,introduc:[17,13],sourc:[17,21,22,4,5,13],everi:[17,0,27,1,25,12,5,14,13],risk:[],mkf:[8,2,23,18,9],far:14,faq:16,account:[20,12,27],util:[14,19],pycryptopp:22,failur:1,veri:[21,12,7],affect:[17,27,7,13,1],tri:[15,7],administr:[0,13],level:[26,13],did:21,who:[11,27,20,24],list:[21,22,25,26,12,13,5,15,16],upload:[21,11,27,24,26,12,13,5,14,20,7],"try":[5,25,12,22,1],larg:[17,14,7,13],stderr:27,small:[14,7],blocksiz:[2,23,20,27],mount:[17,9,0,20,18,11,27,10,24,1,26,12,7,3,5,6,14,15,13],dir:[17,27],pleas:[21,16],upper:12,smaller:[14,21,7],slower:[17,27,13],ifac:27,ten:12,whitespac:[],compens:1,sync:4,sign:12,consequ:1,hors:[0,13],design:[0,21,13,1],pass:[27,4],download:[15,22,3],further:[9,15,16],correspond:12,port:12,rath:27,even:[8,0,11,24,1,12,19,13],what:[5,25,15,26,1],bzip2:[21,20,27],sub:7,entir:[21,0,10,13],descriptor:[27,20],section:[12,1],abl:[0,11,24,1,12,13,16],weren:16,asia:[],find:[0,12,27,7,13],access:[21,20,27,1,12,15,7],delet:[9,0,10,25,12,3,5,15,13],version:[8,17,0,20,22,11,23,10,24,19,25,2,12,21,3,4,26,6,15,27,7],suspect:19,consecut:12,"new":[8,0,20,23,1,25,26,12,5,15,27,13],net:[27,22],ever:1,"public":[],contrast:[0,13],metadata:[9,20,27,3,26,14,15,13],elimin:12,full:[8,17,0,11,23,10,26,21,3,5,6,20,27,13],themselv:1,absolut:[5,25,12],pacif:[],gener:[9,0,22,1,25,12,7,5,15,13],never:[],privat:[],here:[26,12,18],satisfi:22,explicitli:7,modif:7,address:7,path:[8,20,22,23,19,2
,12,3,15,27],becom:[5,25,14,12,1],modifi:[17,21,13],sinc:[27,1,25,12,5,7],valu:[27,20,7,1],wait:[11,14,27,24,7],dry:25,convert:12,joes_bucket:[],checksum:21,larger:27,step:5,amount:[26,14,12,21],throughput:[],action:[26,15,13,3],implement:1,magnitud:[17,13],chanc:[],control:13,fstab:[27,7],appli:[14,12,1],modul:[8,20,1,23,19,2,3,26,22,15,27],apw:22,filenam:14,unix:[14,21,7],visibl:12,instal:[8,9,0,20,18,22,17,11,23,10,26,3,5,6,15],total:[27,13],establish:1,from:[8,17,0,20,22,19,23,21,1,25,2,12,3,4,5,14,15,27,13],describ:[12,1],would:[17,1,25,12,5,13],apswvers:22,upgrad:[9,15,22,3],few:[5,12,22],concret:1,call:[17,0,27,10,1,25,26,12,5,6,14,20,13],usr:[8,17,0,11,23,10,26,3,5,6,20],recommend:[5,27,12,7,1],taken:[17,21,20,27,13],tape:[0,13],bill:1,type:[],until:[11,27,7,13,24],more:[21,22,1,13,4,26,7],sort:[5,25],desir:26,st_nlink:7,src:[17,13],peopl:[],hundr:[],relat:12,benchmark:[5,9,27],"19283712_yourname_s3ql":[],notic:[14,12],enhanc:5,warn:[26,10,15,13,1],sqlitelibvers:[],sens:14,known:[9,21,7],rare:[],hold:[12,1],unpack:22,cach:[8,9,21,20,23,19,2,13,3,26,15,27,7],must:[17,22,12,3,15,13],worst:[0,13],none:[8,20,1,27,19,3,15],word:[],sometim:15,restor:[17,9,14,15,13],dest:17,setup:22,work:[21,22,24,25,5,14,7],uniqu:1,conceptu:21,remain:27,wors:12,obvious:27,can:[0,1,2,3,5,6,7,8,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27],mktemp:27,about:[9,21,27,6,14,13],root:[17,0,11,27,10,24,26,6,20,13],fetch:[17,13],overrid:12,sqlite:22,prompt:[8,19],tar:14,give:[5,25,14,15],process:[11,27,24,12,4,5,14,20,7],lock:[0,13],sudo:22,share:[8,17,0,22,11,23,10,26,3,5,6,20],accept:[8,17,0,20,19,11,23,10,24,1,25,2,3,4,26,6,15,27],high:[21,20,27,4],critic:21,minimum:[14,21],want:[11,27,24,25,5,14,16],onlin:[0,12,21,13],unfortun:[14,12,7],occur:[8,17,0,11,23,10,1,25,3,4,26,6,20],ratio:27,alwai:[17,27,25,13,5,7],end:[27,15],turn:[5,25,27,7],rather:[25,12,13,5,14,7],anoth:1,ordinari:[17,11,24,13],write:[8,9,21,20,22,17,27,19,12,7,3,5,14,15,13],how:[5,25,14],manpag:[9,18],env:27,webpag
:12,verifi:[12,7],simpl:[5,25,13],updat:[5,21,12,7],product:[],resourc:[9,15,16],max:[27,20],earlier:[7,1],pyliblzma:22,badli:[15,3],wrong:[15,3],endang:21,mai:[17,0,20,22,27,16,21,24,1,26,12,7,3,5,14,15,13],multipl:[8,15,23,19,2,3,20,27],redund:[5,25,21,12],secondari:[],data:[8,17,21,20,19,11,23,27,24,1,2,12,7,3,5,14,15,26,13],grow:[21,27],physic:[5,17,0,15,13],man:[18,7],indistinguish:21,"short":[0,12,13],attempt:[8,27,7,1],practic:[14,12],third:14,read:[8,17,21,20,19,23,1,2,12,3,14,15,27,13],neitheristhi:1,author:[27,7],favorit:[],apsw:22,element:27,issu:[17,9,0,22,27,16,10,21,12,7,26,6,13],inform:[9,27,1,25,26,20,13],maintain:27,combin:[14,12,27,1],allow:[17,0,20,27,10,25,26,12,4,5,6,14,15,13],enter:[2,23],exclus:14,volum:27,order:[17,13,1],talk:12,oper:[8,17,0,20,11,23,10,1,25,21,3,4,26,6,15,27],help:[9,22,16,26,13,3,5,15,7],over:[17,21,27,12,14,20,13],move:[21,15,1],soon:[],topsecret:[],increas:[14,20,27],appar:12,effici:7,still:[17,21,11,27,24,1,12,13],dynam:[21,22],paramet:1,overwrit:[2,23],rogerbinn:22,fix:[0,25,12,13,5,7],inadvert:[17,13],better:[27,22,4],window:12,html:[],restart:[27,7],persist:1,mail:[21,15,22,16],main:[10,13],might:[17,0,27,16,10,19,13,26,6,7],documents_januari:[17,13],them:[17,11,24,1,12,15,13],good:[0,25,13,5,14,7],synopsi:[8,17,0,11,23,10,25,3,4,26,6,20],thei:[17,0,18,21,25,12,5,14,13],python:[27,22,1],promin:7,safe:[17,14,13],fuse4bsd:22,dai:[5,25],initi:[],dat:[5,25],terminolog:[9,1],therebi:12,instead:[17,11,24,1,12,13,7],interrupt:15,potenti:12,now:[0,22,25,12,5,13],bigger:[],choic:22,term:[5,25,1],"__version__":22,somewher:[8,17,0,11,23,10,3,26,6,20,13],name:[17,23,1,25,2,12,5,14,20,27,13],joesdomain:[],authent:[8,9,20,19,23,1,2,12,3,15,27],achiev:[5,17,21,27,13],mode:[27,20],each:[5,25,27,20,1],debug:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],found:[14,1],beneath:13,confid:[],side:1,mean:[21,12,7],compil:27,s3ql:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27],due:[12,7],mkfifo:27,chunk:7,hard:[14
,21],idea:[5,25,0,7,13],procedur:[17,13],realli:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,14,15,27,7],contrib:[5,14,27],meta:[26,14,13],"static":22,significantli:[5,22,1],year:[5,25,21,12],s3_copi:[5,9],happen:[0,12,13,1],todo:[],special:[17,13],out:[21,12,7],lzma:[21,20,22,27],ftp:[],shown:12,network:[17,14,27,21,13],space:[17,21,7,13],open:[11,27,24],newli:[],log_fifo:27,content:[17,21,27,0,1,3,13],suitabl:[21,22],rational:0,internet:[21,27],print:[8,17,0,20,22,11,23,10,24,19,25,2,3,4,26,6,15,27],eth0:27,bla:27,correct:[5,25],common:[17,0,12,13],foreground:[27,20],shut:[5,11,24,7],after:[17,0,21,1,12,14,15,13],insid:17,advanc:[9,13],migrat:5,documents_janurai:[],manipul:21,situat:14,given:7,free:[21,12],standard:[5,25,21,12,27],inod:[],reason:[10,27,24,1,25,12,13,5,14,19,7],base:27,theori:12,usual:27,ask:[22,23,2,12,15,7],org:27,"byte":[],likelihood:12,afterward:14,bash:14,care:[17,21,7,13],her:[0,13],thread:[5,27,20],befor:[17,21,11,27,24,1,12,13,7],guarante:[5,25,12],could:[17,13],success:[],refus:[5,25],keep:[0,27,1,25,5,13],recov:[0,13],thing:[15,3],length:12,rais:[27,20],place:[27,12],perman:[9,0,27,12,14,13],pycrypto:22,principl:27,confus:7,neglect:1,first:[21,22,27,1,12,14,15,7],origin:[],softwar:27,rang:[5,25],becaus:[17,21,27,1,12,5,13],directli:12,malici:[0,13],carri:21,onc:[0,11,27,21,24,12,14,20,13],clariti:[],s3q:[],number:[21,22,27,25,13,4,5,14,20,7],capac:21,restrict:[26,27,20],date:[5,25],instruct:[27,22],alreadi:[17,22,7,13],done:[27,12],wrapper:[5,4],llfuse:22,stabl:21,miss:[25,1],s3c:12,size:[21,23,2,13,26,14,20,27,7],differ:[17,21,22,27,1,26,3,5,14,15,13],convent:[8,17,0,11,23,10,3,26,6,20,7],script:[5,14,27,7],profil:[27,20],unknown:21,interact:3,s3qllock:[9,0,18,10,5,14,13],system:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,27],least:[22,27,25,12,5,20],parallel:[9,27,4,5,14,20],checkpoint:1,attack:[0,7,13],necessarili:12,demonstr:5,s3qlrm:[9,0,18,10,25,5,13],termin:[11,24,7,1],lost:[12,1],john:12,"final":13,store:[8,9,2
1,20,19,23,1,25,2,12,3,5,14,15,27,7],low:[21,12],servi:7,luckili:[],consol:12,option:[0,2,3,4,5,6,7,8,10,11,13,14,15,17,19,20,22,23,24,25,26,27],especi:[14,21],shelf:[0,13],tool:12,copi:[17,9,21,1,13,4,5,14,15,7],specifi:[8,20,19,23,27,1,25,2,12,3,5,15,26,7],arbitrari:[14,12],part:[21,27,1,25,4,5],pars:1,mostli:[],consult:[8,23,20,3],exactli:[5,17,25,13],than:[10,27,1,25,13,5,14,7],wide:[27,22],target:[8,17,20,27,19,3,14,15,13],cachedir:[8,15,23,19,2,3,20,27],whenev:[14,22,27,1],prevent:[2,0,23,13],remov:[17,9,0,22,27,10,25,26,7,5,6,13],eleg:21,tree:[17,9,0,10,4,5,21,13],second:[27,1,25,12,5,14,20],structur:[21,15],exampl:[21,1,25,13,5,14,7],matter:[26,14,15],temporarili:27,friend:14,video:27,minut:[5,25],led:1,pre:27,unencrypt:[2,23],sai:[17,13],comput:[5,25,21,27],entail:[],januari:[17,13],plaintext:[],explicit:14,ram:27,mind:1,argument:[26,15,3],peculiar:12,"13th":21,packag:[5,22],expir:27,increment:[],disadvantag:[17,13],need:[17,20,22,27,1,25,12,5,15,13],seem:14,exclud:[0,13],paramiko:[],caus:[12,1],built:22,equival:[5,25],irrepar:15,destroi:[0,13],moreov:13,blindli:7,atim:7,accuraci:[5,25],note:[17,9,21,22,11,27,24,25,12,7,4,5,14,15,13],also:[0,1,3,4,5,6,13,8,10,11,12,14,17,18,20,21,22,23,24,25,26,27],builtin:[],denomin:12,take:[17,21,13,3,26,14,15,7],which:[17,10,22,21,1,25,12,13,5,7],discrep:[],transmit:[],environ:22,uplink:5,singl:[14,20,27],swordfish:1,compat:[9,12],begin:1,sure:[20,22,27,12,14,15,7],unless:[2,0,23,13],distribut:[5,27,22],plenti:[],normal:[27,20,1],usernam:1,previou:[5,15,12],compress:[9,21,27,5,20,13],most:[8,17,20,19,27,1,25,12,7,3,5,15,13],beta:21,said:[],rigor:[5,25],plan:[],stricli:[],choos:[],homepag:[8,17,0,11,23,10,3,26,6,20],"class":[8,17,0,11,23,10,25,26,3,4,5,6,20],independ:[21,20,27],simplic:21,renam:21,correctli:[7,19],ship:[25,27,22,4],url:[8,20,19,23,1,2,12,3,15,27],doc:[8,17,0,11,23,10,26,3,5,6,20],clear:[17,15,13],later:[17,0,22,1,12,13,7],cover:14,drive:[0,13],destruct:[0,13],doe:[21,27,24,1,12,7],declar:21,snapshot:[17,9
,21,26,13],runtim:[9,13],determin:[5,27,1],sourceforg:[],occasion:12,region:[5,12],hmac:21,gracefulli:[5,25],myawssecretaccesskei:[],theoret:14,show:25,carefulli:1,random:[],syntax:[27,19,2,26,15,13],connect:[14,12,27,21],permiss:[5,21,20,27],bucketnam:12,newest:[14,3],anywai:[],rotat:[8,15,27,19,3,20],redirect:27,current:[5,15,12,25,7],onli:[17,25,0,20,22,11,27,10,24,21,26,12,7,4,5,6,14,15,13],slow:7,locat:[8,17,0,11,23,10,1,3,26,6,20,13],execut:[5,15,22,13],transact:21,configur:[9,27,13],activ:[8,17,0,20,19,11,23,10,24,1,25,2,12,3,4,26,6,15,27,13],state:[5,25,21,7],haven:[],authinfo:[],latenc:[14,21,4],suppos:12,rich:12,factor:[5,12],folder:[17,22,13],local:[8,9,0,22,17,11,23,10,12,21,3,26,6,14,20,27],defin:[5,25,27,1],contribut:[5,9],variou:[26,15,3],get:[8,9,0,20,22,19,23,27,1,25,2,16,3,5,15,26,13],googlegroup:16,nasti:[0,13],stop:27,autom:[5,21],regularli:[17,13],ssl:7,s3rr:[],cannot:[5,25],ssh:[9,14,12],report:[21,22,16,1],reconstruct:[5,25,1],requir:[8,21,1,27,19,25,26,5,22,20,7],myawsaccesskeyid:[],reveal:21,enabl:[],dramat:14,intrins:14,method:[27,22],provid:[21,22,27,1,12,7],bad:14,statist:[6,13,9],though:[27,12,1],contain:[22,27,25,3,5,15],userspac:[],nowher:[],where:[26,15,3,1],caches:[26,27,20,13],wiki:[22,16],kernel:22,set:[5,14,20,27],bucket1:1,bucket3:1,bucket2:1,startup:27,maximum:[5,2,23,20,27],see:[8,17,0,20,11,23,10,1,25,26,12,7,3,4,5,6,14,15,27,13],num:[27,20],s3qlcp:[5,9,17,18,13],fail:[12,22,7],close:[11,24],optimum:14,whatsoev:[0,21,13],best:[14,12,22,27],concern:1,infinit:21,awar:1,statu:[8,9,0,17,11,23,10,25,12,21,3,4,26,6,20,7],detect:[21,12,1],extend:21,inconveni:21,hopefulli:7,databas:21,boundari:[5,25],label:[2,23],favor:21,enough:[14,20,27],between:[5,25,27,20],"import":[5,22,1],neither:[0,13],across:21,attribut:21,check:[8,9,22,27,19,12,15,7],amazon:[9,21,1,12,5,7],august:[],kei:[21,12],weak:[],inconsist:7,extens:21,job:[5,27],hardlink:[17,21,13],joe:[5,1],expire_backup:[5,9,18,25],solv:14,come:[5,14,21],local0:27,addit:[17,10,27,21,1
,3,26,15,13],both:[17,21,13,1],protect:[0,21,13],accident:[2,0,23,13],last:1,irregular:[5,25],southeast:[],someth:[17,13],howev:[17,0,27,21,25,12,13,5,14,7],alon:1,lazi:[11,24],against:[0,22,21,13],configpars:1,etc:[17,27,7,13],instanc:14,freeli:27,corrupt:1,com:[8,17,0,22,11,23,10,25,3,4,26,6,14,20,16],pcp:[5,9,14,18,4],load:27,simpli:[0,13],figur:1,inspir:[17,13],buti:[],period:[0,13],insist:14,batch:[8,27,19],written:[21,20,27,1],littl:[27,12,22],shutdown:27,linux:[27,22],averag:12,guid:[8,9,18,23,3,20],assum:27,damag:[5,25,15,1],quit:22,worthless:[0,13],strong:[8,17,0,11,23,10,25,3,4,26,6,20],nikolau:27,west:[],devic:27,three:[27,12,22],been:[0,19,11,27,21,24,1,25,12,7,5,14,15,13],mark:[8,19],compon:21,secret:12,much:4,interpret:27,interest:13,subscrib:16,monthli:[17,13],immedi:[10,11,27,24,12,13],strategi:[5,25],legaci:12,infect:[0,13],upstart:[5,27,20],great:[0,13],ani:[8,17,0,22,23,21,1,25,2,12,16,5,13],rsync_arg:14,zero:14,understand:[27,20],togeth:1,els:15,tradition:[0,13],s3qlstat:[6,18,13,9],present:12,"case":[17,0,22,21,1,25,5,14,15,13],replic:[5,17,21,13],trojan:[0,13],ident:[17,21,13,1],look:14,gnu:7,solid:21,plain:[2,23],servic:[21,12,1],zlib:[27,20],histor:1,trick:[9,14],documents_februari:[17,13],invok:[26,15],abov:[5,25,1],error:[8,9,0,20,17,11,23,10,1,25,21,3,4,26,6,19,7],login:[12,1],invoc:[5,25],ahax:4,loos:[12,1],jibbadup:[],earli:[22,7],runlevel:27,argpars:22,have:[17,0,20,22,11,23,16,21,24,1,25,2,12,7,4,5,14,15,27,13],advantag:[5,25,0,13],stdout:27,almost:14,therefor:[5,27,12,7,1],remount:12,worri:[],destin:[17,14,13,4],exit:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],gsutil:[],conf:[5,9],incom:[],revis:[15,3],dedic:[27,7],sever:[21,22,27,1,12,4,5,14,20],tamper:[5,25],unmount:[9,11,27,24,1,12,5,14,19,7],develop:[9,21,12,22],minim:21,perform:[9,21,22,27,1,26,12,3,4,5,14,15,7],media:12,make:[17,0,20,22,27,21,1,25,12,13,5,14,15,7],flushcach:[26,13],same:[17,27,1,25,12,5,14,15,13],"while":[0,22,27,21,24,12,26,13],handl:[17,21,27,25,12,13,5,14,
7],renumb:[],unexpectedli:7,split:21,auto:[27,20],pai:12,document:[8,17,0,11,23,10,3,26,6,20,13],infer:[5,25],complet:[17,22,27,1,25,26,12,5,20,13],week:[5,25],geograph:12,archiv:[21,12,22],hostnam:12,closest:12,lie:17,optim:27,keyr:[],confidenti:[],upon:7,effect:[17,21,13,1],cycl:[5,25],solut:5,remot:[14,12,21],fuse:[27,7],temporari:12,user:[8,9,0,18,22,17,11,23,10,24,19,12,21,3,26,6,14,20,27,13],mani:[5,25,21,1],extern:[0,12,13],encrypt:[21,27,1,2,3,23],typic:[17,13],recent:[5,25,27,15,7],gss:12,appropri:27,kept:[8,15,27,19,3,20],older:15,nevertheless:[5,1],entri:[27,20,1],thu:1,irrelev:[],well:[0,20,27,10,1,25,5,15,13],without:[8,17,0,21,19,12,13,14,15,7],command:[0,1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19,20,22,23,24,25,26,27],thi:[0,1,2,3,4,5,6,7,8,10,12,13,14,15,16,17,19,20,21,22,23,24,25,26,27],filesystem:[27,19,2,14,20,23],gzip:21,credenti:[8,20,1,23,19,2,3,15,27],spend:14,ubuntu:22,left:7,compromis:14,identifi:[12,1],just:[8,17,0,20,11,23,10,24,19,25,2,12,3,4,26,6,14,15,27,13],less:1,conform:21,tip:[9,14],lowest:12,obtain:[],rest:14,bandwidth:[5,21],touch:[17,13],openstack:[21,12],passphras:[9,15,3,1],roughli:27,speed:[5,27],yet:[12,7],web:12,viru:[0,13],detach:[11,24],homedir:[],easi:14,hint:[26,15,3],trigger:[12,13],point:[21,11,27,1,12,20],had:[5,17,25,13],except:1,param:15,thousand:[27,12],add:[0,13],valid:[12,13],nor:[0,13],versa:[17,13],input:[8,19],logger:27,subsequ:[5,25,12],launchpad:22,match:12,build:22,bin:[14,22],applic:[7,1],transpar:21,preserv:[5,21],big:[14,7],regard:[17,13],exception:[],traffic:12,know:[5,17,25,13],background:[11,24],amp:14,bit:21,password:[2,23,12,1],recurs:[5,9,10,13],presum:1,like:[17,0,27,21,12,13,4,20,7],loss:[12,1],daemon:[27,20],ctime:7,specif:[0,22,1,12,13,26,15,7],header:[22,1],should:[8,17,0,20,22,19,11,23,10,24,1,7,3,26,6,14,15,27,13],anyth:[],manual:[11,22,24,16],resolv:[12,16],noth:[0,13],princip:7,necessari:15,either:[12,13,3,26,15,7],output:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],per:27,page:18,yyyi:[5,25
],imagin:[5,25,0,13],right:14,old:[8,0,20,22,27,19,25,12,3,5,15,13],often:[14,1],deal:[],ll23bi:1,interv:[5,25,14,20,27],creation:[2,23,9],some:[8,17,0,20,22,11,23,10,24,1,25,26,12,21,3,4,5,6,19,7],umount:[9,18,11,27,24,7],self:22,certain:[],strongest:[],"export":[27,20],flush:[27,13],home:[5,14,12],server:[14,12,7],librari:[22,7],"24h":[27,20],rsync:[17,0,13,4,5,14,7],backend:[8,9,17,11,23,24,1,12,7,3,26,14,20,27,13],confirm:[10,13],stronger:12,freebsd:22,avoid:[5,25],exec:27,definit:[5,25],februari:[17,13],protocol:1,buffer:7,equal:27,leav:7,slash:12,cif:27,duplic:[17,21,1,12,5,13],creep:21,refer:[18,1],machin:[],core:[5,27],plu:1,object:[20,12,27],run:[0,20,22,19,27,1,25,26,13,3,5,15,7],itself:[12,22,1],power:5,certif:[12,7],reach:[8,15,27,19,3,20],intellig:[5,25,21],view:18,usag:[5,17,27,25,13],symlink:21,speak:27,host:[],unreason:7,although:[21,10,7,13],eventu:12,bi23ll:1,immut:[9,0,10,5,21,14,13],impos:[],stage:[],sshf:[14,12],comparison:[],deflat:21,actual:[15,14,12,22],proce:22,memori:27,http:[8,17,0,22,11,23,10,25,12,3,4,26,6,20],storebackup:[17,13],acl:21,messag:[8,15,23,19,2,3,20,27],fals:[27,20],commit:[24,7],backup:[17,9,0,27,21,1,25,12,3,5,14,15,13],disabl:[8,15,27,19,3,20],block:[21,11,23,24,1,2,13,26,27,7],repair:8,client:[],real:12,encount:[16,7],xyz:[],within:[13,1],encod:27,automat:[5,9,27,12,15],two:[17,21,15,13],down:[5,11,24,7],ahv:14,authinfo2:[8,15,23,19,2,3,20,27],ensur:[5,25,14,21],chang:[17,9,0,27,21,12,7,3,26,15,13],insuffici:7,storag:[8,9,0,20,19,17,11,23,21,24,1,2,12,7,3,5,14,15,27,13],your:[8,17,0,18,22,11,23,10,25,26,12,3,5,6,14,20,27,13],durabl:1,manag:[9,12,15],east:[],fast:[9,21,27,13],fusermount:[11,24,7],prepar:26,wai:[0,27,21,1,12,13,14,7],transfer:[5,14,21],support:[17,21,27,12,13,20,7],question:16,s3_backup:[5,9],"long":[5,25,14,7,1],avail:[8,0,20,23,25,12,7,3,5,15,27,13],start:[22,27,1,12,4,5,14],reli:[7,1],quiet:[8,17,0,20,11,23,10,24,19,25,2,3,4,26,6,15,27],includ:[22,21,12,18,7],lot:14,"var":12,succeed:[8,17,0,11,23,10,25,
3,4,26,6,20],individu:[27,20,4],"function":15,properli:5,tracker:[21,22,16],form:[5,25,12,1],offer:[17,12,13],forc:[8,2,23,19],basic:[],continu:24,sigstop:[27,20],satur:27,measur:5,newer:[12,22],don:[27,20],line:[26,21],bug:[21,22,16,7],faster:[10,12,13],info:[8,15,27,19,3,26,20],commun:1,made:[17,21,0,14,13],furthermor:[21,1],consist:[12,1],possibl:[0,22,27,21,12,13,14,15,7],"default":[8,20,23,19,25,2,3,4,5,15,27],bucket:[9,23,1,2,12,3,5,15,27,7],displai:[],tell:[5,25,27],asynchron:21,authfil:[8,20,1,23,19,2,3,15,27],below:[12,13],limit:[17,0,27,10,1,26,12,7,5,6,20,13],unnot:12,problem:[8,17,0,16,21,1,25,12,7,5,14,13],similar:27,expect:[27,12,1],featur:[17,9,0,21,1,12,13],creat:[17,0,20,23,10,25,2,12,5,21,15,27,13],classic:[],retriev:[14,12,27,20,4],dure:[15,20,12,27],day_hour:[5,25],decrypt:1,s3qlctrl:[26,9,14,18,13],strongli:[5,25],workaround:14,decreas:4,file:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,19,20,21,23,24,25,26,27],mtime:7,request:12,exist:[17,0,23,10,1,25,2,12,5,13],improv:[5,9,14,4],mybucket:14,dd_hh:[5,25],link:22,probabl:[21,25,5,14,15,13],otherwis:[27,20],again:[0,12,24,13],readi:12,relatim:7,umask:27,googl:[8,9,0,22,17,11,23,10,25,12,21,3,4,26,6,20],sequenti:[],when:[8,0,20,19,27,21,1,25,12,7,3,4,5,14,15,13],detail:[5,14,12,7],prepend:12,field:[],other:[17,0,22,27,10,21,12,16,26,6,20,13],futur:[17,0,10,13,26,6,7],rememb:[],test:[21,22],you:[17,0,20,22,19,11,23,16,10,24,1,25,2,12,7,5,14,15,27,13],shrink:21,rid:[0,13],particular:1,variabl:22,intend:[],clean:[8,19],fulli:[],mountpoint:[27,24,12,26,6,14,20,13],"return":[8,17,0,11,23,10,24,25,3,4,26,6,20,7],fsck:[8,9,18,19,27,1,12,15],briefli:[],releas:21,track:[5,25],log:[8,20,19,27,1,12,3,26,15,13],consid:[12,16,1],sql:24,noleaf:7,ppa:22,pool:[],stai:[27,20],reduc:[5,12],infrequ:1,longer:[21,0,24,25,13,5,15,7],algorithm:[5,9,21,20,27],vice:[17,13],directori:[0,1,2,3,4,5,7,8,10,11,12,13,14,15,17,19,20,21,22,23,25,27],reliabl:[9,12,1],descript:[8,17,0,11,23,10,25,3,4,26,6,14,20,27],save:[25,14,21,1],r
ule:14,sftp:[9,12],depth:[],ignor:[],back:[0,13],time:[8,0,20,19,23,21,1,25,2,12,7,3,5,14,15,27,13],backward:[],s3qladm:[9,15,18,3],daili:[]},objtypes:{},titles:["The <strong class=\"program\">s3qllock</strong> command","General Information","File System Creation","The <strong class=\"program\">s3qladm</strong> command","The <strong class=\"program\">pcp</strong> command","Contributed Programs","The <strong class=\"program\">s3qlstat</strong> command","Known Issues","The <strong class=\"program\">fsck.s3ql</strong> command","S3QL User&#8217;s Guide","The <strong class=\"program\">s3qlrm</strong> command","The <strong class=\"program\">umount.s3ql</strong> command","Storage Backends","Advanced S3QL Features","Tips &amp; Tricks","Managing Buckets","Further Resources / Getting Help","The <strong class=\"program\">s3qlcp</strong> command","Manpages","Checking for Errors","The <strong class=\"program\">mount.s3ql</strong> command","About S3QL","Installation","The <strong class=\"program\">mkfs.s3ql</strong> command","Unmounting","The <strong class=\"program\">expire_backups</strong> command","The <strong class=\"program\">s3qlctrl</strong> command","Mounting"],objnames:{},filenames:["man/lock","general","mkfs","man/adm","man/pcp","contrib","man/stat","issues","man/fsck","index","man/rm","man/umount","backends","special","tips","adm","resources","man/cp","man/index","fsck","man/mount","about","installation","man/mkfs","umount","man/expire_backups","man/ctrl","mount"]}) \ No newline at end of file
diff --git a/doc/html/special.html b/doc/html/special.html
index 02dde61..b7b1587 100644
--- a/doc/html/special.html
+++ b/doc/html/special.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Advanced S3QL Features &mdash; S3QL v1.6 documentation</title>
+ <title>Advanced S3QL Features &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Unmounting" href="umount.html" />
<link rel="prev" title="Mounting" href="mount.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="mount.html" title="Mounting"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -263,7 +263,7 @@ been flushed.</td>
<li class="right" >
<a href="mount.html" title="Mounting"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/tips.html b/doc/html/tips.html
index 9b0fff3..17e7ef9 100644
--- a/doc/html/tips.html
+++ b/doc/html/tips.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Tips &amp; Tricks &mdash; S3QL v1.6 documentation</title>
+ <title>Tips &amp; Tricks &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Known Issues" href="issues.html" />
<link rel="prev" title="Contributed Programs" href="contrib.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="contrib.html" title="Contributed Programs"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -187,7 +187,7 @@ details.</p>
<li class="right" >
<a href="contrib.html" title="Contributed Programs"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/html/umount.html b/doc/html/umount.html
index f4f824d..f2f1bcf 100644
--- a/doc/html/umount.html
+++ b/doc/html/umount.html
@@ -7,13 +7,13 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>Unmounting &mdash; S3QL v1.6 documentation</title>
+ <title>Unmounting &mdash; S3QL v1.7 documentation</title>
<link rel="stylesheet" href="_static/sphinxdoc.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
- VERSION: '1.6',
+ VERSION: '1.7',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
@@ -23,7 +23,7 @@
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="author" title="About these documents" href="about.html" />
- <link rel="top" title="S3QL v1.6 documentation" href="index.html" />
+ <link rel="top" title="S3QL v1.7 documentation" href="index.html" />
<link rel="next" title="Checking for Errors" href="fsck.html" />
<link rel="prev" title="Advanced S3QL Features" href="special.html" />
</head>
@@ -37,7 +37,7 @@
<li class="right" >
<a href="special.html" title="Advanced S3QL Features"
accesskey="P">previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="sphinxsidebar">
@@ -143,7 +143,7 @@ upload data in the background for a while longer.</p>
<li class="right" >
<a href="special.html" title="Advanced S3QL Features"
>previous</a> |</li>
- <li><a href="index.html">S3QL v1.6 documentation</a> &raquo;</li>
+ <li><a href="index.html">S3QL v1.7 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
diff --git a/doc/latex/manual.aux b/doc/latex/manual.aux
index 5c7507a..00e879d 100644
--- a/doc/latex/manual.aux
+++ b/doc/latex/manual.aux
@@ -66,6 +66,7 @@
\@writefile{toc}{\contentsline {section}{\numberline {4.3}S3 compatible}{10}{section.4.3}}
\newlabel{backends:s3-compatible}{{4.3}{10}{S3 compatible\relax }{section.4.3}{}}
\@writefile{toc}{\contentsline {section}{\numberline {4.4}Local}{11}{section.4.4}}
+\newlabel{backends:openstack}{{4.4}{11}{Local\relax }{section.4.4}{}}
\newlabel{backends:local}{{4.4}{11}{Local\relax }{section.4.4}{}}
\@writefile{toc}{\contentsline {section}{\numberline {4.5}SSH/SFTP}{11}{section.4.5}}
\newlabel{backends:ssh-sftp}{{4.5}{11}{SSH/SFTP\relax }{section.4.5}{}}
diff --git a/doc/latex/manual.log b/doc/latex/manual.log
index bbfc540..844282b 100644
--- a/doc/latex/manual.log
+++ b/doc/latex/manual.log
@@ -1,4 +1,4 @@
-This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=pdflatex 2011.10.2) 20 NOV 2011 22:13
+This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=pdflatex 2011.10.2) 27 NOV 2011 14:20
entering extended mode
%&-line parsing enabled.
**manual.tex
@@ -832,11 +832,11 @@ File: t1pcr.fd 2001/06/04 font definitions for T1/pcr.
] [4]
Chapter 3.
LaTeX Font Info: Font shape `T1/pcr/m/it' in size <9> not available
-(Font) Font shape `T1/pcr/m/sl' tried instead on input line 372.
+(Font) Font shape `T1/pcr/m/sl' tried instead on input line 382.
[5
]
-Underfull \hbox (badness 10000) in paragraph at lines 417--418
+Underfull \hbox (badness 10000) in paragraph at lines 427--428
[]
@@ -846,13 +846,13 @@ Underfull \hbox (badness 10000) in paragraph at lines 417--418
Chapter 4.
[9]
LaTeX Font Info: Font shape `T1/phv/bx/n' in size <12> not available
-(Font) Font shape `T1/phv/b/n' tried instead on input line 552.
+(Font) Font shape `T1/phv/b/n' tried instead on input line 562.
[10] [11] [12
]
Chapter 5.
-Underfull \hbox (badness 10000) in paragraph at lines 688--690
+Underfull \hbox (badness 10000) in paragraph at lines 698--700
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
@@ -862,20 +862,20 @@ Underfull \hbox (badness 10000) in paragraph at lines 688--690
]
Chapter 6.
-Underfull \hbox (badness 10000) in paragraph at lines 746--748
+Underfull \hbox (badness 10000) in paragraph at lines 756--758
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
[15]
-Underfull \hbox (badness 10000) in paragraph at lines 821--822
+Underfull \hbox (badness 10000) in paragraph at lines 831--832
[]
[16]
Chapter 7.
-Underfull \hbox (badness 10000) in paragraph at lines 851--853
+Underfull \hbox (badness 10000) in paragraph at lines 861--863
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
@@ -895,7 +895,7 @@ Chapter 9.
]
Chapter 10.
-Underfull \hbox (badness 10000) in paragraph at lines 1316--1318
+Underfull \hbox (badness 10000) in paragraph at lines 1326--1328
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
@@ -905,7 +905,7 @@ Underfull \hbox (badness 10000) in paragraph at lines 1316--1318
]
Chapter 11.
[29]
-Underfull \hbox (badness 10000) in paragraph at lines 1457--1462
+Underfull \hbox (badness 10000) in paragraph at lines 1465--1470
[]\T1/ptm/b/n/10 expire_backups \T1/ptm/m/n/10 us-age is sim-ple. It re-quires
back-ups to have names of the forms
[]
@@ -921,7 +921,7 @@ Chapter 13.
] [36]
Chapter 14.
-Underfull \hbox (badness 10000) in paragraph at lines 1744--1746
+Underfull \hbox (badness 10000) in paragraph at lines 1719--1721
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
@@ -929,25 +929,25 @@ Underfull \hbox (badness 10000) in paragraph at lines 1744--1746
[37
]
-Underfull \hbox (badness 10000) in paragraph at lines 1821--1823
+Underfull \hbox (badness 10000) in paragraph at lines 1796--1798
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
[38]
-Underfull \hbox (badness 10000) in paragraph at lines 1900--1902
+Underfull \hbox (badness 10000) in paragraph at lines 1875--1877
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
[39] [40] [41] [42] [43] [44] [45] [46]
-Underfull \hbox (badness 10000) in paragraph at lines 2460--2462
+Underfull \hbox (badness 10000) in paragraph at lines 2435--2437
[]\T1/ptm/m/n/10 Read au-then-ti-ca-tion cre-den-tials from this file (de-fault
:
[]
[47] [48]
-Underfull \hbox (badness 10000) in paragraph at lines 2605--2610
+Underfull \hbox (badness 10000) in paragraph at lines 2578--2583
[]\T1/ptm/b/n/10 expire_backups \T1/ptm/m/n/10 us-age is sim-ple. It re-quires
back-ups to have names of the forms
[]
@@ -959,10 +959,10 @@ Chapter 15.
No file manual.ind.
[51] (./manual.aux) )
Here is how much of TeX's memory you used:
- 8332 strings out of 495021
- 113354 string characters out of 1181035
- 199858 words of memory out of 3000000
- 11121 multiletter control sequences out of 15000+50000
+ 8329 strings out of 495021
+ 113346 string characters out of 1181035
+ 199892 words of memory out of 3000000
+ 11122 multiletter control sequences out of 15000+50000
58515 words of font info for 65 fonts, out of 3000000 for 9000
29 hyphenation exceptions out of 8191
45i,12n,48p,278b,492s stack positions out of 5000i,500n,10000p,200000b,50000s
@@ -973,9 +973,9 @@ rw/helvetic/uhvb8a.pfb></usr/share/texmf-texlive/fonts/type1/urw/helvetic/uhvbo
8a.pfb></usr/share/texmf-texlive/fonts/type1/urw/times/utmb8a.pfb></usr/share/t
exmf-texlive/fonts/type1/urw/times/utmr8a.pfb></usr/share/texmf-texlive/fonts/t
ype1/urw/times/utmri8a.pfb>
-Output written on manual.pdf (55 pages, 287015 bytes).
+Output written on manual.pdf (55 pages, 286415 bytes).
PDF statistics:
- 805 PDF objects out of 1000 (max. 8388607)
- 214 named destinations out of 1000 (max. 500000)
+ 803 PDF objects out of 1000 (max. 8388607)
+ 210 named destinations out of 1000 (max. 500000)
489 words of extra memory for PDF output out of 10000 (max. 10000000)
diff --git a/doc/latex/manual.tex b/doc/latex/manual.tex
index fd08191..23856f2 100644
--- a/doc/latex/manual.tex
+++ b/doc/latex/manual.tex
@@ -12,8 +12,8 @@
\title{S3QL Documentation}
-\date{November 20, 2011}
-\release{1.6}
+\date{November 27, 2011}
+\release{1.7}
\author{Nikolaus Rath}
\newcommand{\sphinxlogo}{}
\renewcommand{\releasename}{Release}
@@ -238,7 +238,7 @@ possible}.
\item {}
\href{http://www.python.org/}{Python}, version 2.6.6 or newer, but not
-Python 3.x.
+Python 3.x. Make sure to also install the development headers.
\item {}
The \href{http://pypi.python.org/pypi/pycryptopp}{PyCrypto++ Python Module}. To check if this module
@@ -251,15 +251,22 @@ argparse.\_\_version\_\_'}. If argparse is installed, this will print
the version number. You need version 1.1 or later.
\item {}
+\href{http://www.sqlite.org/}{SQLite} version 3.7.0 or newer. SQLite
+has to be installed as a \emph{shared library} with development headers.
+
+\item {}
The \href{http://code.google.com/p/apsw/}{APSW Python Module}. To check
which (if any) version of APWS is installed, run the command
\begin{Verbatim}[commandchars=\\\{\}]
-\PYG{l}{python -c 'import apsw; print apsw.apswversion(), apsw.sqlitelibversion()'}
+\PYG{l}{python -c 'import apsw; print apsw.apswversion()'}
\end{Verbatim}
-If APSW is installed, this should print two version numbers which
-both have to be at least 3.7.0.
+The printed version number should be at least 3.7.0. Note that APSW
+must be linked \emph{dynamically} against SQLite, so you can \emph{not} use
+the Ubuntu PPA at
+\href{https://launchpad.net/~ubuntu-rogerbinns/+archive/apsw}{https://launchpad.net/\textasciitilde{}ubuntu-rogerbinns/+archive/apsw} (these
+packages are statically linked).
\item {}
The \href{http://pypi.python.org/pypi/pyliblzma}{PyLibLZMA Python module}. To check if this module
@@ -271,9 +278,9 @@ least version 0.5.3.
The \href{http://code.google.com/p/python-llfuse/}{Python LLFUSE module}. To check if this module
is installed, execute \code{python -c 'import llfuse; print
llfuse.\_\_version\_\_'}. This should print a version number. You need at
-least version 0.29.
+least version 0.36.
-Note that earlier S3QL versions shipped with a builtin version of
+Note that early S3QL versions shipped with a built-in version of
this module. If you are upgrading from such a version, make sure to
completely remove the old S3QL version first.
@@ -291,6 +298,9 @@ Download S3QL from \href{http://code.google.com/p/s3ql/downloads/list}{http://co
Unpack it into a folder of your choice
\item {}
+Run \code{python setup.py build} to build S3QL.
+
+\item {}
Run \code{python setup.py test} to run a self-test. If this fails, ask
for help on the \href{http://groups.google.com/group/s3ql}{mailing list} or report a bug in the
\href{http://code.google.com/p/s3ql/issues/list}{issue tracker}.
@@ -616,11 +626,11 @@ problems.
\section{S3 compatible}
\label{backends:s3-compatible}
-S3QL is also able to access other, S3 compatible storage services for
-which no specific backend exists. Note that when accessing such
-services, only the lowest common denominator of available features can
-be used, so it is generally recommended to use a service specific
-backend instead.
+S3QL is also able to access other, S3 compatible storage services like
+\href{http://www.openstack.org/}{OpenStack} for which no specific backend exists. Note that when
+accessing such services, only the lowest common denominator of
+available features can be used, so it is generally recommended to use
+a service specific backend instead.
The storage URL for accessing an arbitrary S3 compatible storage
service is
@@ -640,7 +650,7 @@ not verify the server certificate (cf. \href{http://code.google.com/p/s3ql/issue
\section{Local}
-\label{backends:local}
+\label{backends:openstack}\label{backends:local}
S3QL is also able to store its data on the local file system. This can
be used to backup data on external media, or to access external
services that S3QL can not talk to directly (e.g., it is possible to
@@ -1327,8 +1337,6 @@ just print program version and exit
If user input is required, exit without prompting.
\item [-{-}force]
Force checking even if file system is marked clean.
-\item [-{-}renumber-inodes]
-Renumber inodes to be stricly sequential starting from 3
\end{optionlist}
\end{quote}
@@ -1641,39 +1649,6 @@ the \code{-{-}noleaf} option to work correctly on S3QL file systems. This
bug has already been fixed in recent find versions.
\item {}
-In theory, S3QL is not fully compatible with NFS. Since S3QL does
-not support \emph{inode generation numbers}, NFS clients may (once again,
-in theory) accidentally read or write the wrong file in the
-following situation:
-\begin{enumerate}
-\item {}
-An S3QL file system is exported over NFS
-
-\item {}
-NFS client 1 opens a file A
-
-\item {}
-Another NFS client 2 (or the server itself) deletes file A (without
-client 1 knowing about this)
-
-\item {}
-A new file B is created by either of the clients or the server
-
-\item {}
-NFS client 1 tries to read or write file A (which has actually already been deleted).
-
-\end{enumerate}
-
-In this situation it is possible that NFS client 1 actually writes
-or reads the newly created file B instead. The chances of this are 1
-to (2\textasciicircum{}32 - \emph{n}) where \emph{n} is the total number of directory entries
-in the S3QL file system (as displayed by \code{s3qlstat}).
-
-Luckily enough, as long as you have less than about 2 thousand
-million directory entries (2\textasciicircum{}31), the chances for this are totally
-irrelevant and you don't have to worry about it.
-
-\item {}
The \code{umount} and \code{fusermount -u} commands will \emph{not} block until all
data has been uploaded to the backend. (this is a FUSE limitation
that will hopefully be removed in the future, see \href{http://code.google.com/p/s3ql/issues/detail?id=159}{issue 159}). If you use
@@ -2471,8 +2446,6 @@ just print program version and exit
If user input is required, exit without prompting.
\item [-{-}force]
Force checking even if file system is marked clean.
-\item [-{-}renumber-inodes]
-Renumber inodes to be stricly sequential starting from 3
\end{optionlist}
\end{quote}
diff --git a/doc/man/fsck.s3ql.1 b/doc/man/fsck.s3ql.1
index 7e71a2c..9e4e91b 100644
--- a/doc/man/fsck.s3ql.1
+++ b/doc/man/fsck.s3ql.1
@@ -1,4 +1,4 @@
-.TH "FSCK.S3QL" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "FSCK.S3QL" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
fsck.s3ql \- Check an S3QL file system for errors
.
@@ -84,9 +84,6 @@ If user input is required, exit without prompting.
.TP
.B \-\-force
Force checking even if file system is marked clean.
-.TP
-.B \-\-renumber\-inodes
-Renumber inodes to be stricly sequential starting from 3
.UNINDENT
.UNINDENT
.UNINDENT
diff --git a/doc/man/mkfs.s3ql.1 b/doc/man/mkfs.s3ql.1
index 83320cb..ccb96a1 100644
--- a/doc/man/mkfs.s3ql.1
+++ b/doc/man/mkfs.s3ql.1
@@ -1,4 +1,4 @@
-.TH "MKFS.S3QL" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "MKFS.S3QL" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
mkfs.s3ql \- Create an S3QL file system
.
diff --git a/doc/man/mount.s3ql.1 b/doc/man/mount.s3ql.1
index 5e8c397..a34c490 100644
--- a/doc/man/mount.s3ql.1
+++ b/doc/man/mount.s3ql.1
@@ -1,4 +1,4 @@
-.TH "MOUNT.S3QL" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "MOUNT.S3QL" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
mount.s3ql \- Mount an S3QL file system
.
diff --git a/doc/man/s3qladm.1 b/doc/man/s3qladm.1
index 8fa458b..fb9f8bc 100644
--- a/doc/man/s3qladm.1
+++ b/doc/man/s3qladm.1
@@ -1,4 +1,4 @@
-.TH "S3QLADM" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "S3QLADM" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
s3qladm \- Manage S3QL buckets
.
diff --git a/doc/man/s3qlcp.1 b/doc/man/s3qlcp.1
index 0f768eb..8995116 100644
--- a/doc/man/s3qlcp.1
+++ b/doc/man/s3qlcp.1
@@ -1,4 +1,4 @@
-.TH "S3QLCP" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "S3QLCP" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
s3qlcp \- Copy-on-write replication on S3QL file systems
.
diff --git a/doc/man/s3qlctrl.1 b/doc/man/s3qlctrl.1
index c3d513f..1481e5d 100644
--- a/doc/man/s3qlctrl.1
+++ b/doc/man/s3qlctrl.1
@@ -1,4 +1,4 @@
-.TH "S3QLCTRL" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "S3QLCTRL" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
s3qlctrl \- Control a mounted S3QL file system
.
diff --git a/doc/man/s3qllock.1 b/doc/man/s3qllock.1
index 902b967..7c04007 100644
--- a/doc/man/s3qllock.1
+++ b/doc/man/s3qllock.1
@@ -1,4 +1,4 @@
-.TH "S3QLLOCK" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "S3QLLOCK" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
s3qllock \- Make trees on an S3QL file system immutable
.
diff --git a/doc/man/s3qlrm.1 b/doc/man/s3qlrm.1
index 554cc1a..6118aa2 100644
--- a/doc/man/s3qlrm.1
+++ b/doc/man/s3qlrm.1
@@ -1,4 +1,4 @@
-.TH "S3QLRM" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "S3QLRM" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
s3qlrm \- Fast tree removal on S3QL file systems
.
diff --git a/doc/man/s3qlstat.1 b/doc/man/s3qlstat.1
index f7438b1..ccee4e6 100644
--- a/doc/man/s3qlstat.1
+++ b/doc/man/s3qlstat.1
@@ -1,4 +1,4 @@
-.TH "S3QLSTAT" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "S3QLSTAT" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
s3qlstat \- Gather S3QL file system statistics
.
diff --git a/doc/man/umount.s3ql.1 b/doc/man/umount.s3ql.1
index ea54a6e..89d2224 100644
--- a/doc/man/umount.s3ql.1
+++ b/doc/man/umount.s3ql.1
@@ -1,4 +1,4 @@
-.TH "UMOUNT.S3QL" "1" "November 20, 2011" "1.6" "S3QL"
+.TH "UMOUNT.S3QL" "1" "November 27, 2011" "1.7" "S3QL"
.SH NAME
umount.s3ql \- Unmount an S3QL file system
.
diff --git a/doc/manual.pdf b/doc/manual.pdf
index 6d734e7..0bb83c7 100644
--- a/doc/manual.pdf
+++ b/doc/manual.pdf
Binary files differ
diff --git a/rst/backends.rst b/rst/backends.rst
index 5bc9521..002793f 100644
--- a/rst/backends.rst
+++ b/rst/backends.rst
@@ -154,11 +154,11 @@ problems.
S3 compatible
=============
-S3QL is also able to access other, S3 compatible storage services for
-which no specific backend exists. Note that when accessing such
-services, only the lowest common denominator of available features can
-be used, so it is generally recommended to use a service specific
-backend instead.
+S3QL is also able to access other, S3 compatible storage services like
+OpenStack_ for which no specific backend exists. Note that when
+accessing such services, only the lowest common denominator of
+available features can be used, so it is generally recommended to use
+a service specific backend instead.
The storage URL for accessing an arbitrary S3 compatible storage
service is ::
@@ -173,6 +173,7 @@ to use HTTPS connections. Note, however, that at this point S3QL does
not verify the server certificate (cf. `issue 267
<http://code.google.com/p/s3ql/issues/detail?id=267>`_).
+.. _OpenStack: http://www.openstack.org/
Local
=====
diff --git a/rst/installation.rst b/rst/installation.rst
index 0cfa670..6056906 100644
--- a/rst/installation.rst
+++ b/rst/installation.rst
@@ -34,7 +34,7 @@ that is not the case.
possible*.
* `Python <http://www.python.org/>`_, version 2.6.6 or newer, but not
- Python 3.x.
+ Python 3.x. Make sure to also install the development headers.
* The `PyCrypto++ Python Module
<http://pypi.python.org/pypi/pycryptopp>`_. To check if this module
@@ -46,13 +46,19 @@ that is not the case.
argparse.__version__'`. If argparse is installed, this will print
the version number. You need version 1.1 or later.
+* `SQLite <http://www.sqlite.org/>`_ version 3.7.0 or newer. SQLite
+ has to be installed as a *shared library* with development headers.
+
* The `APSW Python Module <http://code.google.com/p/apsw/>`_. To check
which (if any) version of APWS is installed, run the command ::
- python -c 'import apsw; print apsw.apswversion(), apsw.sqlitelibversion()'
+ python -c 'import apsw; print apsw.apswversion()'
- If APSW is installed, this should print two version numbers which
- both have to be at least 3.7.0.
+ The printed version number should be at least 3.7.0. Note that APSW
+ must be linked *dynamically* against SQLite, so you can *not* use
+ the Ubuntu PPA at
+ https://launchpad.net/~ubuntu-rogerbinns/+archive/apsw (these
+ packages are statically linked).
* The `PyLibLZMA Python module
<http://pypi.python.org/pypi/pyliblzma>`_. To check if this module
@@ -64,9 +70,9 @@ that is not the case.
<http://code.google.com/p/python-llfuse/>`_. To check if this module
is installed, execute `python -c 'import llfuse; print
llfuse.__version__'`. This should print a version number. You need at
- least version 0.29.
+ least version 0.36.
- Note that earlier S3QL versions shipped with a builtin version of
+ Note that early S3QL versions shipped with a built-in version of
this module. If you are upgrading from such a version, make sure to
completely remove the old S3QL version first.
@@ -79,7 +85,8 @@ To install S3QL itself, proceed as follows:
1. Download S3QL from http://code.google.com/p/s3ql/downloads/list
2. Unpack it into a folder of your choice
-3. Run `python setup.py test` to run a self-test. If this fails, ask
+3. Run `python setup.py build` to build S3QL.
+4. Run `python setup.py test` to run a self-test. If this fails, ask
for help on the `mailing list
<http://groups.google.com/group/s3ql>`_ or report a bug in the
`issue tracker <http://code.google.com/p/s3ql/issues/list>`_.
diff --git a/rst/issues.rst b/rst/issues.rst
index ac2cb8c..e79b2e1 100644
--- a/rst/issues.rst
+++ b/rst/issues.rst
@@ -45,28 +45,6 @@ Known Issues
the `--noleaf` option to work correctly on S3QL file systems. This
bug has already been fixed in recent find versions.
-
-* In theory, S3QL is not fully compatible with NFS. Since S3QL does
- not support *inode generation numbers*, NFS clients may (once again,
- in theory) accidentally read or write the wrong file in the
- following situation:
-
- #. An S3QL file system is exported over NFS
- #. NFS client 1 opens a file A
- #. Another NFS client 2 (or the server itself) deletes file A (without
- client 1 knowing about this)
- #. A new file B is created by either of the clients or the server
- #. NFS client 1 tries to read or write file A (which has actually already been deleted).
-
- In this situation it is possible that NFS client 1 actually writes
- or reads the newly created file B instead. The chances of this are 1
- to (2^32 - *n*) where *n* is the total number of directory entries
- in the S3QL file system (as displayed by `s3qlstat`).
-
- Luckily enough, as long as you have less than about 2 thousand
- million directory entries (2^31), the chances for this are totally
- irrelevant and you don't have to worry about it.
-
* The `umount` and `fusermount -u` commands will *not* block until all
data has been uploaded to the backend. (this is a FUSE limitation
that will hopefully be removed in the future, see `issue 159
diff --git a/setup.py b/setup.py
index 9fd0f58..9709162 100755
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,7 @@ from distribute_setup import use_setuptools
use_setuptools(version='0.6.14', download_delay=5)
import setuptools
import setuptools.command.test as setuptools_test
+from setuptools import Extension
class build_docs(setuptools.Command):
description = 'Build Sphinx documentation'
@@ -102,6 +103,19 @@ def main():
with open(os.path.join(basedir, 'rst', 'about.rst'), 'r') as fh:
long_desc = fh.read()
+ compile_args = ['-Wall', '-Wextra', '-Wno-unused-parameter' ]
+
+ # http://trac.cython.org/cython_trac/ticket/704
+ compile_args.append('-Wno-unused-but-set-variable')
+
+ # http://bugs.python.org/issue969718
+ if sys.version_info[0] == 2:
+ compile_args.append('-fno-strict-aliasing')
+
+ # http://bugs.python.org/issue7576
+ if sys.version_info[0] == 3 and sys.version_info[1] < 2:
+ compile_args.append('-Wno-missing-field-initializers')
+
setuptools.setup(
name='s3ql',
zip_safe=True,
@@ -126,41 +140,84 @@ def main():
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
provides=['s3ql'],
+ ext_modules=[Extension('s3ql._deltadump', ['src/s3ql/_deltadump.c'],
+ extra_compile_args=compile_args,
+ extra_link_args=[ '-lsqlite3'] )],
data_files = [ ('share/man/man1',
[ os.path.join('doc/man/', x) for x
in glob(os.path.join(basedir, 'doc', 'man', '*.1')) ]) ],
entry_points={ 'console_scripts':
[
- 'mkfs.s3ql = s3ql.cli.mkfs:main',
- 'fsck.s3ql = s3ql.cli.fsck:main',
- 'mount.s3ql = s3ql.cli.mount:main',
- 'umount.s3ql = s3ql.cli.umount:main',
- 's3qlcp = s3ql.cli.cp:main',
- 's3qlstat = s3ql.cli.statfs:main',
- 's3qladm = s3ql.cli.adm:main',
- 's3qlctrl = s3ql.cli.ctrl:main',
- 's3qllock = s3ql.cli.lock:main',
- 's3qlrm = s3ql.cli.remove:main',
+ 'mkfs.s3ql = s3ql.mkfs:main',
+ 'fsck.s3ql = s3ql.fsck:main',
+ 'mount.s3ql = s3ql.mount:main',
+ 'umount.s3ql = s3ql.umount:main',
+ 's3qlcp = s3ql.cp:main',
+ 's3qlstat = s3ql.statfs:main',
+ 's3qladm = s3ql.adm:main',
+ 's3qlctrl = s3ql.ctrl:main',
+ 's3qllock = s3ql.lock:main',
+ 's3qlrm = s3ql.remove:main',
]
},
install_requires=['apsw >= 3.7.0',
'pycryptopp',
-# 'llfuse >= 0.35',
+ 'llfuse >= 0.35',
'argparse >= 1.1',
'pyliblzma >= 0.5.3' ],
tests_require=['apsw >= 3.7.0', 'unittest2',
'pycryptopp',
-# 'llfuse >= 0.35',
+ 'llfuse >= 0.35',
'argparse >= 1.1',
'pyliblzma >= 0.5.3' ],
test_suite='tests',
cmdclass={'test': test,
'upload_docs': upload_docs,
+ 'build_cython': build_cython,
'build_sphinx': build_docs },
command_options = { 'sdist': { 'formats': ('setup.py', 'bztar') } },
)
-
+class build_cython(setuptools.Command):
+ user_options = []
+ boolean_options = []
+ description = "Compile .pyx to .c"
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ # Attribute defined outside init
+ #pylint: disable=W0201
+ self.extensions = self.distribution.ext_modules
+
+ def run(self):
+ try:
+ from Cython.Compiler.Main import compile as cython_compile
+ from Cython.Compiler.Options import extra_warnings
+ except ImportError:
+ raise SystemExit('Cython needs to be installed for this command')
+
+ directives = dict(extra_warnings)
+ directives['embedsignature'] = True
+ options = { 'recursive': False, 'verbose': True, 'timestamps': False,
+ 'compiler_directives': directives, 'warning_errors': True }
+
+ for extension in self.extensions:
+ for file_ in extension.sources:
+ (file_, ext) = os.path.splitext(file_)
+ path = os.path.join(basedir, file_)
+ if ext != '.c':
+ continue
+ if os.path.exists(path + '.pyx'):
+ print('compiling %s to %s' % (file_ + '.pyx', file_ + ext))
+ res = cython_compile(path + '.pyx', full_module_name=extension.name,
+ **options)
+ if res.num_errors != 0:
+ raise SystemExit('Cython encountered errors.')
+ else:
+ print('%s is up to date' % (file_ + ext,))
+
class test(setuptools_test.test):
# Attributes defined outside init, required by setuptools.
# pylint: disable=W0201
diff --git a/src/s3ql.egg-info/PKG-INFO b/src/s3ql.egg-info/PKG-INFO
index fc583e4..cc2f675 100644
--- a/src/s3ql.egg-info/PKG-INFO
+++ b/src/s3ql.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: s3ql
-Version: 1.6
+Version: 1.7
Summary: a full-featured file system for online data storage
Home-page: http://code.google.com/p/s3ql/
Author: Nikolaus Rath
diff --git a/src/s3ql.egg-info/SOURCES.txt b/src/s3ql.egg-info/SOURCES.txt
index c45ba1d..4858282 100644
--- a/src/s3ql.egg-info/SOURCES.txt
+++ b/src/s3ql.egg-info/SOURCES.txt
@@ -15,6 +15,7 @@ bin/umount.s3ql
contrib/benchmark.py
contrib/expire_backups.1
contrib/expire_backups.py
+contrib/fsck_db.py
contrib/pcp.1
contrib/pcp.py
contrib/s3ql.conf
@@ -158,15 +159,29 @@ rst/man/rm.rst
rst/man/stat.rst
rst/man/umount.rst
src/s3ql/__init__.py
+src/s3ql/_deltadump.c
+src/s3ql/_deltadump.pyx
+src/s3ql/adm.py
src/s3ql/block_cache.py
+src/s3ql/cleanup_manager.py
src/s3ql/common.py
+src/s3ql/cp.py
+src/s3ql/ctrl.py
src/s3ql/daemonize.py
src/s3ql/database.py
+src/s3ql/deltadump.py
src/s3ql/fs.py
src/s3ql/fsck.py
src/s3ql/inode_cache.py
+src/s3ql/lock.py
+src/s3ql/metadata.py
+src/s3ql/mkfs.py
+src/s3ql/mount.py
src/s3ql/ordered_dict.py
src/s3ql/parse_args.py
+src/s3ql/remove.py
+src/s3ql/statfs.py
+src/s3ql/umount.py
src/s3ql.egg-info/PKG-INFO
src/s3ql.egg-info/SOURCES.txt
src/s3ql.egg-info/dependency_links.txt
@@ -183,21 +198,11 @@ src/s3ql/backends/s3.py
src/s3ql/backends/s3c.py
src/s3ql/backends/s3cs.py
src/s3ql/backends/s3s.py
-src/s3ql/cli/__init__.py
-src/s3ql/cli/adm.py
-src/s3ql/cli/cp.py
-src/s3ql/cli/ctrl.py
-src/s3ql/cli/fsck.py
-src/s3ql/cli/lock.py
-src/s3ql/cli/mkfs.py
-src/s3ql/cli/mount.py
-src/s3ql/cli/remove.py
-src/s3ql/cli/statfs.py
-src/s3ql/cli/umount.py
tests/__init__.py
tests/_common.py
tests/data.tar.bz2
tests/t1_backends.py
+tests/t1_dump.py
tests/t1_ordered_dict.py
tests/t2_block_cache.py
tests/t3_fs_api.py
@@ -205,9 +210,11 @@ tests/t3_fsck.py
tests/t3_inode_cache.py
tests/t4_adm.py
tests/t4_fuse.py
-tests/t5_cli.py
tests/t5_cp.py
+tests/t5_ctrl.py
+tests/t5_fsck.py
tests/t5_full.py
+tests/t5_lock_rm.py
util/cmdline_lexer.py
util/distribute_setup.py
util/sphinx_pipeinclude.py \ No newline at end of file
diff --git a/src/s3ql.egg-info/entry_points.txt b/src/s3ql.egg-info/entry_points.txt
index 1d80f48..0aeca88 100644
--- a/src/s3ql.egg-info/entry_points.txt
+++ b/src/s3ql.egg-info/entry_points.txt
@@ -1,12 +1,12 @@
[console_scripts]
-fsck.s3ql = s3ql.cli.fsck:main
-s3qllock = s3ql.cli.lock:main
-s3qladm = s3ql.cli.adm:main
-s3qlctrl = s3ql.cli.ctrl:main
-umount.s3ql = s3ql.cli.umount:main
-s3qlcp = s3ql.cli.cp:main
-s3qlrm = s3ql.cli.remove:main
-mount.s3ql = s3ql.cli.mount:main
-s3qlstat = s3ql.cli.statfs:main
-mkfs.s3ql = s3ql.cli.mkfs:main
+fsck.s3ql = s3ql.fsck:main
+s3qllock = s3ql.lock:main
+s3qladm = s3ql.adm:main
+s3qlctrl = s3ql.ctrl:main
+umount.s3ql = s3ql.umount:main
+s3qlcp = s3ql.cp:main
+s3qlrm = s3ql.remove:main
+mount.s3ql = s3ql.mount:main
+s3qlstat = s3ql.statfs:main
+mkfs.s3ql = s3ql.mkfs:main
diff --git a/src/s3ql.egg-info/requires.txt b/src/s3ql.egg-info/requires.txt
index 30c88d6..91bedd3 100644
--- a/src/s3ql.egg-info/requires.txt
+++ b/src/s3ql.egg-info/requires.txt
@@ -1,4 +1,5 @@
apsw >= 3.7.0
pycryptopp
+llfuse >= 0.35
argparse >= 1.1
pyliblzma >= 0.5.3 \ No newline at end of file
diff --git a/src/s3ql/__init__.py b/src/s3ql/__init__.py
index ed134b0..1094134 100644
--- a/src/s3ql/__init__.py
+++ b/src/s3ql/__init__.py
@@ -8,9 +8,18 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function
-__all__ = [ 'backends', 'cli', 'parse_args', 'block_cache', "common", 'daemonize',
- 'database', 'fs', 'fsck', 'ordered_dict',
- 'VERSION', 'CURRENT_FS_REV' ]
+__all__ = [ 'adm', 'backends', 'block_cache', 'cleanup_manager', 'common',
+ 'cp', 'ctrl', 'daemonize', 'database', 'deltadump', '_deltadump',
+ 'fs', 'fsck', 'inode_cache', 'lock', 'mkfs', 'mount', 'ordered_dict',
+ 'parse_args', 'remove', 'statfs', 'umount', 'VERSION',
+ 'CURRENT_FS_REV', 'REV_VER_MAP' ]
-VERSION = '1.6'
-CURRENT_FS_REV = 13
+VERSION = '1.7'
+CURRENT_FS_REV = 14
+
+# Maps file system revisions to the last S3QL version that
+# supported this revision.
+REV_VER_MAP = { 14: '1.7',
+ 13: '1.6',
+ 12: '1.3',
+ 11: '1.0.1' }
diff --git a/src/s3ql/_deltadump.c b/src/s3ql/_deltadump.c
new file mode 100644
index 0000000..89189e9
--- /dev/null
+++ b/src/s3ql/_deltadump.c
@@ -0,0 +1,6278 @@
+/* Generated by Cython 0.15.1 on Sat Nov 26 18:34:22 2011 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#else
+
+#include <stddef.h> /* For offsetof */
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+
+#if PY_VERSION_HEX < 0x02040000
+ #define METH_COEXIST 0
+ #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
+ #define PyDict_Contains(d,o) PySequence_Contains(d,o)
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ typedef int Py_ssize_t;
+ #define PY_SSIZE_T_MAX INT_MAX
+ #define PY_SSIZE_T_MIN INT_MIN
+ #define PY_FORMAT_SIZE_T ""
+ #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
+ #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o)
+ #define PyNumber_Index(o) PyNumber_Int(o)
+ #define PyIndex_Check(o) PyNumber_Check(o)
+ #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
+ #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+ #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
+ #define PyVarObject_HEAD_INIT(type, size) \
+ PyObject_HEAD_INIT(type) size,
+ #define PyType_Modified(t)
+
+ typedef struct {
+ void *buf;
+ PyObject *obj;
+ Py_ssize_t len;
+ Py_ssize_t itemsize;
+ int readonly;
+ int ndim;
+ char *format;
+ Py_ssize_t *shape;
+ Py_ssize_t *strides;
+ Py_ssize_t *suboffsets;
+ void *internal;
+ } Py_buffer;
+
+ #define PyBUF_SIMPLE 0
+ #define PyBUF_WRITABLE 0x0001
+ #define PyBUF_FORMAT 0x0004
+ #define PyBUF_ND 0x0008
+ #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
+ #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
+ #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
+ #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
+ #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
+
+#endif
+
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define Py_TPFLAGS_CHECKTYPES 0
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+
+#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define PyBytesObject PyStringObject
+ #define PyBytes_Type PyString_Type
+ #define PyBytes_Check PyString_Check
+ #define PyBytes_CheckExact PyString_CheckExact
+ #define PyBytes_FromString PyString_FromString
+ #define PyBytes_FromStringAndSize PyString_FromStringAndSize
+ #define PyBytes_FromFormat PyString_FromFormat
+ #define PyBytes_DecodeEscape PyString_DecodeEscape
+ #define PyBytes_AsString PyString_AsString
+ #define PyBytes_AsStringAndSize PyString_AsStringAndSize
+ #define PyBytes_Size PyString_Size
+ #define PyBytes_AS_STRING PyString_AS_STRING
+ #define PyBytes_GET_SIZE PyString_GET_SIZE
+ #define PyBytes_Repr PyString_Repr
+ #define PyBytes_Concat PyString_Concat
+ #define PyBytes_ConcatAndDel PyString_ConcatAndDel
+#endif
+
+#if PY_VERSION_HEX < 0x02060000
+ #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
+ #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+
+#if PY_VERSION_HEX < 0x03020000
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#endif
+
+#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
+ #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
+ #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
+ #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
+#else
+ #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
+ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
+ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
+ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
+ #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
+ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
+ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
+ #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
+ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
+ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
+ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+ #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
+#else
+ #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
+ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
+ #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+ #define __Pyx_NAMESTR(n) ((char *)(n))
+ #define __Pyx_DOCSTR(n) ((char *)(n))
+#else
+ #define __Pyx_NAMESTR(n) (n)
+ #define __Pyx_DOCSTR(n) (n)
+#endif
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#define __PYX_HAVE__s3ql___deltadump
+#define __PYX_HAVE_API__s3ql___deltadump
+#include "stdio.h"
+#include "string.h"
+#include "errno.h"
+#include "stdlib.h"
+#include "stdint.h"
+#include "unistd.h"
+#include "endian.h"
+#include "sqlite3.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#ifdef PYREX_WITHOUT_ASSERTIONS
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+
+/* inline attribute */
+#ifndef CYTHON_INLINE
+ #if defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+/* unused attribute */
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || defined(__INTEL_COMPILER)
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+
+typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+
+/* Type Conversion Predeclarations */
+
+#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
+#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
+
+#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
+
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+
+
+#ifdef __GNUC__
+ /* Test for GCC > 2.95 */
+ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+ #else /* __GNUC__ > 2 ... */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+ #endif /* __GNUC__ > 2 ... */
+#else /* __GNUC__ */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "_deltadump.pyx",
+};
+
+/*--- Type declarations ---*/
+struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load;
+
+/* "s3ql/_deltadump.pyx":183
+ *
+ *
+ * def _dump_or_load(table, order, columns, db, fh): # <<<<<<<<<<<<<<
+ * '''Dump or load *columns* of *table*
+ *
+ */
+struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load {
+ PyObject_HEAD
+ void *__pyx_v_buf;
+ int *__pyx_v_col_args;
+ int *__pyx_v_col_types;
+ FILE *__pyx_v_fp;
+ int64_t *__pyx_v_int64_prev;
+ sqlite3_stmt *__pyx_v_stmt;
+};
+
+
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+ #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+ #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif /* CYTHON_REFNANNY */
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name, PyObject* kw_name); /*proto*/
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
+
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+
+
+#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_List_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) {
+ if (likely(o != Py_None)) {
+ if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) {
+ PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Tuple_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) {
+ if (likely(o != Py_None)) {
+ if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) {
+ PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+
+#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Fast(o, i) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ r = PyList_GET_ITEM(o, i);
+ Py_INCREF(r);
+ }
+ else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+ r = PyTuple_GET_ITEM(o, i);
+ Py_INCREF(r);
+ }
+ else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) {
+ r = PySequence_GetItem(o, i);
+ }
+ else {
+ r = __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+ }
+ return r;
+}
+
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+
+static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/
+
+#define __pyx_binding_PyCFunctionType_USED 1
+
+typedef struct {
+ PyCFunctionObject func;
+} __pyx_binding_PyCFunctionType_object;
+
+static PyTypeObject __pyx_binding_PyCFunctionType_type;
+static PyTypeObject *__pyx_binding_PyCFunctionType = NULL;
+
+static PyObject *__pyx_binding_PyCFunctionType_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module); /* proto */
+#define __pyx_binding_PyCFunctionType_New(ml, self) __pyx_binding_PyCFunctionType_NewEx(ml, self, NULL)
+
+static int __pyx_binding_PyCFunctionType_init(void); /* proto */
+
+static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_int64_t(int64_t);
+
+static CYTHON_INLINE int64_t __Pyx_PyInt_from_py_int64_t(PyObject *);
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
+
+static int __Pyx_check_binary_version(void);
+
+static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
+ int __pyx_lineno, const char *__pyx_filename); /*proto*/
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+/* Module declarations from 'cpython.long' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'cpython.exc' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.errno' */
+
+/* Module declarations from 'libc.stdlib' */
+
+/* Module declarations from 'libc.stdint' */
+
+/* Module declarations from 'posix.unistd' */
+
+/* Module declarations from 's3ql._deltadump' */
+static PyTypeObject *__pyx_ptype_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load = 0;
+static int __pyx_v_4s3ql_10_deltadump__INTEGER;
+static int __pyx_v_4s3ql_10_deltadump__BLOB;
+static int __pyx_v_4s3ql_10_deltadump__TIME;
+static uint8_t __pyx_v_4s3ql_10_deltadump_INT8;
+static uint8_t __pyx_v_4s3ql_10_deltadump_INT16;
+static uint8_t __pyx_v_4s3ql_10_deltadump_INT32;
+static uint8_t __pyx_v_4s3ql_10_deltadump_INT64;
+static double __pyx_v_4s3ql_10_deltadump_time_scale;
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_fwrite(const void *, size_t, FILE *); /*proto*/
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_fread(void *, size_t, FILE *); /*proto*/
+static int __pyx_f_4s3ql_10_deltadump_free(void *); /*proto*/
+static int __pyx_f_4s3ql_10_deltadump_sqlite3_finalize_p(sqlite3_stmt *); /*proto*/
+static int __pyx_f_4s3ql_10_deltadump_fclose(FILE *); /*proto*/
+static void *__pyx_f_4s3ql_10_deltadump_calloc(size_t, size_t); /*proto*/
+static PyObject *__pyx_f_4s3ql_10_deltadump__dump_table(int *, int *, int64_t *, int, sqlite3_stmt *, FILE *); /*proto*/
+static PyObject *__pyx_f_4s3ql_10_deltadump__load_table(int *, int *, int64_t *, int, int, sqlite3_stmt *, FILE *, void *); /*proto*/
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_write_integer(int64_t, FILE *); /*proto*/
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_read_integer(int64_t *, FILE *); /*proto*/
+#define __Pyx_MODULE_NAME "s3ql._deltadump"
+int __pyx_module_is_main_s3ql___deltadump = 0;
+
+/* Implementation of 's3ql._deltadump' */
+static PyObject *__pyx_builtin_IOError;
+static PyObject *__pyx_builtin_OSError;
+static PyObject *__pyx_builtin_range;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_RuntimeError;
+static char __pyx_k_1[] = "r+b";
+static char __pyx_k_2[] = "s3ql._deltadump";
+static char __pyx_k_3[] = "Invalid type for column %d";
+static char __pyx_k_4[] = "INSERT INTO %s (%s) VALUES(%s)";
+static char __pyx_k_5[] = ", ";
+static char __pyx_k_6[] = "?";
+static char __pyx_k_7[] = "SELECT %s FROM %s ORDER BY %s ";
+static char __pyx_k_8[] = "_dump_or_load(%s): reading %d rows";
+static char __pyx_k_9[] = "SELECT COUNT(rowid) FROM %s";
+static char __pyx_k_10[] = "_dump_or_load(%s): writing %d rows";
+static char __pyx_k_12[] = "Can't dump NULL values";
+static char __pyx_k_14[] = "Can not dump BLOB of size %d (max: %d)";
+static char __pyx_k_15[] = "Length %d != %d in column %d";
+static char __pyx_k_16[] = "BLOB too large to read (%d vs %d)";
+static char __pyx_k_17[] = "Ouch!";
+static char __pyx_k_19[] = "\n_deltadump.pyx - this file is part of S3QL (http://s3ql.googlecode.com)\n\nCopyright (C) Nikolaus Rath <Nikolaus@rath.org>\n\nThis program can be distributed under the terms of the GNU GPLv3.\n";
+static char __pyx_k__db[] = "db";
+static char __pyx_k__fh[] = "fh";
+static char __pyx_k__os[] = "os";
+static char __pyx_k__log[] = "log";
+static char __pyx_k__BLOB[] = "BLOB";
+static char __pyx_k__TIME[] = "TIME";
+static char __pyx_k__apsw[] = "apsw";
+static char __pyx_k__conn[] = "conn";
+static char __pyx_k__fail[] = "fail";
+static char __pyx_k__join[] = "join";
+static char __pyx_k__debug[] = "debug";
+static char __pyx_k__order[] = "order";
+static char __pyx_k__range[] = "range";
+static char __pyx_k__table[] = "table";
+static char __pyx_k__fileno[] = "fileno";
+static char __pyx_k__INTEGER[] = "INTEGER";
+static char __pyx_k__IOError[] = "IOError";
+static char __pyx_k__OSError[] = "OSError";
+static char __pyx_k__columns[] = "columns";
+static char __pyx_k__get_val[] = "get_val";
+static char __pyx_k__logging[] = "logging";
+static char __pyx_k____exit__[] = "__exit__";
+static char __pyx_k____main__[] = "__main__";
+static char __pyx_k____test__[] = "__test__";
+static char __pyx_k__register[] = "register";
+static char __pyx_k____enter__[] = "__enter__";
+static char __pyx_k__deltadump[] = "deltadump";
+static char __pyx_k__getLogger[] = "getLogger";
+static char __pyx_k__ValueError[] = "ValueError";
+static char __pyx_k__dump_table[] = "dump_table";
+static char __pyx_k__load_table[] = "load_table";
+static char __pyx_k__RuntimeError[] = "RuntimeError";
+static char __pyx_k__exceptionfor[] = "exceptionfor";
+static char __pyx_k__MAX_BLOB_SIZE[] = "MAX_BLOB_SIZE";
+static char __pyx_k___dump_or_load[] = "_dump_or_load";
+static char __pyx_k__CleanupManager[] = "CleanupManager";
+static char __pyx_k__sqlite3pointer[] = "sqlite3pointer";
+static char __pyx_k__cleanup_manager[] = "cleanup_manager";
+static PyObject *__pyx_kp_s_10;
+static PyObject *__pyx_kp_s_12;
+static PyObject *__pyx_kp_s_14;
+static PyObject *__pyx_kp_s_15;
+static PyObject *__pyx_kp_s_16;
+static PyObject *__pyx_kp_s_17;
+static PyObject *__pyx_n_s_2;
+static PyObject *__pyx_kp_s_3;
+static PyObject *__pyx_kp_s_4;
+static PyObject *__pyx_kp_s_5;
+static PyObject *__pyx_kp_s_6;
+static PyObject *__pyx_kp_s_7;
+static PyObject *__pyx_kp_s_8;
+static PyObject *__pyx_kp_s_9;
+static PyObject *__pyx_n_s__BLOB;
+static PyObject *__pyx_n_s__CleanupManager;
+static PyObject *__pyx_n_s__INTEGER;
+static PyObject *__pyx_n_s__IOError;
+static PyObject *__pyx_n_s__MAX_BLOB_SIZE;
+static PyObject *__pyx_n_s__OSError;
+static PyObject *__pyx_n_s__RuntimeError;
+static PyObject *__pyx_n_s__TIME;
+static PyObject *__pyx_n_s__ValueError;
+static PyObject *__pyx_n_s____enter__;
+static PyObject *__pyx_n_s____exit__;
+static PyObject *__pyx_n_s____main__;
+static PyObject *__pyx_n_s____test__;
+static PyObject *__pyx_n_s___dump_or_load;
+static PyObject *__pyx_n_s__apsw;
+static PyObject *__pyx_n_s__cleanup_manager;
+static PyObject *__pyx_n_s__columns;
+static PyObject *__pyx_n_s__conn;
+static PyObject *__pyx_n_s__db;
+static PyObject *__pyx_n_s__debug;
+static PyObject *__pyx_n_s__deltadump;
+static PyObject *__pyx_n_s__dump_table;
+static PyObject *__pyx_n_s__exceptionfor;
+static PyObject *__pyx_n_s__fail;
+static PyObject *__pyx_n_s__fh;
+static PyObject *__pyx_n_s__fileno;
+static PyObject *__pyx_n_s__getLogger;
+static PyObject *__pyx_n_s__get_val;
+static PyObject *__pyx_n_s__join;
+static PyObject *__pyx_n_s__load_table;
+static PyObject *__pyx_n_s__log;
+static PyObject *__pyx_n_s__logging;
+static PyObject *__pyx_n_s__order;
+static PyObject *__pyx_n_s__os;
+static PyObject *__pyx_n_s__range;
+static PyObject *__pyx_n_s__register;
+static PyObject *__pyx_n_s__sqlite3pointer;
+static PyObject *__pyx_n_s__table;
+static PyObject *__pyx_int_4096;
+static PyObject *__pyx_k_tuple_11;
+static PyObject *__pyx_k_tuple_13;
+static PyObject *__pyx_k_tuple_18;
+static PyObject *__pyx_k_tuple_20;
+
+/* "s3ql/_deltadump.pyx":97
+ * cdef double time_scale = 1<<30
+ *
+ * cdef inline int fwrite(const_void* buf, size_t len_, FILE* fp) except -1: # <<<<<<<<<<<<<<
+ * '''Call libc's fwrite() and raise exception on failure'''
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_fwrite(const void *__pyx_v_buf, size_t __pyx_v_len_, FILE *__pyx_v_fp) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("fwrite");
+
+ /* "s3ql/_deltadump.pyx":100
+ * '''Call libc's fwrite() and raise exception on failure'''
+ *
+ * if fwrite_c(buf, len_, 1, fp) != 1: # <<<<<<<<<<<<<<
+ * raise IOError(errno, strerror(errno))
+ *
+ */
+ __pyx_t_1 = (fwrite(__pyx_v_buf, __pyx_v_len_, 1, __pyx_v_fp) != 1);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":101
+ *
+ * if fwrite_c(buf, len_, 1, fp) != 1:
+ * raise IOError(errno, strerror(errno)) # <<<<<<<<<<<<<<
+ *
+ * cdef inline int fread(void* buf, size_t len_, FILE* fp) except -1:
+ */
+ __pyx_t_2 = PyInt_FromLong(errno); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyBytes_FromString(strerror(errno)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("s3ql._deltadump.fwrite", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":103
+ * raise IOError(errno, strerror(errno))
+ *
+ * cdef inline int fread(void* buf, size_t len_, FILE* fp) except -1: # <<<<<<<<<<<<<<
+ * '''Call libc's fread() and raise exception on failure'''
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_fread(void *__pyx_v_buf, size_t __pyx_v_len_, FILE *__pyx_v_fp) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("fread");
+
+ /* "s3ql/_deltadump.pyx":106
+ * '''Call libc's fread() and raise exception on failure'''
+ *
+ * if fread_c(buf, len_, 1, fp) != 1: # <<<<<<<<<<<<<<
+ * raise IOError(errno, strerror(errno))
+ *
+ */
+ __pyx_t_1 = (fread(__pyx_v_buf, __pyx_v_len_, 1, __pyx_v_fp) != 1);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":107
+ *
+ * if fread_c(buf, len_, 1, fp) != 1:
+ * raise IOError(errno, strerror(errno)) # <<<<<<<<<<<<<<
+ *
+ * cdef int free(void* ptr) except -1:
+ */
+ __pyx_t_2 = PyInt_FromLong(errno); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyBytes_FromString(strerror(errno)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_IOError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("s3ql._deltadump.fread", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":109
+ * raise IOError(errno, strerror(errno))
+ *
+ * cdef int free(void* ptr) except -1: # <<<<<<<<<<<<<<
+ * '''Call libc.free() and return None'''
+ *
+ */
+
+static int __pyx_f_4s3ql_10_deltadump_free(void *__pyx_v_ptr) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("free");
+
+ /* "s3ql/_deltadump.pyx":112
+ * '''Call libc.free() and return None'''
+ *
+ * free_c(ptr) # <<<<<<<<<<<<<<
+ *
+ * cdef int sqlite3_finalize_p(sqlite3_stmt* stmt) except -1:
+ */
+ free(__pyx_v_ptr);
+
+ __pyx_r = 0;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":114
+ * free_c(ptr)
+ *
+ * cdef int sqlite3_finalize_p(sqlite3_stmt* stmt) except -1: # <<<<<<<<<<<<<<
+ * '''Call sqlite3_finalize and raise exception on failure'''
+ *
+ */
+
+static int __pyx_f_4s3ql_10_deltadump_sqlite3_finalize_p(sqlite3_stmt *__pyx_v_stmt) {
+ int __pyx_v_rc;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("sqlite3_finalize_p");
+
+ /* "s3ql/_deltadump.pyx":117
+ * '''Call sqlite3_finalize and raise exception on failure'''
+ *
+ * rc = sqlite3_finalize(stmt) # <<<<<<<<<<<<<<
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ */
+ __pyx_v_rc = sqlite3_finalize(__pyx_v_stmt);
+
+ /* "s3ql/_deltadump.pyx":118
+ *
+ * rc = sqlite3_finalize(stmt)
+ * if rc != SQLITE_OK: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ *
+ */
+ __pyx_t_1 = (__pyx_v_rc != SQLITE_OK);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":119
+ * rc = sqlite3_finalize(stmt)
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ *
+ * cdef int fclose(FILE* fp) except -1:
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("s3ql._deltadump.sqlite3_finalize_p", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":121
+ * raise apsw.exceptionfor(rc)
+ *
+ * cdef int fclose(FILE* fp) except -1: # <<<<<<<<<<<<<<
+ * '''Call libc.fclose() and raise exception on failure'''
+ *
+ */
+
+static int __pyx_f_4s3ql_10_deltadump_fclose(FILE *__pyx_v_fp) {
+ Py_ssize_t __pyx_v_off;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("fclose");
+
+ /* "s3ql/_deltadump.pyx":130
+ * # the same thing, but this does not seem to be documented so we don't rely
+ * # on it.
+ * off = ftell(fp) # <<<<<<<<<<<<<<
+ * if off == -1:
+ * raise OSError(errno, strerror(errno))
+ */
+ __pyx_v_off = ftell(__pyx_v_fp);
+
+ /* "s3ql/_deltadump.pyx":131
+ * # on it.
+ * off = ftell(fp)
+ * if off == -1: # <<<<<<<<<<<<<<
+ * raise OSError(errno, strerror(errno))
+ *
+ */
+ __pyx_t_1 = (__pyx_v_off == -1);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":132
+ * off = ftell(fp)
+ * if off == -1:
+ * raise OSError(errno, strerror(errno)) # <<<<<<<<<<<<<<
+ *
+ * if lseek(fileno(fp), off, SEEK_SET) != off:
+ */
+ __pyx_t_2 = PyInt_FromLong(errno); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyBytes_FromString(strerror(errno)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_OSError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "s3ql/_deltadump.pyx":134
+ * raise OSError(errno, strerror(errno))
+ *
+ * if lseek(fileno(fp), off, SEEK_SET) != off: # <<<<<<<<<<<<<<
+ * raise OSError(errno, strerror(errno))
+ *
+ */
+ __pyx_t_1 = (lseek(fileno(__pyx_v_fp), __pyx_v_off, SEEK_SET) != __pyx_v_off);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":135
+ *
+ * if lseek(fileno(fp), off, SEEK_SET) != off:
+ * raise OSError(errno, strerror(errno)) # <<<<<<<<<<<<<<
+ *
+ * if fclose_c(fp) != 0:
+ */
+ __pyx_t_3 = PyInt_FromLong(errno); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyBytes_FromString(strerror(errno)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_t_4));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
+ __pyx_t_3 = 0;
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_OSError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "s3ql/_deltadump.pyx":137
+ * raise OSError(errno, strerror(errno))
+ *
+ * if fclose_c(fp) != 0: # <<<<<<<<<<<<<<
+ * raise OSError(errno, strerror(errno))
+ *
+ */
+ __pyx_t_1 = (fclose(__pyx_v_fp) != 0);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":138
+ *
+ * if fclose_c(fp) != 0:
+ * raise OSError(errno, strerror(errno)) # <<<<<<<<<<<<<<
+ *
+ * cdef void* calloc(size_t cnt, size_t size) except NULL:
+ */
+ __pyx_t_4 = PyInt_FromLong(errno); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyBytes_FromString(strerror(errno)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_4 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_OSError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("s3ql._deltadump.fclose", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":140
+ * raise OSError(errno, strerror(errno))
+ *
+ * cdef void* calloc(size_t cnt, size_t size) except NULL: # <<<<<<<<<<<<<<
+ * '''Call libc.calloc and raise exception on failure'''
+ *
+ */
+
+static void *__pyx_f_4s3ql_10_deltadump_calloc(size_t __pyx_v_cnt, size_t __pyx_v_size) {
+ void *__pyx_v_ptr;
+ void *__pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("calloc");
+
+ /* "s3ql/_deltadump.pyx":145
+ * cdef void* ptr
+ *
+ * ptr = calloc_c(cnt, size) # <<<<<<<<<<<<<<
+ *
+ * if ptr is NULL:
+ */
+ __pyx_v_ptr = calloc(__pyx_v_cnt, __pyx_v_size);
+
+ /* "s3ql/_deltadump.pyx":147
+ * ptr = calloc_c(cnt, size)
+ *
+ * if ptr is NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ *
+ */
+ __pyx_t_1 = (__pyx_v_ptr == NULL);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":148
+ *
+ * if ptr is NULL:
+ * PyErr_NoMemory() # <<<<<<<<<<<<<<
+ *
+ * return ptr
+ */
+ __pyx_t_2 = PyErr_NoMemory(); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "s3ql/_deltadump.pyx":150
+ * PyErr_NoMemory()
+ *
+ * return ptr # <<<<<<<<<<<<<<
+ *
+ * def dump_table(table, order, columns, db, fh):
+ */
+ __pyx_r = __pyx_v_ptr;
+ goto __pyx_L0;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("s3ql._deltadump.calloc", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":152
+ * return ptr
+ *
+ * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<<
+ * '''Dump *columns* of *table* into *fh*
+ *
+ */
+
+static PyObject *__pyx_pf_4s3ql_10_deltadump_dump_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_4s3ql_10_deltadump_dump_table[] = "dump_table(table, order, columns, db, fh)\nDump *columns* of *table* into *fh*\n\n *order* specifies the order in which the rows are written and must be a\n string that can be inserted after the \"ORDER BY\" clause in an SQL SELECT\n statement.\n \n *db* is an `s3ql.Connection` instance for the database.\n \n *columns* must a list of 3-tuples, one for each column that should be\n stored. The first element of the tuple must contain the column name and the\n second element the type of data stored in the column (`INTEGER`, `TIME`\n or `BLOB`). Times will be converted to nanosecond integers.\n \n For integers and seconds, the third tuple element specifies the expected\n change of the values between rows. For blobs it can be either zero\n (indicating variable length columns) or an integer specifying the length of\n the column values in bytes.\n ";
+static PyMethodDef __pyx_mdef_4s3ql_10_deltadump_dump_table = {__Pyx_NAMESTR("dump_table"), (PyCFunction)__pyx_pf_4s3ql_10_deltadump_dump_table, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4s3ql_10_deltadump_dump_table)};
+static PyObject *__pyx_pf_4s3ql_10_deltadump_dump_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_table = 0;
+ PyObject *__pyx_v_order = 0;
+ PyObject *__pyx_v_columns = 0;
+ PyObject *__pyx_v_db = 0;
+ PyObject *__pyx_v_fh = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__table,&__pyx_n_s__order,&__pyx_n_s__columns,&__pyx_n_s__db,&__pyx_n_s__fh,0};
+ __Pyx_RefNannySetupContext("dump_table");
+ __pyx_self = __pyx_self;
+ {
+ PyObject* values[5] = {0,0,0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__table);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__order);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__columns);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 3:
+ values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__db);
+ if (likely(values[3])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 4:
+ values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fh);
+ if (likely(values[4])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "dump_table") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ }
+ __pyx_v_table = values[0];
+ __pyx_v_order = values[1];
+ __pyx_v_columns = values[2];
+ __pyx_v_db = values[3];
+ __pyx_v_fh = values[4];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("dump_table", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("s3ql._deltadump.dump_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "s3ql/_deltadump.pyx":172
+ * '''
+ *
+ * return _dump_or_load(table, order, columns, db, fh) # <<<<<<<<<<<<<<
+ *
+ * def load_table(table, columns, db, fh):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___dump_or_load); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_table);
+ __Pyx_GIVEREF(__pyx_v_table);
+ __Pyx_INCREF(__pyx_v_order);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_order);
+ __Pyx_GIVEREF(__pyx_v_order);
+ __Pyx_INCREF(__pyx_v_columns);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_columns);
+ __Pyx_GIVEREF(__pyx_v_columns);
+ __Pyx_INCREF(__pyx_v_db);
+ PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_v_db);
+ __Pyx_GIVEREF(__pyx_v_db);
+ __Pyx_INCREF(__pyx_v_fh);
+ PyTuple_SET_ITEM(__pyx_t_2, 4, __pyx_v_fh);
+ __Pyx_GIVEREF(__pyx_v_fh);
+ __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("s3ql._deltadump.dump_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":174
+ * return _dump_or_load(table, order, columns, db, fh)
+ *
+ * def load_table(table, columns, db, fh): # <<<<<<<<<<<<<<
+ * '''Load *columns* of *table* from *fh*
+ *
+ */
+
+static PyObject *__pyx_pf_4s3ql_10_deltadump_1load_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_4s3ql_10_deltadump_1load_table[] = "load_table(table, columns, db, fh)\nLoad *columns* of *table* from *fh*\n\n Parameters are described in the docstring of the `dump_table` function.\n ";
+static PyMethodDef __pyx_mdef_4s3ql_10_deltadump_1load_table = {__Pyx_NAMESTR("load_table"), (PyCFunction)__pyx_pf_4s3ql_10_deltadump_1load_table, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4s3ql_10_deltadump_1load_table)};
+static PyObject *__pyx_pf_4s3ql_10_deltadump_1load_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_table = 0;
+ PyObject *__pyx_v_columns = 0;
+ PyObject *__pyx_v_db = 0;
+ PyObject *__pyx_v_fh = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__table,&__pyx_n_s__columns,&__pyx_n_s__db,&__pyx_n_s__fh,0};
+ __Pyx_RefNannySetupContext("load_table");
+ __pyx_self = __pyx_self;
+ {
+ PyObject* values[4] = {0,0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__table);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__columns);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("load_table", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__db);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("load_table", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 3:
+ values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fh);
+ if (likely(values[3])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("load_table", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "load_table") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ }
+ __pyx_v_table = values[0];
+ __pyx_v_columns = values[1];
+ __pyx_v_db = values[2];
+ __pyx_v_fh = values[3];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("load_table", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("s3ql._deltadump.load_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "s3ql/_deltadump.pyx":180
+ * '''
+ *
+ * return _dump_or_load(table, None, columns, db, fh) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___dump_or_load); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_table);
+ __Pyx_GIVEREF(__pyx_v_table);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(__pyx_v_columns);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_columns);
+ __Pyx_GIVEREF(__pyx_v_columns);
+ __Pyx_INCREF(__pyx_v_db);
+ PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_v_db);
+ __Pyx_GIVEREF(__pyx_v_db);
+ __Pyx_INCREF(__pyx_v_fh);
+ PyTuple_SET_ITEM(__pyx_t_2, 4, __pyx_v_fh);
+ __Pyx_GIVEREF(__pyx_v_fh);
+ __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("s3ql._deltadump.load_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":223
+ * if fp == NULL:
+ * raise OSError(errno, strerror(errno))
+ * cleanup.register(lambda: fclose(fp)) # <<<<<<<<<<<<<<
+ *
+ * # Allocate col_args and col_types
+ */
+
+static PyObject *__pyx_lambda_funcdef_lambda1(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyMethodDef __pyx_lambda_methdef_lambda1 = {__Pyx_NAMESTR("lambda1"), (PyCFunction)__pyx_lambda_funcdef_lambda1, METH_NOARGS, __Pyx_DOCSTR(0)};
+static PyObject *__pyx_lambda_funcdef_lambda1(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_cur_scope;
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_outer_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("lambda1");
+ __pyx_outer_scope = (struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *)__pyx_self;
+ __pyx_cur_scope = __pyx_outer_scope;
+ __pyx_self = __pyx_self;
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_fclose(__pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load.lambda1", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":228
+ * col_count = len(columns)
+ * col_types = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_types)) # <<<<<<<<<<<<<<
+ * col_args = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_args))
+ */
+
+static PyObject *__pyx_lambda_funcdef_lambda2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyMethodDef __pyx_lambda_methdef_lambda2 = {__Pyx_NAMESTR("lambda2"), (PyCFunction)__pyx_lambda_funcdef_lambda2, METH_NOARGS, __Pyx_DOCSTR(0)};
+static PyObject *__pyx_lambda_funcdef_lambda2(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_cur_scope;
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_outer_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("lambda2");
+ __pyx_outer_scope = (struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *)__pyx_self;
+ __pyx_cur_scope = __pyx_outer_scope;
+ __pyx_self = __pyx_self;
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_free(__pyx_cur_scope->__pyx_v_col_types); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load.lambda2", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":230
+ * cleanup.register(lambda: free(col_types))
+ * col_args = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_args)) # <<<<<<<<<<<<<<
+ *
+ * # Initialize col_args and col_types
+ */
+
+static PyObject *__pyx_lambda_funcdef_lambda3(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyMethodDef __pyx_lambda_methdef_lambda3 = {__Pyx_NAMESTR("lambda3"), (PyCFunction)__pyx_lambda_funcdef_lambda3, METH_NOARGS, __Pyx_DOCSTR(0)};
+static PyObject *__pyx_lambda_funcdef_lambda3(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_cur_scope;
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_outer_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("lambda3");
+ __pyx_outer_scope = (struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *)__pyx_self;
+ __pyx_cur_scope = __pyx_outer_scope;
+ __pyx_self = __pyx_self;
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_free(__pyx_cur_scope->__pyx_v_col_args); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load.lambda3", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":245
+ * # Allocate int64_prev
+ * int64_prev = <int64_t*> calloc(len(columns), sizeof(int64_t))
+ * cleanup.register(lambda: free(int64_prev)) # <<<<<<<<<<<<<<
+ *
+ * # Prepare statement
+ */
+
+static PyObject *__pyx_lambda_funcdef_lambda4(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyMethodDef __pyx_lambda_methdef_lambda4 = {__Pyx_NAMESTR("lambda4"), (PyCFunction)__pyx_lambda_funcdef_lambda4, METH_NOARGS, __Pyx_DOCSTR(0)};
+static PyObject *__pyx_lambda_funcdef_lambda4(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_cur_scope;
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_outer_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("lambda4");
+ __pyx_outer_scope = (struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *)__pyx_self;
+ __pyx_cur_scope = __pyx_outer_scope;
+ __pyx_self = __pyx_self;
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_free(__pyx_cur_scope->__pyx_v_int64_prev); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load.lambda4", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":260
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ * cleanup.register(lambda: sqlite3_finalize_p(stmt)) # <<<<<<<<<<<<<<
+ *
+ * # Dump or load data as requested
+ */
+
+static PyObject *__pyx_lambda_funcdef_lambda5(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyMethodDef __pyx_lambda_methdef_lambda5 = {__Pyx_NAMESTR("lambda5"), (PyCFunction)__pyx_lambda_funcdef_lambda5, METH_NOARGS, __Pyx_DOCSTR(0)};
+static PyObject *__pyx_lambda_funcdef_lambda5(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_cur_scope;
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_outer_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("lambda5");
+ __pyx_outer_scope = (struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *)__pyx_self;
+ __pyx_cur_scope = __pyx_outer_scope;
+ __pyx_self = __pyx_self;
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_sqlite3_finalize_p(__pyx_cur_scope->__pyx_v_stmt); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load.lambda5", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":265
+ * if order is None:
+ * buf = calloc(MAX_BLOB_SIZE, 1)
+ * cleanup.register(lambda: free(buf)) # <<<<<<<<<<<<<<
+ * read_integer(&row_count, fp)
+ * log.debug('_dump_or_load(%s): reading %d rows', table, row_count)
+ */
+
+static PyObject *__pyx_lambda_funcdef_lambda6(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyMethodDef __pyx_lambda_methdef_lambda6 = {__Pyx_NAMESTR("lambda6"), (PyCFunction)__pyx_lambda_funcdef_lambda6, METH_NOARGS, __Pyx_DOCSTR(0)};
+static PyObject *__pyx_lambda_funcdef_lambda6(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_cur_scope;
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_outer_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("lambda6");
+ __pyx_outer_scope = (struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *)__pyx_self;
+ __pyx_cur_scope = __pyx_outer_scope;
+ __pyx_self = __pyx_self;
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_free(__pyx_cur_scope->__pyx_v_buf); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load.lambda6", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":183
+ *
+ *
+ * def _dump_or_load(table, order, columns, db, fh): # <<<<<<<<<<<<<<
+ * '''Dump or load *columns* of *table*
+ *
+ */
+
+static PyObject *__pyx_pf_4s3ql_10_deltadump_2_dump_or_load(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_4s3ql_10_deltadump_2_dump_or_load[] = "_dump_or_load(table, order, columns, db, fh)\nDump or load *columns* of *table*\n \n If *order* is None, load data from *fh* into *db*.\n \n If *order* is not None, data will be read from *db* and written\n into *fh*. In this case, *order* specifies the order in which\n the rows are written and must be a string that can be inserted\n after the \"ORDER BY\" clause in an SQL SELECT statement.\n \n *db* is an `s3ql.Connection` instance for the database.\n \n *columns* must a list of 3-tuples, one for each column that should be stored\n or retrieved. The first element of the tuple must contain the column name\n and the second element the type of data stored in the column (`INTEGER`,\n `TIME` or `BLOB`). Times will be converted to nanosecond integers.\n \n For integers and times, the third tuple element specifies the expected\n change of the values between rows. For blobs it can be either zero\n (indicating variable length columns) or an integer specifying the length of\n the column values in bytes. \n ";
+static PyMethodDef __pyx_mdef_4s3ql_10_deltadump_2_dump_or_load = {__Pyx_NAMESTR("_dump_or_load"), (PyCFunction)__pyx_pf_4s3ql_10_deltadump_2_dump_or_load, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4s3ql_10_deltadump_2_dump_or_load)};
+static PyObject *__pyx_pf_4s3ql_10_deltadump_2_dump_or_load(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *__pyx_cur_scope;
+ PyObject *__pyx_v_table = 0;
+ PyObject *__pyx_v_order = 0;
+ PyObject *__pyx_v_columns = 0;
+ PyObject *__pyx_v_db = 0;
+ PyObject *__pyx_v_fh = 0;
+ sqlite3 *__pyx_v_sqlite3_db;
+ int __pyx_v_col_count;
+ int __pyx_v_rc;
+ int __pyx_v_fd;
+ int64_t __pyx_v_row_count;
+ PyObject *__pyx_v_cleanup = NULL;
+ PyObject *__pyx_v_i = NULL;
+ PyObject *__pyx_v_col_names = NULL;
+ PyObject *__pyx_v_query = NULL;
+ PyObject *__pyx_v_x = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ void *__pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_t_9;
+ int __pyx_t_10;
+ Py_ssize_t __pyx_t_11;
+ void *__pyx_t_12;
+ PyObject *(*__pyx_t_13)(PyObject *);
+ PyObject *__pyx_t_14 = NULL;
+ int __pyx_t_15;
+ int __pyx_t_16;
+ Py_ssize_t __pyx_t_17;
+ char *__pyx_t_18;
+ size_t __pyx_t_19;
+ int64_t __pyx_t_20;
+ PyObject *__pyx_t_21 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__table,&__pyx_n_s__order,&__pyx_n_s__columns,&__pyx_n_s__db,&__pyx_n_s__fh,0};
+ __Pyx_RefNannySetupContext("_dump_or_load");
+ __pyx_cur_scope = (struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load *)__pyx_ptype_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load->tp_new(__pyx_ptype_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load, __pyx_empty_tuple, NULL);
+ if (unlikely(!__pyx_cur_scope)) {
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ }
+ __Pyx_GOTREF(__pyx_cur_scope);
+ __pyx_self = __pyx_self;
+ {
+ PyObject* values[5] = {0,0,0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 0:
+ values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__table);
+ if (likely(values[0])) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__order);
+ if (likely(values[1])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_dump_or_load", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__columns);
+ if (likely(values[2])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_dump_or_load", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 3:
+ values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__db);
+ if (likely(values[3])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_dump_or_load", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 4:
+ values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fh);
+ if (likely(values[4])) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_dump_or_load", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "_dump_or_load") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ }
+ __pyx_v_table = values[0];
+ __pyx_v_order = values[1];
+ __pyx_v_columns = values[2];
+ __pyx_v_db = values[3];
+ __pyx_v_fh = values[4];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_dump_or_load", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_DECREF(((PyObject *)__pyx_cur_scope));
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+
+ /* "s3ql/_deltadump.pyx":214
+ * cdef int64_t row_count
+ *
+ * sqlite3_db = <sqlite3*> PyLong_AsVoidPtr(db.conn.sqlite3pointer()) # <<<<<<<<<<<<<<
+ *
+ * with CleanupManager(log) as cleanup:
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_db, __pyx_n_s__conn); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__sqlite3pointer); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_3 = PyLong_AsVoidPtr(__pyx_t_1); if (unlikely(__pyx_t_3 == NULL && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_sqlite3_db = ((sqlite3 *)__pyx_t_3);
+
+ /* "s3ql/_deltadump.pyx":216
+ * sqlite3_db = <sqlite3*> PyLong_AsVoidPtr(db.conn.sqlite3pointer())
+ *
+ * with CleanupManager(log) as cleanup: # <<<<<<<<<<<<<<
+ * fd = dup(fh.fileno())
+ * if fd == -1:
+ */
+ /*with:*/ {
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__CleanupManager); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__log); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s____exit__); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s____enter__); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ /*try:*/ {
+ {
+ __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
+ __Pyx_XGOTREF(__pyx_t_6);
+ __Pyx_XGOTREF(__pyx_t_7);
+ __Pyx_XGOTREF(__pyx_t_8);
+ /*try:*/ {
+ __pyx_v_cleanup = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":217
+ *
+ * with CleanupManager(log) as cleanup:
+ * fd = dup(fh.fileno()) # <<<<<<<<<<<<<<
+ * if fd == -1:
+ * raise OSError(errno, strerror(errno))
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_fh, __pyx_n_s__fileno); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_fd = dup(__pyx_t_9);
+
+ /* "s3ql/_deltadump.pyx":218
+ * with CleanupManager(log) as cleanup:
+ * fd = dup(fh.fileno())
+ * if fd == -1: # <<<<<<<<<<<<<<
+ * raise OSError(errno, strerror(errno))
+ * fp = fdopen(fd, 'r+b')
+ */
+ __pyx_t_10 = (__pyx_v_fd == -1);
+ if (__pyx_t_10) {
+
+ /* "s3ql/_deltadump.pyx":219
+ * fd = dup(fh.fileno())
+ * if fd == -1:
+ * raise OSError(errno, strerror(errno)) # <<<<<<<<<<<<<<
+ * fp = fdopen(fd, 'r+b')
+ * if fp == NULL:
+ */
+ __pyx_t_4 = PyInt_FromLong(errno); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyBytes_FromString(strerror(errno)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_4 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_OSError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ goto __pyx_L18;
+ }
+ __pyx_L18:;
+
+ /* "s3ql/_deltadump.pyx":220
+ * if fd == -1:
+ * raise OSError(errno, strerror(errno))
+ * fp = fdopen(fd, 'r+b') # <<<<<<<<<<<<<<
+ * if fp == NULL:
+ * raise OSError(errno, strerror(errno))
+ */
+ __pyx_cur_scope->__pyx_v_fp = fdopen(__pyx_v_fd, __pyx_k_1);
+
+ /* "s3ql/_deltadump.pyx":221
+ * raise OSError(errno, strerror(errno))
+ * fp = fdopen(fd, 'r+b')
+ * if fp == NULL: # <<<<<<<<<<<<<<
+ * raise OSError(errno, strerror(errno))
+ * cleanup.register(lambda: fclose(fp))
+ */
+ __pyx_t_10 = (__pyx_cur_scope->__pyx_v_fp == NULL);
+ if (__pyx_t_10) {
+
+ /* "s3ql/_deltadump.pyx":222
+ * fp = fdopen(fd, 'r+b')
+ * if fp == NULL:
+ * raise OSError(errno, strerror(errno)) # <<<<<<<<<<<<<<
+ * cleanup.register(lambda: fclose(fp))
+ *
+ */
+ __pyx_t_2 = PyInt_FromLong(errno); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyBytes_FromString(strerror(errno)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_2 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_OSError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ goto __pyx_L19;
+ }
+ __pyx_L19:;
+
+ /* "s3ql/_deltadump.pyx":223
+ * if fp == NULL:
+ * raise OSError(errno, strerror(errno))
+ * cleanup.register(lambda: fclose(fp)) # <<<<<<<<<<<<<<
+ *
+ * # Allocate col_args and col_types
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_cleanup, __pyx_n_s__register); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_lambda_methdef_lambda1, ((PyObject*)__pyx_cur_scope), __pyx_n_s_2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+ /* "s3ql/_deltadump.pyx":226
+ *
+ * # Allocate col_args and col_types
+ * col_count = len(columns) # <<<<<<<<<<<<<<
+ * col_types = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_types))
+ */
+ __pyx_t_11 = PyObject_Length(__pyx_v_columns); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __pyx_v_col_count = __pyx_t_11;
+
+ /* "s3ql/_deltadump.pyx":227
+ * # Allocate col_args and col_types
+ * col_count = len(columns)
+ * col_types = <int*> calloc(col_count, sizeof(int)) # <<<<<<<<<<<<<<
+ * cleanup.register(lambda: free(col_types))
+ * col_args = <int*> calloc(col_count, sizeof(int))
+ */
+ __pyx_t_12 = __pyx_f_4s3ql_10_deltadump_calloc(__pyx_v_col_count, (sizeof(int))); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __pyx_cur_scope->__pyx_v_col_types = ((int *)__pyx_t_12);
+
+ /* "s3ql/_deltadump.pyx":228
+ * col_count = len(columns)
+ * col_types = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_types)) # <<<<<<<<<<<<<<
+ * col_args = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_args))
+ */
+ __pyx_t_4 = PyObject_GetAttr(__pyx_v_cleanup, __pyx_n_s__register); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_lambda_methdef_lambda2, ((PyObject*)__pyx_cur_scope), __pyx_n_s_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":229
+ * col_types = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_types))
+ * col_args = <int*> calloc(col_count, sizeof(int)) # <<<<<<<<<<<<<<
+ * cleanup.register(lambda: free(col_args))
+ *
+ */
+ __pyx_t_12 = __pyx_f_4s3ql_10_deltadump_calloc(__pyx_v_col_count, (sizeof(int))); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __pyx_cur_scope->__pyx_v_col_args = ((int *)__pyx_t_12);
+
+ /* "s3ql/_deltadump.pyx":230
+ * cleanup.register(lambda: free(col_types))
+ * col_args = <int*> calloc(col_count, sizeof(int))
+ * cleanup.register(lambda: free(col_args)) # <<<<<<<<<<<<<<
+ *
+ * # Initialize col_args and col_types
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_cleanup, __pyx_n_s__register); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_lambda_methdef_lambda3, ((PyObject*)__pyx_cur_scope), __pyx_n_s_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "s3ql/_deltadump.pyx":233
+ *
+ * # Initialize col_args and col_types
+ * for i in range(col_count): # <<<<<<<<<<<<<<
+ * if columns[i][1] not in (BLOB, INTEGER, TIME):
+ * raise ValueError("Invalid type for column %d" % i)
+ */
+ __pyx_t_1 = PyInt_FromLong(__pyx_v_col_count); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_range, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ if (PyList_CheckExact(__pyx_t_1) || PyTuple_CheckExact(__pyx_t_1)) {
+ __pyx_t_4 = __pyx_t_1; __Pyx_INCREF(__pyx_t_4); __pyx_t_11 = 0;
+ __pyx_t_13 = NULL;
+ } else {
+ __pyx_t_11 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_13 = Py_TYPE(__pyx_t_4)->tp_iternext;
+ }
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ for (;;) {
+ if (PyList_CheckExact(__pyx_t_4)) {
+ if (__pyx_t_11 >= PyList_GET_SIZE(__pyx_t_4)) break;
+ __pyx_t_1 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_11); __Pyx_INCREF(__pyx_t_1); __pyx_t_11++;
+ } else if (PyTuple_CheckExact(__pyx_t_4)) {
+ if (__pyx_t_11 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
+ __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_11); __Pyx_INCREF(__pyx_t_1); __pyx_t_11++;
+ } else {
+ __pyx_t_1 = __pyx_t_13(__pyx_t_4);
+ if (unlikely(!__pyx_t_1)) {
+ if (PyErr_Occurred()) {
+ if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear();
+ else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ }
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_1);
+ }
+ __Pyx_XDECREF(__pyx_v_i);
+ __pyx_v_i = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "s3ql/_deltadump.pyx":234
+ * # Initialize col_args and col_types
+ * for i in range(col_count):
+ * if columns[i][1] not in (BLOB, INTEGER, TIME): # <<<<<<<<<<<<<<
+ * raise ValueError("Invalid type for column %d" % i)
+ * col_types[i] = columns[i][1]
+ */
+ __pyx_t_1 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__BLOB); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_14 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ if (((int)__pyx_t_10)) {
+ __pyx_t_14 = __Pyx_GetName(__pyx_m, __pyx_n_s__INTEGER); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_t_14, Py_NE); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_15 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_15 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_16 = ((int)__pyx_t_15);
+ } else {
+ __pyx_t_16 = ((int)__pyx_t_10);
+ }
+ if (__pyx_t_16) {
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__TIME); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_14 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_15 = ((int)__pyx_t_10);
+ } else {
+ __pyx_t_15 = __pyx_t_16;
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_16 = __pyx_t_15;
+ if (__pyx_t_16) {
+
+ /* "s3ql/_deltadump.pyx":235
+ * for i in range(col_count):
+ * if columns[i][1] not in (BLOB, INTEGER, TIME):
+ * raise ValueError("Invalid type for column %d" % i) # <<<<<<<<<<<<<<
+ * col_types[i] = columns[i][1]
+ *
+ */
+ __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_3), __pyx_v_i); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_14));
+ PyTuple_SET_ITEM(__pyx_t_14, 0, ((PyObject *)__pyx_t_2));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_14), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_14)); __pyx_t_14 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ goto __pyx_L22;
+ }
+ __pyx_L22:;
+
+ /* "s3ql/_deltadump.pyx":236
+ * if columns[i][1] not in (BLOB, INTEGER, TIME):
+ * raise ValueError("Invalid type for column %d" % i)
+ * col_types[i] = columns[i][1] # <<<<<<<<<<<<<<
+ *
+ * if len(columns[i]) == 3:
+ */
+ __pyx_t_2 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_14 = __Pyx_GetItemInt(__pyx_t_2, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_14); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_17 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_17 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ (__pyx_cur_scope->__pyx_v_col_types[__pyx_t_17]) = __pyx_t_9;
+
+ /* "s3ql/_deltadump.pyx":238
+ * col_types[i] = columns[i][1]
+ *
+ * if len(columns[i]) == 3: # <<<<<<<<<<<<<<
+ * col_args[i] = columns[i][2]
+ * else:
+ */
+ __pyx_t_14 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_17 = PyObject_Length(__pyx_t_14); if (unlikely(__pyx_t_17 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_16 = (__pyx_t_17 == 3);
+ if (__pyx_t_16) {
+
+ /* "s3ql/_deltadump.pyx":239
+ *
+ * if len(columns[i]) == 3:
+ * col_args[i] = columns[i][2] # <<<<<<<<<<<<<<
+ * else:
+ * col_args[i] = 0
+ */
+ __pyx_t_14 = PyObject_GetItem(__pyx_v_columns, __pyx_v_i); if (!__pyx_t_14) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_14, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_17 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_17 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ (__pyx_cur_scope->__pyx_v_col_args[__pyx_t_17]) = __pyx_t_9;
+ goto __pyx_L23;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":241
+ * col_args[i] = columns[i][2]
+ * else:
+ * col_args[i] = 0 # <<<<<<<<<<<<<<
+ *
+ * # Allocate int64_prev
+ */
+ __pyx_t_17 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_17 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ (__pyx_cur_scope->__pyx_v_col_args[__pyx_t_17]) = 0;
+ }
+ __pyx_L23:;
+ }
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+
+ /* "s3ql/_deltadump.pyx":244
+ *
+ * # Allocate int64_prev
+ * int64_prev = <int64_t*> calloc(len(columns), sizeof(int64_t)) # <<<<<<<<<<<<<<
+ * cleanup.register(lambda: free(int64_prev))
+ *
+ */
+ __pyx_t_11 = PyObject_Length(__pyx_v_columns); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __pyx_t_12 = __pyx_f_4s3ql_10_deltadump_calloc(__pyx_t_11, (sizeof(int64_t))); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __pyx_cur_scope->__pyx_v_int64_prev = ((int64_t *)__pyx_t_12);
+
+ /* "s3ql/_deltadump.pyx":245
+ * # Allocate int64_prev
+ * int64_prev = <int64_t*> calloc(len(columns), sizeof(int64_t))
+ * cleanup.register(lambda: free(int64_prev)) # <<<<<<<<<<<<<<
+ *
+ * # Prepare statement
+ */
+ __pyx_t_4 = PyObject_GetAttr(__pyx_v_cleanup, __pyx_n_s__register); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_lambda_methdef_lambda4, ((PyObject*)__pyx_cur_scope), __pyx_n_s_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_14));
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_14), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_14)); __pyx_t_14 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":248
+ *
+ * # Prepare statement
+ * col_names = [ x[0] for x in columns ] # <<<<<<<<<<<<<<
+ * if order is None:
+ * query = ("INSERT INTO %s (%s) VALUES(%s)"
+ */
+ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ if (PyList_CheckExact(__pyx_v_columns) || PyTuple_CheckExact(__pyx_v_columns)) {
+ __pyx_t_14 = __pyx_v_columns; __Pyx_INCREF(__pyx_t_14); __pyx_t_11 = 0;
+ __pyx_t_13 = NULL;
+ } else {
+ __pyx_t_11 = -1; __pyx_t_14 = PyObject_GetIter(__pyx_v_columns); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_13 = Py_TYPE(__pyx_t_14)->tp_iternext;
+ }
+ for (;;) {
+ if (PyList_CheckExact(__pyx_t_14)) {
+ if (__pyx_t_11 >= PyList_GET_SIZE(__pyx_t_14)) break;
+ __pyx_t_4 = PyList_GET_ITEM(__pyx_t_14, __pyx_t_11); __Pyx_INCREF(__pyx_t_4); __pyx_t_11++;
+ } else if (PyTuple_CheckExact(__pyx_t_14)) {
+ if (__pyx_t_11 >= PyTuple_GET_SIZE(__pyx_t_14)) break;
+ __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_14, __pyx_t_11); __Pyx_INCREF(__pyx_t_4); __pyx_t_11++;
+ } else {
+ __pyx_t_4 = __pyx_t_13(__pyx_t_14);
+ if (unlikely(!__pyx_t_4)) {
+ if (PyErr_Occurred()) {
+ if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear();
+ else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ }
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_4);
+ }
+ __Pyx_XDECREF(__pyx_v_x);
+ __pyx_v_x = __pyx_t_4;
+ __pyx_t_4 = 0;
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_x, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (unlikely(PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_INCREF(((PyObject *)__pyx_t_2));
+ __pyx_v_col_names = __pyx_t_2;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":249
+ * # Prepare statement
+ * col_names = [ x[0] for x in columns ]
+ * if order is None: # <<<<<<<<<<<<<<
+ * query = ("INSERT INTO %s (%s) VALUES(%s)"
+ * % (table,
+ */
+ __pyx_t_16 = (__pyx_v_order == Py_None);
+ if (__pyx_t_16) {
+
+ /* "s3ql/_deltadump.pyx":252
+ * query = ("INSERT INTO %s (%s) VALUES(%s)"
+ * % (table,
+ * ', '.join(col_names), # <<<<<<<<<<<<<<
+ * ', '.join('?' * col_count)))
+ * else:
+ */
+ __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_5), __pyx_n_s__join); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 252; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 252; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_14));
+ __Pyx_INCREF(((PyObject *)__pyx_v_col_names));
+ PyTuple_SET_ITEM(__pyx_t_14, 0, ((PyObject *)__pyx_v_col_names));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_col_names));
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_14), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 252; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_14)); __pyx_t_14 = 0;
+
+ /* "s3ql/_deltadump.pyx":253
+ * % (table,
+ * ', '.join(col_names),
+ * ', '.join('?' * col_count))) # <<<<<<<<<<<<<<
+ * else:
+ * query = ("SELECT %s FROM %s ORDER BY %s " %
+ */
+ __pyx_t_14 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_5), __pyx_n_s__join); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_14);
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_col_count); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyNumber_Multiply(((PyObject *)__pyx_kp_s_6), __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_14, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_table);
+ __Pyx_GIVEREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_4 = 0;
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_4), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_v_query = ((PyObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+ goto __pyx_L26;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":256
+ * else:
+ * query = ("SELECT %s FROM %s ORDER BY %s " %
+ * (', '.join(col_names), table, order)) # <<<<<<<<<<<<<<
+ * rc = sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL)
+ * if rc != SQLITE_OK:
+ */
+ __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_5), __pyx_n_s__join); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(((PyObject *)__pyx_v_col_names));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_col_names));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_col_names));
+ __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_table);
+ __Pyx_GIVEREF(__pyx_v_table);
+ __Pyx_INCREF(__pyx_v_order);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_order);
+ __Pyx_GIVEREF(__pyx_v_order);
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_7), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_v_query = ((PyObject *)__pyx_t_4);
+ __pyx_t_4 = 0;
+ }
+ __pyx_L26:;
+
+ /* "s3ql/_deltadump.pyx":257
+ * query = ("SELECT %s FROM %s ORDER BY %s " %
+ * (', '.join(col_names), table, order))
+ * rc = sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL) # <<<<<<<<<<<<<<
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ */
+ __pyx_t_18 = PyBytes_AsString(__pyx_v_query); if (unlikely((!__pyx_t_18) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __pyx_v_rc = sqlite3_prepare_v2(__pyx_v_sqlite3_db, __pyx_t_18, -1, (&__pyx_cur_scope->__pyx_v_stmt), NULL);
+
+ /* "s3ql/_deltadump.pyx":258
+ * (', '.join(col_names), table, order))
+ * rc = sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL)
+ * if rc != SQLITE_OK: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ * cleanup.register(lambda: sqlite3_finalize_p(stmt))
+ */
+ __pyx_t_16 = (__pyx_v_rc != SQLITE_OK);
+ if (__pyx_t_16) {
+
+ /* "s3ql/_deltadump.pyx":259
+ * rc = sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL)
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ * cleanup.register(lambda: sqlite3_finalize_p(stmt))
+ *
+ */
+ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 259; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ goto __pyx_L27;
+ }
+ __pyx_L27:;
+
+ /* "s3ql/_deltadump.pyx":260
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ * cleanup.register(lambda: sqlite3_finalize_p(stmt)) # <<<<<<<<<<<<<<
+ *
+ * # Dump or load data as requested
+ */
+ __pyx_t_4 = PyObject_GetAttr(__pyx_v_cleanup, __pyx_n_s__register); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_lambda_methdef_lambda5, ((PyObject*)__pyx_cur_scope), __pyx_n_s_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "s3ql/_deltadump.pyx":263
+ *
+ * # Dump or load data as requested
+ * if order is None: # <<<<<<<<<<<<<<
+ * buf = calloc(MAX_BLOB_SIZE, 1)
+ * cleanup.register(lambda: free(buf))
+ */
+ __pyx_t_16 = (__pyx_v_order == Py_None);
+ if (__pyx_t_16) {
+
+ /* "s3ql/_deltadump.pyx":264
+ * # Dump or load data as requested
+ * if order is None:
+ * buf = calloc(MAX_BLOB_SIZE, 1) # <<<<<<<<<<<<<<
+ * cleanup.register(lambda: free(buf))
+ * read_integer(&row_count, fp)
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__MAX_BLOB_SIZE); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_19 = __Pyx_PyInt_AsSize_t(__pyx_t_1); if (unlikely((__pyx_t_19 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_12 = __pyx_f_4s3ql_10_deltadump_calloc(__pyx_t_19, 1); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __pyx_cur_scope->__pyx_v_buf = __pyx_t_12;
+
+ /* "s3ql/_deltadump.pyx":265
+ * if order is None:
+ * buf = calloc(MAX_BLOB_SIZE, 1)
+ * cleanup.register(lambda: free(buf)) # <<<<<<<<<<<<<<
+ * read_integer(&row_count, fp)
+ * log.debug('_dump_or_load(%s): reading %d rows', table, row_count)
+ */
+ __pyx_t_1 = PyObject_GetAttr(__pyx_v_cleanup, __pyx_n_s__register); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __pyx_binding_PyCFunctionType_NewEx(&__pyx_lambda_methdef_lambda6, ((PyObject*)__pyx_cur_scope), __pyx_n_s_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":266
+ * buf = calloc(MAX_BLOB_SIZE, 1)
+ * cleanup.register(lambda: free(buf))
+ * read_integer(&row_count, fp) # <<<<<<<<<<<<<<
+ * log.debug('_dump_or_load(%s): reading %d rows', table, row_count)
+ * _load_table(col_types, col_args, int64_prev, col_count,
+ */
+ __pyx_t_9 = __pyx_f_4s3ql_10_deltadump_read_integer((&__pyx_v_row_count), __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+
+ /* "s3ql/_deltadump.pyx":267
+ * cleanup.register(lambda: free(buf))
+ * read_integer(&row_count, fp)
+ * log.debug('_dump_or_load(%s): reading %d rows', table, row_count) # <<<<<<<<<<<<<<
+ * _load_table(col_types, col_args, int64_prev, col_count,
+ * row_count, stmt, fp, buf)
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__log); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__debug); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_PyInt_to_py_int64_t(__pyx_v_row_count); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_8));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_kp_s_8));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_8));
+ __Pyx_INCREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_table);
+ __Pyx_GIVEREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":269
+ * log.debug('_dump_or_load(%s): reading %d rows', table, row_count)
+ * _load_table(col_types, col_args, int64_prev, col_count,
+ * row_count, stmt, fp, buf) # <<<<<<<<<<<<<<
+ * else:
+ * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table)
+ */
+ __pyx_t_2 = __pyx_f_4s3ql_10_deltadump__load_table(__pyx_cur_scope->__pyx_v_col_types, __pyx_cur_scope->__pyx_v_col_args, __pyx_cur_scope->__pyx_v_int64_prev, __pyx_v_col_count, __pyx_v_row_count, __pyx_cur_scope->__pyx_v_stmt, __pyx_cur_scope->__pyx_v_fp, __pyx_cur_scope->__pyx_v_buf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ goto __pyx_L28;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":271
+ * row_count, stmt, fp, buf)
+ * else:
+ * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table) # <<<<<<<<<<<<<<
+ * log.debug('_dump_or_load(%s): writing %d rows', table, row_count)
+ * write_integer(row_count, fp)
+ */
+ __pyx_t_2 = PyObject_GetAttr(__pyx_v_db, __pyx_n_s__get_val); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_9), __pyx_v_table); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_1));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __pyx_t_20 = __Pyx_PyInt_from_py_int64_t(__pyx_t_1); if (unlikely((__pyx_t_20 == (int64_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_row_count = __pyx_t_20;
+
+ /* "s3ql/_deltadump.pyx":272
+ * else:
+ * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table)
+ * log.debug('_dump_or_load(%s): writing %d rows', table, row_count) # <<<<<<<<<<<<<<
+ * write_integer(row_count, fp)
+ * _dump_table(col_types, col_args, int64_prev, col_count, stmt, fp)
+ */
+ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__log); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__debug); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyInt_to_py_int64_t(__pyx_v_row_count); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_10));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_10));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_10));
+ __Pyx_INCREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_table);
+ __Pyx_GIVEREF(__pyx_v_table);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "s3ql/_deltadump.pyx":273
+ * row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table)
+ * log.debug('_dump_or_load(%s): writing %d rows', table, row_count)
+ * write_integer(row_count, fp) # <<<<<<<<<<<<<<
+ * _dump_table(col_types, col_args, int64_prev, col_count, stmt, fp)
+ *
+ */
+ __pyx_t_9 = __pyx_f_4s3ql_10_deltadump_write_integer(__pyx_v_row_count, __pyx_cur_scope->__pyx_v_fp); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+
+ /* "s3ql/_deltadump.pyx":274
+ * log.debug('_dump_or_load(%s): writing %d rows', table, row_count)
+ * write_integer(row_count, fp)
+ * _dump_table(col_types, col_args, int64_prev, col_count, stmt, fp) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump__dump_table(__pyx_cur_scope->__pyx_v_col_types, __pyx_cur_scope->__pyx_v_col_args, __pyx_cur_scope->__pyx_v_int64_prev, __pyx_v_col_count, __pyx_cur_scope->__pyx_v_stmt, __pyx_cur_scope->__pyx_v_fp); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L10_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ }
+ __pyx_L28:;
+ }
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ goto __pyx_L17_try_end;
+ __pyx_L10_error:;
+ __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "s3ql/_deltadump.pyx":216
+ * sqlite3_db = <sqlite3*> PyLong_AsVoidPtr(db.conn.sqlite3pointer())
+ *
+ * with CleanupManager(log) as cleanup: # <<<<<<<<<<<<<<
+ * fd = dup(fh.fileno())
+ * if fd == -1:
+ */
+ /*except:*/ {
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_2, &__pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L12_except_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_14 = PyTuple_New(3); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L12_except_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_14));
+ __Pyx_INCREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_14, 1, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_14, 2, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_21 = PyObject_Call(__pyx_t_5, __pyx_t_14, NULL);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L12_except_error;}
+ __Pyx_GOTREF(__pyx_t_21);
+ __pyx_t_16 = __Pyx_PyObject_IsTrue(__pyx_t_21);
+ __Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0;
+ if (unlikely(__pyx_t_16 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L12_except_error;}
+ __pyx_t_15 = (!__pyx_t_16);
+ if (__pyx_t_15) {
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __Pyx_ErrRestore(__pyx_t_1, __pyx_t_2, __pyx_t_4);
+ __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L12_except_error;}
+ goto __pyx_L31;
+ }
+ __pyx_L31:;
+ __Pyx_DECREF(((PyObject *)__pyx_t_14)); __pyx_t_14 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ goto __pyx_L11_exception_handled;
+ }
+ __pyx_L12_except_error:;
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
+ goto __pyx_L1_error;
+ __pyx_L11_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
+ __pyx_L17_try_end:;
+ }
+ }
+ /*finally:*/ {
+ if (__pyx_t_5) {
+ __pyx_t_8 = PyObject_Call(__pyx_t_5, __pyx_k_tuple_11, NULL);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_15 = __Pyx_PyObject_IsTrue(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ if (unlikely(__pyx_t_15 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ }
+ goto __pyx_L32;
+ __pyx_L6_error:;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ goto __pyx_L1_error;
+ __pyx_L32:;
+ }
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_14);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_or_load", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_cleanup);
+ __Pyx_XDECREF(__pyx_v_i);
+ __Pyx_XDECREF(__pyx_v_col_names);
+ __Pyx_XDECREF(__pyx_v_query);
+ __Pyx_XDECREF(__pyx_v_x);
+ __Pyx_DECREF(((PyObject *)__pyx_cur_scope));
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+ /* "s3ql/_deltadump.pyx":277
+ *
+ *
+ * cdef _dump_table(int* col_types, int* col_args, int64_t* int64_prev, # <<<<<<<<<<<<<<
+ * int col_count, sqlite3_stmt* stmt, FILE* fp):
+ *
+ */
+
+ /*
+ * Cython-generated body of `cdef _dump_table()` (s3ql/_deltadump.pyx:277).
+ *
+ * Serializes every row produced by the prepared SQLite statement `stmt`
+ * into the stdio stream `fp`.  Per column i, dispatch on col_types[i]:
+ *   _INTEGER: write the delta  value - (int64_prev[i] + col_args[i]),
+ *             then store the un-deltaed value back into int64_prev[i];
+ *   _TIME:    same delta scheme, after scaling the column's double value
+ *             by the module-level `time_scale` and casting to int64_t;
+ *   _BLOB:    if col_args[i] == 0 the blob is variable-length -- write
+ *             the length delta (len_ - int64_prev[i]) and remember len_;
+ *             otherwise the length must equal col_args[i] exactly.
+ *             In both cases the raw bytes follow via fwrite().
+ * Error behaviour visible below: NULL column values and over-size or
+ * mis-sized BLOBs raise ValueError; a sqlite3_step() result other than
+ * SQLITE_ROW/SQLITE_DONE raises apsw.exceptionfor(rc).  Returns None on
+ * success; on error returns NULL with a Python exception set.
+ *
+ * NOTE(review): this file is generated by Cython -- fix bugs in
+ * _deltadump.pyx, not here; any hand edit is lost on re-cythonization.
+ */
+ static PyObject *__pyx_f_4s3ql_10_deltadump__dump_table(int *__pyx_v_col_types, int *__pyx_v_col_args, int64_t *__pyx_v_int64_prev, int __pyx_v_col_count, sqlite3_stmt *__pyx_v_stmt, FILE *__pyx_v_fp) {
+ const void *__pyx_v_buf;
+ int __pyx_v_rc;
+ int __pyx_v_i;
+ int __pyx_v_len_;
+ int64_t __pyx_v_int64;
+ int64_t __pyx_v_tmp;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_dump_table");
+
+ /* "s3ql/_deltadump.pyx":285
+ *
+ * # Iterate through rows
+ * while True: # <<<<<<<<<<<<<<
+ * rc = sqlite3_step(stmt)
+ * if rc == SQLITE_DONE:
+ */
+ while (1) {
+ if (!1) break;
+ /* (the `if (!1) break;` is dead code -- Cython's literal rendering of
+ * `while True:`; the loop only exits via the SQLITE_DONE break below) */
+
+ /* "s3ql/_deltadump.pyx":286
+ * # Iterate through rows
+ * while True:
+ * rc = sqlite3_step(stmt) # <<<<<<<<<<<<<<
+ * if rc == SQLITE_DONE:
+ * break
+ */
+ __pyx_v_rc = sqlite3_step(__pyx_v_stmt);
+
+ /* "s3ql/_deltadump.pyx":287
+ * while True:
+ * rc = sqlite3_step(stmt)
+ * if rc == SQLITE_DONE: # <<<<<<<<<<<<<<
+ * break
+ * elif rc != SQLITE_ROW:
+ */
+ __pyx_t_1 = (__pyx_v_rc == SQLITE_DONE);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":288
+ * rc = sqlite3_step(stmt)
+ * if rc == SQLITE_DONE:
+ * break # <<<<<<<<<<<<<<
+ * elif rc != SQLITE_ROW:
+ * raise apsw.exceptionfor(rc)
+ */
+ goto __pyx_L4_break;
+ goto __pyx_L5;
+ }
+
+ /* "s3ql/_deltadump.pyx":289
+ * if rc == SQLITE_DONE:
+ * break
+ * elif rc != SQLITE_ROW: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ *
+ */
+ __pyx_t_1 = (__pyx_v_rc != SQLITE_ROW);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":290
+ * break
+ * elif rc != SQLITE_ROW:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ *
+ * for i in range(col_count):
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "s3ql/_deltadump.pyx":292
+ * raise apsw.exceptionfor(rc)
+ *
+ * for i in range(col_count): # <<<<<<<<<<<<<<
+ * if sqlite3_column_type(stmt, i) is SQLITE_NULL:
+ * raise ValueError("Can't dump NULL values")
+ */
+ __pyx_t_5 = __pyx_v_col_count;
+ for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
+ __pyx_v_i = __pyx_t_6;
+
+ /* "s3ql/_deltadump.pyx":293
+ *
+ * for i in range(col_count):
+ * if sqlite3_column_type(stmt, i) is SQLITE_NULL: # <<<<<<<<<<<<<<
+ * raise ValueError("Can't dump NULL values")
+ *
+ */
+ __pyx_t_1 = (sqlite3_column_type(__pyx_v_stmt, __pyx_v_i) == SQLITE_NULL);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":294
+ * for i in range(col_count):
+ * if sqlite3_column_type(stmt, i) is SQLITE_NULL:
+ * raise ValueError("Can't dump NULL values") # <<<<<<<<<<<<<<
+ *
+ * if col_types[i] == _INTEGER:
+ */
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_13), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+
+ /* "s3ql/_deltadump.pyx":296
+ * raise ValueError("Can't dump NULL values")
+ *
+ * if col_types[i] == _INTEGER: # <<<<<<<<<<<<<<
+ * int64 = sqlite3_column_int64(stmt, i)
+ * tmp = int64
+ */
+ __pyx_t_1 = ((__pyx_v_col_types[__pyx_v_i]) == __pyx_v_4s3ql_10_deltadump__INTEGER);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":297
+ *
+ * if col_types[i] == _INTEGER:
+ * int64 = sqlite3_column_int64(stmt, i) # <<<<<<<<<<<<<<
+ * tmp = int64
+ * int64 -= int64_prev[i] + col_args[i]
+ */
+ __pyx_v_int64 = sqlite3_column_int64(__pyx_v_stmt, __pyx_v_i);
+
+ /* "s3ql/_deltadump.pyx":298
+ * if col_types[i] == _INTEGER:
+ * int64 = sqlite3_column_int64(stmt, i)
+ * tmp = int64 # <<<<<<<<<<<<<<
+ * int64 -= int64_prev[i] + col_args[i]
+ * int64_prev[i] = tmp
+ */
+ __pyx_v_tmp = __pyx_v_int64;
+
+ /* "s3ql/_deltadump.pyx":299
+ * int64 = sqlite3_column_int64(stmt, i)
+ * tmp = int64
+ * int64 -= int64_prev[i] + col_args[i] # <<<<<<<<<<<<<<
+ * int64_prev[i] = tmp
+ * write_integer(int64, fp)
+ */
+ __pyx_v_int64 = (__pyx_v_int64 - ((__pyx_v_int64_prev[__pyx_v_i]) + (__pyx_v_col_args[__pyx_v_i])));
+
+ /* "s3ql/_deltadump.pyx":300
+ * tmp = int64
+ * int64 -= int64_prev[i] + col_args[i]
+ * int64_prev[i] = tmp # <<<<<<<<<<<<<<
+ * write_integer(int64, fp)
+ *
+ */
+ (__pyx_v_int64_prev[__pyx_v_i]) = __pyx_v_tmp;
+
+ /* "s3ql/_deltadump.pyx":301
+ * int64 -= int64_prev[i] + col_args[i]
+ * int64_prev[i] = tmp
+ * write_integer(int64, fp) # <<<<<<<<<<<<<<
+ *
+ * elif col_types[i] == _TIME:
+ */
+ __pyx_t_7 = __pyx_f_4s3ql_10_deltadump_write_integer(__pyx_v_int64, __pyx_v_fp); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 301; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L9;
+ }
+
+ /* "s3ql/_deltadump.pyx":303
+ * write_integer(int64, fp)
+ *
+ * elif col_types[i] == _TIME: # <<<<<<<<<<<<<<
+ * int64 = <int64_t> (sqlite3_column_double(stmt, i) * time_scale)
+ * tmp = int64
+ */
+ __pyx_t_1 = ((__pyx_v_col_types[__pyx_v_i]) == __pyx_v_4s3ql_10_deltadump__TIME);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":304
+ *
+ * elif col_types[i] == _TIME:
+ * int64 = <int64_t> (sqlite3_column_double(stmt, i) * time_scale) # <<<<<<<<<<<<<<
+ * tmp = int64
+ * int64 -= int64_prev[i] + col_args[i]
+ */
+ __pyx_v_int64 = ((int64_t)(sqlite3_column_double(__pyx_v_stmt, __pyx_v_i) * __pyx_v_4s3ql_10_deltadump_time_scale));
+
+ /* "s3ql/_deltadump.pyx":305
+ * elif col_types[i] == _TIME:
+ * int64 = <int64_t> (sqlite3_column_double(stmt, i) * time_scale)
+ * tmp = int64 # <<<<<<<<<<<<<<
+ * int64 -= int64_prev[i] + col_args[i]
+ * int64_prev[i] = tmp
+ */
+ __pyx_v_tmp = __pyx_v_int64;
+
+ /* "s3ql/_deltadump.pyx":306
+ * int64 = <int64_t> (sqlite3_column_double(stmt, i) * time_scale)
+ * tmp = int64
+ * int64 -= int64_prev[i] + col_args[i] # <<<<<<<<<<<<<<
+ * int64_prev[i] = tmp
+ * write_integer(int64, fp)
+ */
+ __pyx_v_int64 = (__pyx_v_int64 - ((__pyx_v_int64_prev[__pyx_v_i]) + (__pyx_v_col_args[__pyx_v_i])));
+
+ /* "s3ql/_deltadump.pyx":307
+ * tmp = int64
+ * int64 -= int64_prev[i] + col_args[i]
+ * int64_prev[i] = tmp # <<<<<<<<<<<<<<
+ * write_integer(int64, fp)
+ *
+ */
+ (__pyx_v_int64_prev[__pyx_v_i]) = __pyx_v_tmp;
+
+ /* "s3ql/_deltadump.pyx":308
+ * int64 -= int64_prev[i] + col_args[i]
+ * int64_prev[i] = tmp
+ * write_integer(int64, fp) # <<<<<<<<<<<<<<
+ *
+ * elif col_types[i] == _BLOB:
+ */
+ __pyx_t_7 = __pyx_f_4s3ql_10_deltadump_write_integer(__pyx_v_int64, __pyx_v_fp); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L9;
+ }
+
+ /* "s3ql/_deltadump.pyx":310
+ * write_integer(int64, fp)
+ *
+ * elif col_types[i] == _BLOB: # <<<<<<<<<<<<<<
+ * buf = sqlite3_column_blob(stmt, i)
+ * len_ = sqlite3_column_bytes(stmt, i)
+ */
+ __pyx_t_1 = ((__pyx_v_col_types[__pyx_v_i]) == __pyx_v_4s3ql_10_deltadump__BLOB);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":311
+ *
+ * elif col_types[i] == _BLOB:
+ * buf = sqlite3_column_blob(stmt, i) # <<<<<<<<<<<<<<
+ * len_ = sqlite3_column_bytes(stmt, i)
+ * if len_ > MAX_BLOB_SIZE:
+ */
+ __pyx_v_buf = sqlite3_column_blob(__pyx_v_stmt, __pyx_v_i);
+
+ /* "s3ql/_deltadump.pyx":312
+ * elif col_types[i] == _BLOB:
+ * buf = sqlite3_column_blob(stmt, i)
+ * len_ = sqlite3_column_bytes(stmt, i) # <<<<<<<<<<<<<<
+ * if len_ > MAX_BLOB_SIZE:
+ * raise ValueError('Can not dump BLOB of size %d (max: %d)',
+ */
+ __pyx_v_len_ = sqlite3_column_bytes(__pyx_v_stmt, __pyx_v_i);
+
+ /* "s3ql/_deltadump.pyx":313
+ * buf = sqlite3_column_blob(stmt, i)
+ * len_ = sqlite3_column_bytes(stmt, i)
+ * if len_ > MAX_BLOB_SIZE: # <<<<<<<<<<<<<<
+ * raise ValueError('Can not dump BLOB of size %d (max: %d)',
+ * len_, MAX_BLOB_SIZE)
+ */
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_len_); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__MAX_BLOB_SIZE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_4, Py_GT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_1) {
+
+ /* NOTE(review): per the .pyx source echoed below, len_ and
+ * MAX_BLOB_SIZE are passed as extra ValueError arguments (tuple of 3)
+ * rather than %-formatted into the message string -- confirm in
+ * _deltadump.pyx whether logging-style lazy formatting was intended. */
+
+ /* "s3ql/_deltadump.pyx":315
+ * if len_ > MAX_BLOB_SIZE:
+ * raise ValueError('Can not dump BLOB of size %d (max: %d)',
+ * len_, MAX_BLOB_SIZE) # <<<<<<<<<<<<<<
+ * if col_args[i] == 0:
+ * write_integer(len_ - int64_prev[i], fp)
+ */
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_len_); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__MAX_BLOB_SIZE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_14));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_kp_s_14));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_14));
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_3 = 0;
+ __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+
+ /* "s3ql/_deltadump.pyx":316
+ * raise ValueError('Can not dump BLOB of size %d (max: %d)',
+ * len_, MAX_BLOB_SIZE)
+ * if col_args[i] == 0: # <<<<<<<<<<<<<<
+ * write_integer(len_ - int64_prev[i], fp)
+ * int64_prev[i] = len_
+ */
+ __pyx_t_1 = ((__pyx_v_col_args[__pyx_v_i]) == 0);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":317
+ * len_, MAX_BLOB_SIZE)
+ * if col_args[i] == 0:
+ * write_integer(len_ - int64_prev[i], fp) # <<<<<<<<<<<<<<
+ * int64_prev[i] = len_
+ * elif len_ != col_args[i]:
+ */
+ __pyx_t_7 = __pyx_f_4s3ql_10_deltadump_write_integer((__pyx_v_len_ - (__pyx_v_int64_prev[__pyx_v_i])), __pyx_v_fp); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":318
+ * if col_args[i] == 0:
+ * write_integer(len_ - int64_prev[i], fp)
+ * int64_prev[i] = len_ # <<<<<<<<<<<<<<
+ * elif len_ != col_args[i]:
+ * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i))
+ */
+ (__pyx_v_int64_prev[__pyx_v_i]) = __pyx_v_len_;
+ goto __pyx_L11;
+ }
+
+ /* "s3ql/_deltadump.pyx":319
+ * write_integer(len_ - int64_prev[i], fp)
+ * int64_prev[i] = len_
+ * elif len_ != col_args[i]: # <<<<<<<<<<<<<<
+ * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i))
+ *
+ */
+ __pyx_t_1 = (__pyx_v_len_ != (__pyx_v_col_args[__pyx_v_i]));
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":320
+ * int64_prev[i] = len_
+ * elif len_ != col_args[i]:
+ * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i)) # <<<<<<<<<<<<<<
+ *
+ * if len_ != 0:
+ */
+ __pyx_t_4 = PyInt_FromLong(__pyx_v_len_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = PyInt_FromLong((__pyx_v_col_args[__pyx_v_i])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_i); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_8));
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_4 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_15), ((PyObject *)__pyx_t_8)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_8));
+ PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_t_3));
+ __Pyx_GIVEREF(((PyObject *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L11;
+ }
+ __pyx_L11:;
+
+ /* "s3ql/_deltadump.pyx":322
+ * raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i))
+ *
+ * if len_ != 0: # <<<<<<<<<<<<<<
+ * fwrite(buf, len_, fp)
+ *
+ */
+ __pyx_t_1 = (__pyx_v_len_ != 0);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":323
+ *
+ * if len_ != 0:
+ * fwrite(buf, len_, fp) # <<<<<<<<<<<<<<
+ *
+ * cdef _load_table(int* col_types, int* col_args, int64_t* int64_prev,
+ */
+ __pyx_t_7 = __pyx_f_4s3ql_10_deltadump_fwrite(__pyx_v_buf, __pyx_v_len_, __pyx_v_fp); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+ }
+ }
+ __pyx_L4_break:;
+
+ /* Success: return None.  On the error path (__pyx_L1_error) the
+ * temporaries are released and NULL is returned with the Python
+ * exception left set for the caller. */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("s3ql._deltadump._dump_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":325
+ * fwrite(buf, len_, fp)
+ *
+ * cdef _load_table(int* col_types, int* col_args, int64_t* int64_prev, # <<<<<<<<<<<<<<
+ * int col_count, int row_count, sqlite3_stmt* stmt,
+ * FILE* fp, void* buf):
+ */
+
+static PyObject *__pyx_f_4s3ql_10_deltadump__load_table(int *__pyx_v_col_types, int *__pyx_v_col_args, int64_t *__pyx_v_int64_prev, int __pyx_v_col_count, int __pyx_v_row_count, sqlite3_stmt *__pyx_v_stmt, FILE *__pyx_v_fp, void *__pyx_v_buf) {
+ int64_t __pyx_v_int64;
+ int __pyx_v_rc;
+ int __pyx_v_len_;
+ int __pyx_v_i;
+ int __pyx_v_j;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_load_table");
+
+ /* "s3ql/_deltadump.pyx":333
+ *
+ * # Iterate through rows
+ * for i in range(row_count): # <<<<<<<<<<<<<<
+ * for j in range(col_count):
+ * if col_types[j] == _INTEGER:
+ */
+ __pyx_t_1 = __pyx_v_row_count;
+ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
+ __pyx_v_i = __pyx_t_2;
+
+ /* "s3ql/_deltadump.pyx":334
+ * # Iterate through rows
+ * for i in range(row_count):
+ * for j in range(col_count): # <<<<<<<<<<<<<<
+ * if col_types[j] == _INTEGER:
+ * read_integer(&int64, fp)
+ */
+ __pyx_t_3 = __pyx_v_col_count;
+ for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
+ __pyx_v_j = __pyx_t_4;
+
+ /* "s3ql/_deltadump.pyx":335
+ * for i in range(row_count):
+ * for j in range(col_count):
+ * if col_types[j] == _INTEGER: # <<<<<<<<<<<<<<
+ * read_integer(&int64, fp)
+ * int64 += col_args[j] + int64_prev[j]
+ */
+ __pyx_t_5 = ((__pyx_v_col_types[__pyx_v_j]) == __pyx_v_4s3ql_10_deltadump__INTEGER);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":336
+ * for j in range(col_count):
+ * if col_types[j] == _INTEGER:
+ * read_integer(&int64, fp) # <<<<<<<<<<<<<<
+ * int64 += col_args[j] + int64_prev[j]
+ * int64_prev[j] = int64
+ */
+ __pyx_t_6 = __pyx_f_4s3ql_10_deltadump_read_integer((&__pyx_v_int64), __pyx_v_fp); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":337
+ * if col_types[j] == _INTEGER:
+ * read_integer(&int64, fp)
+ * int64 += col_args[j] + int64_prev[j] # <<<<<<<<<<<<<<
+ * int64_prev[j] = int64
+ * rc = sqlite3_bind_int64(stmt, j+1, int64)
+ */
+ __pyx_v_int64 = (__pyx_v_int64 + ((__pyx_v_col_args[__pyx_v_j]) + (__pyx_v_int64_prev[__pyx_v_j])));
+
+ /* "s3ql/_deltadump.pyx":338
+ * read_integer(&int64, fp)
+ * int64 += col_args[j] + int64_prev[j]
+ * int64_prev[j] = int64 # <<<<<<<<<<<<<<
+ * rc = sqlite3_bind_int64(stmt, j+1, int64)
+ * if rc != SQLITE_OK:
+ */
+ (__pyx_v_int64_prev[__pyx_v_j]) = __pyx_v_int64;
+
+ /* "s3ql/_deltadump.pyx":339
+ * int64 += col_args[j] + int64_prev[j]
+ * int64_prev[j] = int64
+ * rc = sqlite3_bind_int64(stmt, j+1, int64) # <<<<<<<<<<<<<<
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ */
+ __pyx_v_rc = sqlite3_bind_int64(__pyx_v_stmt, (__pyx_v_j + 1), __pyx_v_int64);
+
+ /* "s3ql/_deltadump.pyx":340
+ * int64_prev[j] = int64
+ * rc = sqlite3_bind_int64(stmt, j+1, int64)
+ * if rc != SQLITE_OK: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ *
+ */
+ __pyx_t_5 = (__pyx_v_rc != SQLITE_OK);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":341
+ * rc = sqlite3_bind_int64(stmt, j+1, int64)
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ *
+ * if col_types[j] == _TIME:
+ */
+ __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ __pyx_t_7 = 0;
+ __pyx_t_7 = PyObject_Call(__pyx_t_8, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0;
+ __Pyx_Raise(__pyx_t_7, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L8;
+ }
+ __pyx_L8:;
+ goto __pyx_L7;
+ }
+ __pyx_L7:;
+
+ /* "s3ql/_deltadump.pyx":343
+ * raise apsw.exceptionfor(rc)
+ *
+ * if col_types[j] == _TIME: # <<<<<<<<<<<<<<
+ * read_integer(&int64, fp)
+ * int64 += col_args[j] + int64_prev[j]
+ */
+ __pyx_t_5 = ((__pyx_v_col_types[__pyx_v_j]) == __pyx_v_4s3ql_10_deltadump__TIME);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":344
+ *
+ * if col_types[j] == _TIME:
+ * read_integer(&int64, fp) # <<<<<<<<<<<<<<
+ * int64 += col_args[j] + int64_prev[j]
+ * int64_prev[j] = int64
+ */
+ __pyx_t_6 = __pyx_f_4s3ql_10_deltadump_read_integer((&__pyx_v_int64), __pyx_v_fp); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 344; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":345
+ * if col_types[j] == _TIME:
+ * read_integer(&int64, fp)
+ * int64 += col_args[j] + int64_prev[j] # <<<<<<<<<<<<<<
+ * int64_prev[j] = int64
+ * rc = sqlite3_bind_double(stmt, j+1, int64 / time_scale)
+ */
+ __pyx_v_int64 = (__pyx_v_int64 + ((__pyx_v_col_args[__pyx_v_j]) + (__pyx_v_int64_prev[__pyx_v_j])));
+
+ /* "s3ql/_deltadump.pyx":346
+ * read_integer(&int64, fp)
+ * int64 += col_args[j] + int64_prev[j]
+ * int64_prev[j] = int64 # <<<<<<<<<<<<<<
+ * rc = sqlite3_bind_double(stmt, j+1, int64 / time_scale)
+ * if rc != SQLITE_OK:
+ */
+ (__pyx_v_int64_prev[__pyx_v_j]) = __pyx_v_int64;
+
+ /* "s3ql/_deltadump.pyx":347
+ * int64 += col_args[j] + int64_prev[j]
+ * int64_prev[j] = int64
+ * rc = sqlite3_bind_double(stmt, j+1, int64 / time_scale) # <<<<<<<<<<<<<<
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ */
+ if (unlikely(__pyx_v_4s3ql_10_deltadump_time_scale == 0)) {
+ PyErr_Format(PyExc_ZeroDivisionError, "float division");
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 347; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_v_rc = sqlite3_bind_double(__pyx_v_stmt, (__pyx_v_j + 1), (((double)__pyx_v_int64) / __pyx_v_4s3ql_10_deltadump_time_scale));
+
+ /* "s3ql/_deltadump.pyx":348
+ * int64_prev[j] = int64
+ * rc = sqlite3_bind_double(stmt, j+1, int64 / time_scale)
+ * if rc != SQLITE_OK: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ *
+ */
+ __pyx_t_5 = (__pyx_v_rc != SQLITE_OK);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":349
+ * rc = sqlite3_bind_double(stmt, j+1, int64 / time_scale)
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ *
+ * elif col_types[j] == _BLOB:
+ */
+ __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_9 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_7 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_8));
+ PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_7);
+ __pyx_t_7 = 0;
+ __pyx_t_7 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_8), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0;
+ __Pyx_Raise(__pyx_t_7, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L10;
+ }
+ __pyx_L10:;
+ goto __pyx_L9;
+ }
+
+ /* "s3ql/_deltadump.pyx":351
+ * raise apsw.exceptionfor(rc)
+ *
+ * elif col_types[j] == _BLOB: # <<<<<<<<<<<<<<
+ * if col_args[j] == 0:
+ * read_integer(&int64, fp)
+ */
+ __pyx_t_5 = ((__pyx_v_col_types[__pyx_v_j]) == __pyx_v_4s3ql_10_deltadump__BLOB);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":352
+ *
+ * elif col_types[j] == _BLOB:
+ * if col_args[j] == 0: # <<<<<<<<<<<<<<
+ * read_integer(&int64, fp)
+ * len_ = int64_prev[j] + int64
+ */
+ __pyx_t_5 = ((__pyx_v_col_args[__pyx_v_j]) == 0);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":353
+ * elif col_types[j] == _BLOB:
+ * if col_args[j] == 0:
+ * read_integer(&int64, fp) # <<<<<<<<<<<<<<
+ * len_ = int64_prev[j] + int64
+ * int64_prev[j] = len_
+ */
+ __pyx_t_6 = __pyx_f_4s3ql_10_deltadump_read_integer((&__pyx_v_int64), __pyx_v_fp); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":354
+ * if col_args[j] == 0:
+ * read_integer(&int64, fp)
+ * len_ = int64_prev[j] + int64 # <<<<<<<<<<<<<<
+ * int64_prev[j] = len_
+ * else:
+ */
+ __pyx_v_len_ = ((__pyx_v_int64_prev[__pyx_v_j]) + __pyx_v_int64);
+
+ /* "s3ql/_deltadump.pyx":355
+ * read_integer(&int64, fp)
+ * len_ = int64_prev[j] + int64
+ * int64_prev[j] = len_ # <<<<<<<<<<<<<<
+ * else:
+ * len_ = col_args[j]
+ */
+ (__pyx_v_int64_prev[__pyx_v_j]) = __pyx_v_len_;
+ goto __pyx_L11;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":357
+ * int64_prev[j] = len_
+ * else:
+ * len_ = col_args[j] # <<<<<<<<<<<<<<
+ *
+ * if len_ > MAX_BLOB_SIZE:
+ */
+ __pyx_v_len_ = (__pyx_v_col_args[__pyx_v_j]);
+ }
+ __pyx_L11:;
+
+ /* "s3ql/_deltadump.pyx":359
+ * len_ = col_args[j]
+ *
+ * if len_ > MAX_BLOB_SIZE: # <<<<<<<<<<<<<<
+ * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE)
+ *
+ */
+ __pyx_t_7 = PyInt_FromLong(__pyx_v_len_); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__MAX_BLOB_SIZE); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyObject_RichCompare(__pyx_t_7, __pyx_t_8, Py_GT); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":360
+ *
+ * if len_ > MAX_BLOB_SIZE:
+ * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE) # <<<<<<<<<<<<<<
+ *
+ * if len_ != 0:
+ */
+ __pyx_t_9 = PyInt_FromLong(__pyx_v_len_); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__MAX_BLOB_SIZE); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_7));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_16));
+ PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_kp_s_16));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_16));
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_9 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_t_7), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0;
+ __Pyx_Raise(__pyx_t_8, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L12;
+ }
+ __pyx_L12:;
+
+ /* "s3ql/_deltadump.pyx":362
+ * raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE)
+ *
+ * if len_ != 0: # <<<<<<<<<<<<<<
+ * fread(buf, len_, fp)
+ *
+ */
+ __pyx_t_5 = (__pyx_v_len_ != 0);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":363
+ *
+ * if len_ != 0:
+ * fread(buf, len_, fp) # <<<<<<<<<<<<<<
+ *
+ * rc = sqlite3_bind_blob(stmt, j+1, buf, len_, SQLITE_TRANSIENT)
+ */
+ __pyx_t_6 = __pyx_f_4s3ql_10_deltadump_fread(__pyx_v_buf, __pyx_v_len_, __pyx_v_fp); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L13;
+ }
+ __pyx_L13:;
+
+ /* "s3ql/_deltadump.pyx":365
+ * fread(buf, len_, fp)
+ *
+ * rc = sqlite3_bind_blob(stmt, j+1, buf, len_, SQLITE_TRANSIENT) # <<<<<<<<<<<<<<
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ */
+ __pyx_v_rc = sqlite3_bind_blob(__pyx_v_stmt, (__pyx_v_j + 1), __pyx_v_buf, __pyx_v_len_, SQLITE_TRANSIENT);
+
+ /* "s3ql/_deltadump.pyx":366
+ *
+ * rc = sqlite3_bind_blob(stmt, j+1, buf, len_, SQLITE_TRANSIENT)
+ * if rc != SQLITE_OK: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ *
+ */
+ __pyx_t_5 = (__pyx_v_rc != SQLITE_OK);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":367
+ * rc = sqlite3_bind_blob(stmt, j+1, buf, len_, SQLITE_TRANSIENT)
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ *
+ * rc = sqlite3_step(stmt)
+ */
+ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_7 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_t_7, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0;
+ __Pyx_Raise(__pyx_t_8, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L14;
+ }
+ __pyx_L14:;
+ goto __pyx_L9;
+ }
+ __pyx_L9:;
+ }
+
+ /* "s3ql/_deltadump.pyx":369
+ * raise apsw.exceptionfor(rc)
+ *
+ * rc = sqlite3_step(stmt) # <<<<<<<<<<<<<<
+ * if rc != SQLITE_DONE:
+ * raise apsw.exceptionfor(rc)
+ */
+ __pyx_v_rc = sqlite3_step(__pyx_v_stmt);
+
+ /* "s3ql/_deltadump.pyx":370
+ *
+ * rc = sqlite3_step(stmt)
+ * if rc != SQLITE_DONE: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ *
+ */
+ __pyx_t_5 = (__pyx_v_rc != SQLITE_DONE);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":371
+ * rc = sqlite3_step(stmt)
+ * if rc != SQLITE_DONE:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ *
+ * rc = sqlite3_reset(stmt)
+ */
+ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_7));
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_7), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0;
+ __Pyx_Raise(__pyx_t_8, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L15;
+ }
+ __pyx_L15:;
+
+ /* "s3ql/_deltadump.pyx":373
+ * raise apsw.exceptionfor(rc)
+ *
+ * rc = sqlite3_reset(stmt) # <<<<<<<<<<<<<<
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc)
+ */
+ __pyx_v_rc = sqlite3_reset(__pyx_v_stmt);
+
+ /* "s3ql/_deltadump.pyx":374
+ *
+ * rc = sqlite3_reset(stmt)
+ * if rc != SQLITE_OK: # <<<<<<<<<<<<<<
+ * raise apsw.exceptionfor(rc)
+ *
+ */
+ __pyx_t_5 = (__pyx_v_rc != SQLITE_OK);
+ if (__pyx_t_5) {
+
+ /* "s3ql/_deltadump.pyx":375
+ * rc = sqlite3_reset(stmt)
+ * if rc != SQLITE_OK:
+ * raise apsw.exceptionfor(rc) # <<<<<<<<<<<<<<
+ *
+ * cdef inline int write_integer(int64_t int64, FILE* fp) except -1:
+ */
+ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__apsw); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_7 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__exceptionfor); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __pyx_t_8 = PyInt_FromLong(__pyx_v_rc); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_9));
+ PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_8);
+ __pyx_t_8 = 0;
+ __pyx_t_8 = PyObject_Call(__pyx_t_7, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0;
+ __Pyx_Raise(__pyx_t_8, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L16;
+ }
+ __pyx_L16:;
+ }
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("s3ql._deltadump._load_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":377
+ * raise apsw.exceptionfor(rc)
+ *
+ * cdef inline int write_integer(int64_t int64, FILE* fp) except -1: # <<<<<<<<<<<<<<
+ * '''Write *int64* into *fp*, using as little space as possible'''
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_write_integer(int64_t __pyx_v_int64, FILE *__pyx_v_fp) {
+ uint8_t __pyx_v_int8;
+ size_t __pyx_v_len_;
+ uint64_t __pyx_v_uint64;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ uint64_t __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("write_integer");
+
+ /* "s3ql/_deltadump.pyx":384
+ * cdef uint64_t uint64
+ *
+ * if int64 < 0: # <<<<<<<<<<<<<<
+ * uint64 = <uint64_t> -int64
+ * int8 = <uint8_t> 0x80 # Highest bit set
+ */
+ __pyx_t_1 = (__pyx_v_int64 < 0);
+ if (__pyx_t_1) {
+
+ /* "s3ql/_deltadump.pyx":385
+ *
+ * if int64 < 0:
+ * uint64 = <uint64_t> -int64 # <<<<<<<<<<<<<<
+ * int8 = <uint8_t> 0x80 # Highest bit set
+ * else:
+ */
+ __pyx_v_uint64 = ((uint64_t)(-__pyx_v_int64));
+
+ /* "s3ql/_deltadump.pyx":386
+ * if int64 < 0:
+ * uint64 = <uint64_t> -int64
+ * int8 = <uint8_t> 0x80 # Highest bit set # <<<<<<<<<<<<<<
+ * else:
+ * uint64 = <uint64_t> int64
+ */
+ __pyx_v_int8 = ((uint8_t)0x80);
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":388
+ * int8 = <uint8_t> 0x80 # Highest bit set
+ * else:
+ * uint64 = <uint64_t> int64 # <<<<<<<<<<<<<<
+ * int8 = 0
+ *
+ */
+ __pyx_v_uint64 = ((uint64_t)__pyx_v_int64);
+
+ /* "s3ql/_deltadump.pyx":389
+ * else:
+ * uint64 = <uint64_t> int64
+ * int8 = 0 # <<<<<<<<<<<<<<
+ *
+ * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64):
+ */
+ __pyx_v_int8 = 0;
+ }
+ __pyx_L3:;
+
+ /* "s3ql/_deltadump.pyx":391
+ * int8 = 0
+ *
+ * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64): # <<<<<<<<<<<<<<
+ * len_ = 0
+ * int8 += <uint8_t> uint64
+ */
+ __pyx_t_1 = (__pyx_v_uint64 < 0x80);
+ if (__pyx_t_1) {
+ __pyx_t_2 = __pyx_v_uint64;
+ __pyx_t_3 = ((int)(__pyx_t_2 != __pyx_v_4s3ql_10_deltadump_INT8));
+ if (__pyx_t_3) {
+ __pyx_t_4 = ((int)(__pyx_t_2 != __pyx_v_4s3ql_10_deltadump_INT16));
+ __pyx_t_5 = __pyx_t_4;
+ } else {
+ __pyx_t_5 = __pyx_t_3;
+ }
+ if (__pyx_t_5) {
+ __pyx_t_3 = ((int)(__pyx_t_2 != __pyx_v_4s3ql_10_deltadump_INT32));
+ __pyx_t_4 = __pyx_t_3;
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ }
+ if (__pyx_t_4) {
+ __pyx_t_5 = ((int)(__pyx_t_2 != __pyx_v_4s3ql_10_deltadump_INT64));
+ __pyx_t_3 = __pyx_t_5;
+ } else {
+ __pyx_t_3 = __pyx_t_4;
+ }
+ __pyx_t_4 = __pyx_t_3;
+ __pyx_t_3 = __pyx_t_4;
+ } else {
+ __pyx_t_3 = __pyx_t_1;
+ }
+ if (__pyx_t_3) {
+
+ /* "s3ql/_deltadump.pyx":392
+ *
+ * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64):
+ * len_ = 0 # <<<<<<<<<<<<<<
+ * int8 += <uint8_t> uint64
+ * elif uint64 < UINT8_MAX:
+ */
+ __pyx_v_len_ = 0;
+
+ /* "s3ql/_deltadump.pyx":393
+ * if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64):
+ * len_ = 0
+ * int8 += <uint8_t> uint64 # <<<<<<<<<<<<<<
+ * elif uint64 < UINT8_MAX:
+ * len_ = 1
+ */
+ __pyx_v_int8 = (__pyx_v_int8 + ((uint8_t)__pyx_v_uint64));
+ goto __pyx_L4;
+ }
+
+ /* "s3ql/_deltadump.pyx":394
+ * len_ = 0
+ * int8 += <uint8_t> uint64
+ * elif uint64 < UINT8_MAX: # <<<<<<<<<<<<<<
+ * len_ = 1
+ * int8 += INT8
+ */
+ __pyx_t_3 = (__pyx_v_uint64 < UINT8_MAX);
+ if (__pyx_t_3) {
+
+ /* "s3ql/_deltadump.pyx":395
+ * int8 += <uint8_t> uint64
+ * elif uint64 < UINT8_MAX:
+ * len_ = 1 # <<<<<<<<<<<<<<
+ * int8 += INT8
+ * elif uint64 < UINT16_MAX:
+ */
+ __pyx_v_len_ = 1;
+
+ /* "s3ql/_deltadump.pyx":396
+ * elif uint64 < UINT8_MAX:
+ * len_ = 1
+ * int8 += INT8 # <<<<<<<<<<<<<<
+ * elif uint64 < UINT16_MAX:
+ * len_ = 2
+ */
+ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_10_deltadump_INT8);
+ goto __pyx_L4;
+ }
+
+ /* "s3ql/_deltadump.pyx":397
+ * len_ = 1
+ * int8 += INT8
+ * elif uint64 < UINT16_MAX: # <<<<<<<<<<<<<<
+ * len_ = 2
+ * int8 += INT16
+ */
+ __pyx_t_3 = (__pyx_v_uint64 < UINT16_MAX);
+ if (__pyx_t_3) {
+
+ /* "s3ql/_deltadump.pyx":398
+ * int8 += INT8
+ * elif uint64 < UINT16_MAX:
+ * len_ = 2 # <<<<<<<<<<<<<<
+ * int8 += INT16
+ * elif uint64 < UINT32_MAX:
+ */
+ __pyx_v_len_ = 2;
+
+ /* "s3ql/_deltadump.pyx":399
+ * elif uint64 < UINT16_MAX:
+ * len_ = 2
+ * int8 += INT16 # <<<<<<<<<<<<<<
+ * elif uint64 < UINT32_MAX:
+ * len_ = 4
+ */
+ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_10_deltadump_INT16);
+ goto __pyx_L4;
+ }
+
+ /* "s3ql/_deltadump.pyx":400
+ * len_ = 2
+ * int8 += INT16
+ * elif uint64 < UINT32_MAX: # <<<<<<<<<<<<<<
+ * len_ = 4
+ * int8 += INT32
+ */
+ __pyx_t_3 = (__pyx_v_uint64 < UINT32_MAX);
+ if (__pyx_t_3) {
+
+ /* "s3ql/_deltadump.pyx":401
+ * int8 += INT16
+ * elif uint64 < UINT32_MAX:
+ * len_ = 4 # <<<<<<<<<<<<<<
+ * int8 += INT32
+ * else:
+ */
+ __pyx_v_len_ = 4;
+
+ /* "s3ql/_deltadump.pyx":402
+ * elif uint64 < UINT32_MAX:
+ * len_ = 4
+ * int8 += INT32 # <<<<<<<<<<<<<<
+ * else:
+ * len_ = 8
+ */
+ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_10_deltadump_INT32);
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":404
+ * int8 += INT32
+ * else:
+ * len_ = 8 # <<<<<<<<<<<<<<
+ * int8 += INT64
+ *
+ */
+ __pyx_v_len_ = 8;
+
+ /* "s3ql/_deltadump.pyx":405
+ * else:
+ * len_ = 8
+ * int8 += INT64 # <<<<<<<<<<<<<<
+ *
+ * fwrite(&int8, 1, fp)
+ */
+ __pyx_v_int8 = (__pyx_v_int8 + __pyx_v_4s3ql_10_deltadump_INT64);
+ }
+ __pyx_L4:;
+
+ /* "s3ql/_deltadump.pyx":407
+ * int8 += INT64
+ *
+ * fwrite(&int8, 1, fp) # <<<<<<<<<<<<<<
+ * if len_ != 0:
+ * uint64 = htole64(uint64)
+ */
+ __pyx_t_6 = __pyx_f_4s3ql_10_deltadump_fwrite((&__pyx_v_int8), 1, __pyx_v_fp); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":408
+ *
+ * fwrite(&int8, 1, fp)
+ * if len_ != 0: # <<<<<<<<<<<<<<
+ * uint64 = htole64(uint64)
+ * fwrite(&uint64, len_, fp)
+ */
+ __pyx_t_3 = (__pyx_v_len_ != 0);
+ if (__pyx_t_3) {
+
+ /* "s3ql/_deltadump.pyx":409
+ * fwrite(&int8, 1, fp)
+ * if len_ != 0:
+ * uint64 = htole64(uint64) # <<<<<<<<<<<<<<
+ * fwrite(&uint64, len_, fp)
+ *
+ */
+ __pyx_v_uint64 = htole64(__pyx_v_uint64);
+
+ /* "s3ql/_deltadump.pyx":410
+ * if len_ != 0:
+ * uint64 = htole64(uint64)
+ * fwrite(&uint64, len_, fp) # <<<<<<<<<<<<<<
+ *
+ * cdef inline int read_integer(int64_t* out, FILE* fp) except -1:
+ */
+ __pyx_t_6 = __pyx_f_4s3ql_10_deltadump_fwrite((&__pyx_v_uint64), __pyx_v_len_, __pyx_v_fp); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("s3ql._deltadump.write_integer", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":412
+ * fwrite(&uint64, len_, fp)
+ *
+ * cdef inline int read_integer(int64_t* out, FILE* fp) except -1: # <<<<<<<<<<<<<<
+ * '''Read integer written using `write_integer` from *fp*'''
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_4s3ql_10_deltadump_read_integer(int64_t *__pyx_v_out, FILE *__pyx_v_fp) {
+ uint8_t __pyx_v_int8;
+ size_t __pyx_v_len_;
+ uint64_t __pyx_v_uint64;
+ char __pyx_v_negative;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("read_integer");
+
+ /* "s3ql/_deltadump.pyx":420
+ * cdef char negative
+ *
+ * fread(&int8, 1, fp) # <<<<<<<<<<<<<<
+ *
+ * if int8 & 0x80 != 0:
+ */
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_fread((&__pyx_v_int8), 1, __pyx_v_fp); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":422
+ * fread(&int8, 1, fp)
+ *
+ * if int8 & 0x80 != 0: # <<<<<<<<<<<<<<
+ * negative = 1
+ * int8 = int8 & (~ 0x80)
+ */
+ __pyx_t_2 = ((__pyx_v_int8 & 0x80) != 0);
+ if (__pyx_t_2) {
+
+ /* "s3ql/_deltadump.pyx":423
+ *
+ * if int8 & 0x80 != 0:
+ * negative = 1 # <<<<<<<<<<<<<<
+ * int8 = int8 & (~ 0x80)
+ * else:
+ */
+ __pyx_v_negative = 1;
+
+ /* "s3ql/_deltadump.pyx":424
+ * if int8 & 0x80 != 0:
+ * negative = 1
+ * int8 = int8 & (~ 0x80) # <<<<<<<<<<<<<<
+ * else:
+ * negative = 0
+ */
+ __pyx_v_int8 = (__pyx_v_int8 & (~0x80));
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":426
+ * int8 = int8 & (~ 0x80)
+ * else:
+ * negative = 0 # <<<<<<<<<<<<<<
+ *
+ * if int8 == INT8:
+ */
+ __pyx_v_negative = 0;
+ }
+ __pyx_L3:;
+
+ /* "s3ql/_deltadump.pyx":428
+ * negative = 0
+ *
+ * if int8 == INT8: # <<<<<<<<<<<<<<
+ * len_ = 1
+ * elif int8 == INT16:
+ */
+ __pyx_t_2 = (__pyx_v_int8 == __pyx_v_4s3ql_10_deltadump_INT8);
+ if (__pyx_t_2) {
+
+ /* "s3ql/_deltadump.pyx":429
+ *
+ * if int8 == INT8:
+ * len_ = 1 # <<<<<<<<<<<<<<
+ * elif int8 == INT16:
+ * len_ = 2
+ */
+ __pyx_v_len_ = 1;
+ goto __pyx_L4;
+ }
+
+ /* "s3ql/_deltadump.pyx":430
+ * if int8 == INT8:
+ * len_ = 1
+ * elif int8 == INT16: # <<<<<<<<<<<<<<
+ * len_ = 2
+ * elif int8 == INT32:
+ */
+ __pyx_t_2 = (__pyx_v_int8 == __pyx_v_4s3ql_10_deltadump_INT16);
+ if (__pyx_t_2) {
+
+ /* "s3ql/_deltadump.pyx":431
+ * len_ = 1
+ * elif int8 == INT16:
+ * len_ = 2 # <<<<<<<<<<<<<<
+ * elif int8 == INT32:
+ * len_ = 4
+ */
+ __pyx_v_len_ = 2;
+ goto __pyx_L4;
+ }
+
+ /* "s3ql/_deltadump.pyx":432
+ * elif int8 == INT16:
+ * len_ = 2
+ * elif int8 == INT32: # <<<<<<<<<<<<<<
+ * len_ = 4
+ * elif int8 == INT64:
+ */
+ __pyx_t_2 = (__pyx_v_int8 == __pyx_v_4s3ql_10_deltadump_INT32);
+ if (__pyx_t_2) {
+
+ /* "s3ql/_deltadump.pyx":433
+ * len_ = 2
+ * elif int8 == INT32:
+ * len_ = 4 # <<<<<<<<<<<<<<
+ * elif int8 == INT64:
+ * len_ = 8
+ */
+ __pyx_v_len_ = 4;
+ goto __pyx_L4;
+ }
+
+ /* "s3ql/_deltadump.pyx":434
+ * elif int8 == INT32:
+ * len_ = 4
+ * elif int8 == INT64: # <<<<<<<<<<<<<<
+ * len_ = 8
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_int8 == __pyx_v_4s3ql_10_deltadump_INT64);
+ if (__pyx_t_2) {
+
+ /* "s3ql/_deltadump.pyx":435
+ * len_ = 4
+ * elif int8 == INT64:
+ * len_ = 8 # <<<<<<<<<<<<<<
+ * else:
+ * len_ = 0
+ */
+ __pyx_v_len_ = 8;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":437
+ * len_ = 8
+ * else:
+ * len_ = 0 # <<<<<<<<<<<<<<
+ * uint64 = int8
+ *
+ */
+ __pyx_v_len_ = 0;
+
+ /* "s3ql/_deltadump.pyx":438
+ * else:
+ * len_ = 0
+ * uint64 = int8 # <<<<<<<<<<<<<<
+ *
+ * if len_ != 0:
+ */
+ __pyx_v_uint64 = __pyx_v_int8;
+ }
+ __pyx_L4:;
+
+ /* "s3ql/_deltadump.pyx":440
+ * uint64 = int8
+ *
+ * if len_ != 0: # <<<<<<<<<<<<<<
+ * uint64 = 0
+ * fread(&uint64, len_, fp)
+ */
+ __pyx_t_2 = (__pyx_v_len_ != 0);
+ if (__pyx_t_2) {
+
+ /* "s3ql/_deltadump.pyx":441
+ *
+ * if len_ != 0:
+ * uint64 = 0 # <<<<<<<<<<<<<<
+ * fread(&uint64, len_, fp)
+ * uint64 = le64toh(uint64)
+ */
+ __pyx_v_uint64 = 0;
+
+ /* "s3ql/_deltadump.pyx":442
+ * if len_ != 0:
+ * uint64 = 0
+ * fread(&uint64, len_, fp) # <<<<<<<<<<<<<<
+ * uint64 = le64toh(uint64)
+ *
+ */
+ __pyx_t_1 = __pyx_f_4s3ql_10_deltadump_fread((&__pyx_v_uint64), __pyx_v_len_, __pyx_v_fp); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 442; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":443
+ * uint64 = 0
+ * fread(&uint64, len_, fp)
+ * uint64 = le64toh(uint64) # <<<<<<<<<<<<<<
+ *
+ * if negative == 1:
+ */
+ __pyx_v_uint64 = le64toh(__pyx_v_uint64);
+ goto __pyx_L5;
+ }
+ __pyx_L5:;
+
+ /* "s3ql/_deltadump.pyx":445
+ * uint64 = le64toh(uint64)
+ *
+ * if negative == 1: # <<<<<<<<<<<<<<
+ * out[0] = - <int64_t> uint64
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_negative == 1);
+ if (__pyx_t_2) {
+
+ /* "s3ql/_deltadump.pyx":446
+ *
+ * if negative == 1:
+ * out[0] = - <int64_t> uint64 # <<<<<<<<<<<<<<
+ * else:
+ * out[0] = <int64_t> uint64
+ */
+ (__pyx_v_out[0]) = (-((int64_t)__pyx_v_uint64));
+ goto __pyx_L6;
+ }
+ /*else*/ {
+
+ /* "s3ql/_deltadump.pyx":448
+ * out[0] = - <int64_t> uint64
+ * else:
+ * out[0] = <int64_t> uint64 # <<<<<<<<<<<<<<
+ *
+ * def fail():
+ */
+ (__pyx_v_out[0]) = ((int64_t)__pyx_v_uint64);
+ }
+ __pyx_L6:;
+
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("s3ql._deltadump.read_integer", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "s3ql/_deltadump.pyx":450
+ * out[0] = <int64_t> uint64
+ *
+ * def fail(): # <<<<<<<<<<<<<<
+ * raise RuntimeError('Ouch!')
+ */
+
+static PyObject *__pyx_pf_4s3ql_10_deltadump_3fail(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static char __pyx_doc_4s3ql_10_deltadump_3fail[] = "fail()";
+static PyMethodDef __pyx_mdef_4s3ql_10_deltadump_3fail = {__Pyx_NAMESTR("fail"), (PyCFunction)__pyx_pf_4s3ql_10_deltadump_3fail, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_4s3ql_10_deltadump_3fail)};
+static PyObject *__pyx_pf_4s3ql_10_deltadump_3fail(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("fail");
+ __pyx_self = __pyx_self;
+
+ /* "s3ql/_deltadump.pyx":451
+ *
+ * def fail():
+ * raise RuntimeError('Ouch!') # <<<<<<<<<<<<<<
+ */
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_18), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("s3ql._deltadump.fail", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_tp_new_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load(PyTypeObject *t, PyObject *a, PyObject *k) {
+ PyObject *o = (*t->tp_alloc)(t, 0);
+ if (!o) return 0;
+ return o;
+}
+
+static void __pyx_tp_dealloc_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load(PyObject *o) {
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static PyMethodDef __pyx_methods_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load[] = {
+ {0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number___pyx_scope_struct___dump_or_load = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_hex*/
+ #endif
+ 0, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ #if PY_VERSION_HEX >= 0x02050000
+ 0, /*nb_index*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence___pyx_scope_struct___dump_or_load = {
+ 0, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ 0, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ 0, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping___pyx_scope_struct___dump_or_load = {
+ 0, /*mp_length*/
+ 0, /*mp_subscript*/
+ 0, /*mp_ass_subscript*/
+};
+
+static PyBufferProcs __pyx_tp_as_buffer___pyx_scope_struct___dump_or_load = {
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getreadbuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getwritebuffer*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getsegcount*/
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ 0, /*bf_getcharbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_getbuffer*/
+ #endif
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*bf_releasebuffer*/
+ #endif
+};
+
+static PyTypeObject __pyx_type_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load = {
+ PyVarObject_HEAD_INIT(0, 0)
+ __Pyx_NAMESTR("s3ql._deltadump.__pyx_scope_struct___dump_or_load"), /*tp_name*/
+ sizeof(struct __pyx_obj_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*tp_repr*/
+ &__pyx_tp_as_number___pyx_scope_struct___dump_or_load, /*tp_as_number*/
+ &__pyx_tp_as_sequence___pyx_scope_struct___dump_or_load, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping___pyx_scope_struct___dump_or_load, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ &__pyx_tp_as_buffer___pyx_scope_struct___dump_or_load, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER, /*tp_flags*/
+ 0, /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ #if PY_VERSION_HEX >= 0x02060000
+ 0, /*tp_version_tag*/
+ #endif
+};
+
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ __Pyx_NAMESTR("_deltadump"),
+ __Pyx_DOCSTR(__pyx_k_19), /* m_doc */
+ -1, /* m_size */
+ __pyx_methods /* m_methods */,
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_s_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 0, 1, 0},
+ {&__pyx_kp_s_12, __pyx_k_12, sizeof(__pyx_k_12), 0, 0, 1, 0},
+ {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0},
+ {&__pyx_kp_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 0},
+ {&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 0},
+ {&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0},
+ {&__pyx_n_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 1},
+ {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0},
+ {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0},
+ {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0},
+ {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0},
+ {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0},
+ {&__pyx_kp_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 0},
+ {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0},
+ {&__pyx_n_s__BLOB, __pyx_k__BLOB, sizeof(__pyx_k__BLOB), 0, 0, 1, 1},
+ {&__pyx_n_s__CleanupManager, __pyx_k__CleanupManager, sizeof(__pyx_k__CleanupManager), 0, 0, 1, 1},
+ {&__pyx_n_s__INTEGER, __pyx_k__INTEGER, sizeof(__pyx_k__INTEGER), 0, 0, 1, 1},
+ {&__pyx_n_s__IOError, __pyx_k__IOError, sizeof(__pyx_k__IOError), 0, 0, 1, 1},
+ {&__pyx_n_s__MAX_BLOB_SIZE, __pyx_k__MAX_BLOB_SIZE, sizeof(__pyx_k__MAX_BLOB_SIZE), 0, 0, 1, 1},
+ {&__pyx_n_s__OSError, __pyx_k__OSError, sizeof(__pyx_k__OSError), 0, 0, 1, 1},
+ {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1},
+ {&__pyx_n_s__TIME, __pyx_k__TIME, sizeof(__pyx_k__TIME), 0, 0, 1, 1},
+ {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
+ {&__pyx_n_s____enter__, __pyx_k____enter__, sizeof(__pyx_k____enter__), 0, 0, 1, 1},
+ {&__pyx_n_s____exit__, __pyx_k____exit__, sizeof(__pyx_k____exit__), 0, 0, 1, 1},
+ {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
+ {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
+ {&__pyx_n_s___dump_or_load, __pyx_k___dump_or_load, sizeof(__pyx_k___dump_or_load), 0, 0, 1, 1},
+ {&__pyx_n_s__apsw, __pyx_k__apsw, sizeof(__pyx_k__apsw), 0, 0, 1, 1},
+ {&__pyx_n_s__cleanup_manager, __pyx_k__cleanup_manager, sizeof(__pyx_k__cleanup_manager), 0, 0, 1, 1},
+ {&__pyx_n_s__columns, __pyx_k__columns, sizeof(__pyx_k__columns), 0, 0, 1, 1},
+ {&__pyx_n_s__conn, __pyx_k__conn, sizeof(__pyx_k__conn), 0, 0, 1, 1},
+ {&__pyx_n_s__db, __pyx_k__db, sizeof(__pyx_k__db), 0, 0, 1, 1},
+ {&__pyx_n_s__debug, __pyx_k__debug, sizeof(__pyx_k__debug), 0, 0, 1, 1},
+ {&__pyx_n_s__deltadump, __pyx_k__deltadump, sizeof(__pyx_k__deltadump), 0, 0, 1, 1},
+ {&__pyx_n_s__dump_table, __pyx_k__dump_table, sizeof(__pyx_k__dump_table), 0, 0, 1, 1},
+ {&__pyx_n_s__exceptionfor, __pyx_k__exceptionfor, sizeof(__pyx_k__exceptionfor), 0, 0, 1, 1},
+ {&__pyx_n_s__fail, __pyx_k__fail, sizeof(__pyx_k__fail), 0, 0, 1, 1},
+ {&__pyx_n_s__fh, __pyx_k__fh, sizeof(__pyx_k__fh), 0, 0, 1, 1},
+ {&__pyx_n_s__fileno, __pyx_k__fileno, sizeof(__pyx_k__fileno), 0, 0, 1, 1},
+ {&__pyx_n_s__getLogger, __pyx_k__getLogger, sizeof(__pyx_k__getLogger), 0, 0, 1, 1},
+ {&__pyx_n_s__get_val, __pyx_k__get_val, sizeof(__pyx_k__get_val), 0, 0, 1, 1},
+ {&__pyx_n_s__join, __pyx_k__join, sizeof(__pyx_k__join), 0, 0, 1, 1},
+ {&__pyx_n_s__load_table, __pyx_k__load_table, sizeof(__pyx_k__load_table), 0, 0, 1, 1},
+ {&__pyx_n_s__log, __pyx_k__log, sizeof(__pyx_k__log), 0, 0, 1, 1},
+ {&__pyx_n_s__logging, __pyx_k__logging, sizeof(__pyx_k__logging), 0, 0, 1, 1},
+ {&__pyx_n_s__order, __pyx_k__order, sizeof(__pyx_k__order), 0, 0, 1, 1},
+ {&__pyx_n_s__os, __pyx_k__os, sizeof(__pyx_k__os), 0, 0, 1, 1},
+ {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1},
+ {&__pyx_n_s__register, __pyx_k__register, sizeof(__pyx_k__register), 0, 0, 1, 1},
+ {&__pyx_n_s__sqlite3pointer, __pyx_k__sqlite3pointer, sizeof(__pyx_k__sqlite3pointer), 0, 0, 1, 1},
+ {&__pyx_n_s__table, __pyx_k__table, sizeof(__pyx_k__table), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+static int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_IOError = __Pyx_GetName(__pyx_b, __pyx_n_s__IOError); if (!__pyx_builtin_IOError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_OSError = __Pyx_GetName(__pyx_b, __pyx_n_s__OSError); if (!__pyx_builtin_OSError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants");
+
+ /* "s3ql/_deltadump.pyx":216
+ * sqlite3_db = <sqlite3*> PyLong_AsVoidPtr(db.conn.sqlite3pointer())
+ *
+ * with CleanupManager(log) as cleanup: # <<<<<<<<<<<<<<
+ * fd = dup(fh.fileno())
+ * if fd == -1:
+ */
+ __pyx_k_tuple_11 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11));
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_k_tuple_11, 1, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_INCREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_k_tuple_11, 2, Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11));
+
+ /* "s3ql/_deltadump.pyx":294
+ * for i in range(col_count):
+ * if sqlite3_column_type(stmt, i) is SQLITE_NULL:
+ * raise ValueError("Can't dump NULL values") # <<<<<<<<<<<<<<
+ *
+ * if col_types[i] == _INTEGER:
+ */
+ __pyx_k_tuple_13 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_13));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_12));
+ PyTuple_SET_ITEM(__pyx_k_tuple_13, 0, ((PyObject *)__pyx_kp_s_12));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_12));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13));
+
+ /* "s3ql/_deltadump.pyx":451
+ *
+ * def fail():
+ * raise RuntimeError('Ouch!') # <<<<<<<<<<<<<<
+ */
+ __pyx_k_tuple_18 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_18));
+ __Pyx_INCREF(((PyObject *)__pyx_kp_s_17));
+ PyTuple_SET_ITEM(__pyx_k_tuple_18, 0, ((PyObject *)__pyx_kp_s_17));
+ __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_17));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_18));
+
+ /* "s3ql/_deltadump.pyx":72
+ * import logging
+ *
+ * log = logging.getLogger('deltadump') # <<<<<<<<<<<<<<
+ *
+ * # Column types
+ */
+ __pyx_k_tuple_20 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_20));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__deltadump));
+ PyTuple_SET_ITEM(__pyx_k_tuple_20, 0, ((PyObject *)__pyx_n_s__deltadump));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__deltadump));
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_20));
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_4096 = PyInt_FromLong(4096); if (unlikely(!__pyx_int_4096)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC init_deltadump(void); /*proto*/
+PyMODINIT_FUNC init_deltadump(void)
+#else
+PyMODINIT_FUNC PyInit__deltadump(void); /*proto*/
+PyMODINIT_FUNC PyInit__deltadump(void)
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_REFNANNY
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+ if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+ }
+ #endif
+ __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__deltadump(void)");
+ if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #ifdef __pyx_binding_PyCFunctionType_USED
+ if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_deltadump"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_19), 0, PYTHON_API_VERSION);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ #if PY_MAJOR_VERSION < 3
+ Py_INCREF(__pyx_m);
+ #endif
+ __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
+ if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ /*--- Initialize various global constants etc. ---*/
+ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_module_is_main_s3ql___deltadump) {
+ if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ }
+ /*--- Builtin init code ---*/
+ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Constants init code ---*/
+ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ /*--- Global init code ---*/
+ /*--- Variable export code ---*/
+ /*--- Function export code ---*/
+ /*--- Type init code ---*/
+ if (PyType_Ready(&__pyx_type_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_ptype_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load = &__pyx_type_4s3ql_10_deltadump___pyx_scope_struct___dump_or_load;
+ /*--- Type import code ---*/
+ /*--- Variable import code ---*/
+ /*--- Function import code ---*/
+ /*--- Execution code ---*/
+
+ /* "s3ql/_deltadump.pyx":67
+ * SQLITE_NULL
+ *
+ * from .cleanup_manager import CleanupManager # <<<<<<<<<<<<<<
+ * import apsw
+ * import os
+ */
+ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __Pyx_INCREF(((PyObject *)__pyx_n_s__CleanupManager));
+ PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__CleanupManager));
+ __Pyx_GIVEREF(((PyObject *)__pyx_n_s__CleanupManager));
+ __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__cleanup_manager), ((PyObject *)__pyx_t_1), 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__CleanupManager); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__CleanupManager, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":68
+ *
+ * from .cleanup_manager import CleanupManager
+ * import apsw # <<<<<<<<<<<<<<
+ * import os
+ * import logging
+ */
+ __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__apsw), 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__apsw, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":69
+ * from .cleanup_manager import CleanupManager
+ * import apsw
+ * import os # <<<<<<<<<<<<<<
+ * import logging
+ *
+ */
+ __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__os), 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__os, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":70
+ * import apsw
+ * import os
+ * import logging # <<<<<<<<<<<<<<
+ *
+ * log = logging.getLogger('deltadump')
+ */
+ __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__logging), 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__logging, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":72
+ * import logging
+ *
+ * log = logging.getLogger('deltadump') # <<<<<<<<<<<<<<
+ *
+ * # Column types
+ */
+ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__logging); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__getLogger); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_k_tuple_20), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__log, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":75
+ *
+ * # Column types
+ * cdef int _INTEGER = 1 # <<<<<<<<<<<<<<
+ * cdef int _BLOB = 2
+ * cdef int _TIME = 3
+ */
+ __pyx_v_4s3ql_10_deltadump__INTEGER = 1;
+
+ /* "s3ql/_deltadump.pyx":76
+ * # Column types
+ * cdef int _INTEGER = 1
+ * cdef int _BLOB = 2 # <<<<<<<<<<<<<<
+ * cdef int _TIME = 3
+ *
+ */
+ __pyx_v_4s3ql_10_deltadump__BLOB = 2;
+
+ /* "s3ql/_deltadump.pyx":77
+ * cdef int _INTEGER = 1
+ * cdef int _BLOB = 2
+ * cdef int _TIME = 3 # <<<<<<<<<<<<<<
+ *
+ * # Make column types available as Python objects
+ */
+ __pyx_v_4s3ql_10_deltadump__TIME = 3;
+
+ /* "s3ql/_deltadump.pyx":80
+ *
+ * # Make column types available as Python objects
+ * INTEGER = _INTEGER # <<<<<<<<<<<<<<
+ * BLOB = _BLOB
+ * TIME = _TIME
+ */
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_4s3ql_10_deltadump__INTEGER); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__INTEGER, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":81
+ * # Make column types available as Python objects
+ * INTEGER = _INTEGER
+ * BLOB = _BLOB # <<<<<<<<<<<<<<
+ * TIME = _TIME
+ *
+ */
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_4s3ql_10_deltadump__BLOB); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__BLOB, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":82
+ * INTEGER = _INTEGER
+ * BLOB = _BLOB
+ * TIME = _TIME # <<<<<<<<<<<<<<
+ *
+ * # Integer length codes
+ */
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_4s3ql_10_deltadump__TIME); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__TIME, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":85
+ *
+ * # Integer length codes
+ * cdef uint8_t INT8 = 127 # <<<<<<<<<<<<<<
+ * cdef uint8_t INT16 = 126
+ * cdef uint8_t INT32 = 125
+ */
+ __pyx_v_4s3ql_10_deltadump_INT8 = 127;
+
+ /* "s3ql/_deltadump.pyx":86
+ * # Integer length codes
+ * cdef uint8_t INT8 = 127
+ * cdef uint8_t INT16 = 126 # <<<<<<<<<<<<<<
+ * cdef uint8_t INT32 = 125
+ * cdef uint8_t INT64 = 124
+ */
+ __pyx_v_4s3ql_10_deltadump_INT16 = 126;
+
+ /* "s3ql/_deltadump.pyx":87
+ * cdef uint8_t INT8 = 127
+ * cdef uint8_t INT16 = 126
+ * cdef uint8_t INT32 = 125 # <<<<<<<<<<<<<<
+ * cdef uint8_t INT64 = 124
+ *
+ */
+ __pyx_v_4s3ql_10_deltadump_INT32 = 125;
+
+ /* "s3ql/_deltadump.pyx":88
+ * cdef uint8_t INT16 = 126
+ * cdef uint8_t INT32 = 125
+ * cdef uint8_t INT64 = 124 # <<<<<<<<<<<<<<
+ *
+ * # Maximum size of BLOBs
+ */
+ __pyx_v_4s3ql_10_deltadump_INT64 = 124;
+
+ /* "s3ql/_deltadump.pyx":91
+ *
+ * # Maximum size of BLOBs
+ * MAX_BLOB_SIZE = 4096 # <<<<<<<<<<<<<<
+ *
+ * # Scale factor from time floats to integers
+ */
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__MAX_BLOB_SIZE, __pyx_int_4096) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+
+ /* "s3ql/_deltadump.pyx":95
+ * # Scale factor from time floats to integers
+ * # 1e9 would be perfect, but introduces rounding errors
+ * cdef double time_scale = 1<<30 # <<<<<<<<<<<<<<
+ *
+ * cdef inline int fwrite(const_void* buf, size_t len_, FILE* fp) except -1:
+ */
+ __pyx_v_4s3ql_10_deltadump_time_scale = 1073741824.0;
+
+ /* "s3ql/_deltadump.pyx":152
+ * return ptr
+ *
+ * def dump_table(table, order, columns, db, fh): # <<<<<<<<<<<<<<
+ * '''Dump *columns* of *table* into *fh*
+ *
+ */
+ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4s3ql_10_deltadump_dump_table, NULL, __pyx_n_s_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__dump_table, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":174
+ * return _dump_or_load(table, order, columns, db, fh)
+ *
+ * def load_table(table, columns, db, fh): # <<<<<<<<<<<<<<
+ * '''Load *columns* of *table* from *fh*
+ *
+ */
+ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4s3ql_10_deltadump_1load_table, NULL, __pyx_n_s_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__load_table, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":183
+ *
+ *
+ * def _dump_or_load(table, order, columns, db, fh): # <<<<<<<<<<<<<<
+ * '''Dump or load *columns* of *table*
+ *
+ */
+ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4s3ql_10_deltadump_2_dump_or_load, NULL, __pyx_n_s_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s___dump_or_load, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":450
+ * out[0] = <int64_t> uint64
+ *
+ * def fail(): # <<<<<<<<<<<<<<
+ * raise RuntimeError('Ouch!')
+ */
+ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4s3ql_10_deltadump_3fail, NULL, __pyx_n_s_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 450; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__fail, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 450; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "s3ql/_deltadump.pyx":1
+ * ''' # <<<<<<<<<<<<<<
+ * _deltadump.pyx - this file is part of S3QL (http://s3ql.googlecode.com)
+ *
+ */
+ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ if (__pyx_m) {
+ __Pyx_AddTraceback("init s3ql._deltadump", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ Py_DECREF(__pyx_m); __pyx_m = 0;
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init s3ql._deltadump");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if PY_MAJOR_VERSION < 3
+ return;
+ #else
+ return __pyx_m;
+ #endif
+}
+
+/* Runtime support code */
+
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule((char *)modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif /* CYTHON_REFNANNY */
+
+static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+ PyObject *result;
+ result = PyObject_GetAttr(dict, name);
+ if (!result) {
+ if (dict != __pyx_b) {
+ PyErr_Clear();
+ result = PyObject_GetAttr(__pyx_b, name);
+ }
+ if (!result) {
+ PyErr_SetObject(PyExc_NameError, name);
+ }
+ }
+ return result;
+}
+
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+
+
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ /* cause is unused */
+ Py_XINCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+ /* First, check the traceback argument, replacing None with NULL. */
+ if (tb == Py_None) {
+ Py_DECREF(tb);
+ tb = 0;
+ }
+ else if (tb != NULL && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ /* Next, replace a missing value with None */
+ if (value == NULL) {
+ value = Py_None;
+ Py_INCREF(value);
+ }
+ #if PY_VERSION_HEX < 0x02050000
+ if (!PyClass_Check(type))
+ #else
+ if (!PyType_Check(type))
+ #endif
+ {
+ /* Raising an instance. The value should be a dummy. */
+ if (value != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ /* Normalize to raise <class>, <instance> */
+ Py_DECREF(value);
+ value = type;
+ #if PY_VERSION_HEX < 0x02050000
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
+ Py_INCREF(type);
+ }
+ else {
+ type = 0;
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ #endif
+ }
+
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+
+#else /* Python 3+ */
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (!PyExceptionClass_Check(type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+
+ if (cause) {
+ PyObject *fixed_cause;
+ if (PyExceptionClass_Check(cause)) {
+ fixed_cause = PyObject_CallObject(cause, NULL);
+ if (fixed_cause == NULL)
+ goto bad;
+ }
+ else if (PyExceptionInstance_Check(cause)) {
+ fixed_cause = cause;
+ Py_INCREF(fixed_cause);
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "exception causes must derive from "
+ "BaseException");
+ goto bad;
+ }
+ if (!value) {
+ value = PyObject_CallObject(type, NULL);
+ }
+ PyException_SetCause(value, fixed_cause);
+ }
+
+ PyErr_SetObject(type, value);
+
+ if (tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+ }
+
+bad:
+ return;
+}
+#endif
+
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AS_STRING(kw_name));
+ #endif
+}
+
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ #if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
+ #else
+ if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
+ #endif
+ goto invalid_keyword_type;
+ } else {
+ for (name = first_kw_arg; *name; name++) {
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) break;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) break;
+ #endif
+ }
+ if (*name) {
+ values[name-argnames] = value;
+ } else {
+ /* unexpected keyword found */
+ for (name=argnames; name != first_kw_arg; name++) {
+ if (**name == key) goto arg_passed_twice;
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
+ PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
+ #else
+ if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
+ _PyString_Eq(**name, key)) goto arg_passed_twice;
+ #endif
+ }
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, **name);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%s() got an unexpected keyword argument '%s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *local_type, *local_value, *local_tb;
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+ local_type = tstate->curexc_type;
+ local_value = tstate->curexc_value;
+ local_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+ PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+ if (unlikely(tstate->curexc_type))
+ goto bad;
+ #if PY_MAJOR_VERSION >= 3
+ if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+ goto bad;
+ #endif
+ *type = local_type;
+ *value = local_value;
+ *tb = local_tb;
+ Py_INCREF(local_type);
+ Py_INCREF(local_value);
+ Py_INCREF(local_tb);
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = local_type;
+ tstate->exc_value = local_value;
+ tstate->exc_traceback = local_tb;
+ /* Make sure tstate is in a consistent state when we XDECREF
+ these objects (XDECREF may run arbitrary code). */
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+ return 0;
+bad:
+ *type = 0;
+ *value = 0;
+ *tb = 0;
+ Py_XDECREF(local_type);
+ Py_XDECREF(local_value);
+ Py_XDECREF(local_tb);
+ return -1;
+}
+
+
+static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
+ PyThreadState *tstate = PyThreadState_GET();
+ *type = tstate->exc_type;
+ *value = tstate->exc_value;
+ *tb = tstate->exc_traceback;
+ Py_XINCREF(*type);
+ Py_XINCREF(*value);
+ Py_XINCREF(*tb);
+}
+
+static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyThreadState *tstate = PyThreadState_GET();
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = type;
+ tstate->exc_value = value;
+ tstate->exc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
+ PyObject *py_import = 0;
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
+ if (!py_import)
+ goto bad;
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ #if PY_VERSION_HEX >= 0x02050000
+ {
+ PyObject *py_level = PyInt_FromLong(level);
+ if (!py_level)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, py_level, NULL);
+ Py_DECREF(py_level);
+ }
+ #else
+ if (level>0) {
+ PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
+ goto bad;
+ }
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, NULL);
+ #endif
+bad:
+ Py_XDECREF(empty_list);
+ Py_XDECREF(py_import);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+
+static PyObject *__pyx_binding_PyCFunctionType_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module) {
+ __pyx_binding_PyCFunctionType_object *op = PyObject_GC_New(__pyx_binding_PyCFunctionType_object, __pyx_binding_PyCFunctionType);
+ if (op == NULL)
+ return NULL;
+ op->func.m_ml = ml;
+ Py_XINCREF(self);
+ op->func.m_self = self;
+ Py_XINCREF(module);
+ op->func.m_module = module;
+ PyObject_GC_Track(op);
+ return (PyObject *)op;
+}
+
+static void __pyx_binding_PyCFunctionType_dealloc(__pyx_binding_PyCFunctionType_object *m) {
+ PyObject_GC_UnTrack(m);
+ Py_XDECREF(m->func.m_self);
+ Py_XDECREF(m->func.m_module);
+ PyObject_GC_Del(m);
+}
+
+static PyObject *__pyx_binding_PyCFunctionType_descr_get(PyObject *func, PyObject *obj, PyObject *type) {
+ if (obj == Py_None)
+ obj = NULL;
+ return PyMethod_New(func, obj, type);
+}
+
+static int __pyx_binding_PyCFunctionType_init(void) {
+ __pyx_binding_PyCFunctionType_type = PyCFunction_Type;
+ __pyx_binding_PyCFunctionType_type.tp_name = __Pyx_NAMESTR("cython_binding_builtin_function_or_method");
+ __pyx_binding_PyCFunctionType_type.tp_dealloc = (destructor)__pyx_binding_PyCFunctionType_dealloc;
+ __pyx_binding_PyCFunctionType_type.tp_descr_get = __pyx_binding_PyCFunctionType_descr_get;
+ if (PyType_Ready(&__pyx_binding_PyCFunctionType_type) < 0) {
+ return -1;
+ }
+ __pyx_binding_PyCFunctionType = &__pyx_binding_PyCFunctionType_type;
+ return 0;
+
+}
+
+static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_int64_t(int64_t val) {
+ const int64_t neg_one = (int64_t)-1, const_zero = (int64_t)0;
+ const int is_unsigned = const_zero < neg_one;
+ if ((sizeof(int64_t) == sizeof(char)) ||
+ (sizeof(int64_t) == sizeof(short))) {
+ return PyInt_FromLong((long)val);
+ } else if ((sizeof(int64_t) == sizeof(int)) ||
+ (sizeof(int64_t) == sizeof(long))) {
+ if (is_unsigned)
+ return PyLong_FromUnsignedLong((unsigned long)val);
+ else
+ return PyInt_FromLong((long)val);
+ } else if (sizeof(int64_t) == sizeof(PY_LONG_LONG)) {
+ if (is_unsigned)
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val);
+ else
+ return PyLong_FromLongLong((PY_LONG_LONG)val);
+ } else {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ return _PyLong_FromByteArray(bytes, sizeof(int64_t),
+ little, !is_unsigned);
+ }
+}
+
+static CYTHON_INLINE int64_t __Pyx_PyInt_from_py_int64_t(PyObject* x) {
+ const int64_t neg_one = (int64_t)-1, const_zero = (int64_t)0;
+ const int is_unsigned = const_zero < neg_one;
+ if (sizeof(int64_t) == sizeof(char)) {
+ if (is_unsigned)
+ return (int64_t)__Pyx_PyInt_AsUnsignedChar(x);
+ else
+ return (int64_t)__Pyx_PyInt_AsSignedChar(x);
+ } else if (sizeof(int64_t) == sizeof(short)) {
+ if (is_unsigned)
+ return (int64_t)__Pyx_PyInt_AsUnsignedShort(x);
+ else
+ return (int64_t)__Pyx_PyInt_AsSignedShort(x);
+ } else if (sizeof(int64_t) == sizeof(int)) {
+ if (is_unsigned)
+ return (int64_t)__Pyx_PyInt_AsUnsignedInt(x);
+ else
+ return (int64_t)__Pyx_PyInt_AsSignedInt(x);
+ } else if (sizeof(int64_t) == sizeof(long)) {
+ if (is_unsigned)
+ return (int64_t)__Pyx_PyInt_AsUnsignedLong(x);
+ else
+ return (int64_t)__Pyx_PyInt_AsSignedLong(x);
+ } else if (sizeof(int64_t) == sizeof(PY_LONG_LONG)) {
+ if (is_unsigned)
+ return (int64_t)__Pyx_PyInt_AsUnsignedLongLong(x);
+ else
+ return (int64_t)__Pyx_PyInt_AsSignedLongLong(x);
+ } else {
+ int64_t val;
+ PyObject *v = __Pyx_PyNumber_Int(x);
+ #if PY_VERSION_HEX < 0x03000000
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+ return (int64_t)-1;
+ }
+}
+
+static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
+ const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned char" :
+ "value too large to convert to unsigned char");
+ }
+ return (unsigned char)-1;
+ }
+ return (unsigned char)val;
+ }
+ return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
+ const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned short" :
+ "value too large to convert to unsigned short");
+ }
+ return (unsigned short)-1;
+ }
+ return (unsigned short)val;
+ }
+ return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
+ const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(unsigned int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(unsigned int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to unsigned int" :
+ "value too large to convert to unsigned int");
+ }
+ return (unsigned int)-1;
+ }
+ return (unsigned int)val;
+ }
+ return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
+}
+
+static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
+ const char neg_one = (char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to char" :
+ "value too large to convert to char");
+ }
+ return (char)-1;
+ }
+ return (char)val;
+ }
+ return (char)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
+ const short neg_one = (short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to short" :
+ "value too large to convert to short");
+ }
+ return (short)-1;
+ }
+ return (short)val;
+ }
+ return (short)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
+ const signed char neg_one = (signed char)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed char) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed char)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed char" :
+ "value too large to convert to signed char");
+ }
+ return (signed char)-1;
+ }
+ return (signed char)val;
+ }
+ return (signed char)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
+ const signed short neg_one = (signed short)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed short) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed short)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed short" :
+ "value too large to convert to signed short");
+ }
+ return (signed short)-1;
+ }
+ return (signed short)val;
+ }
+ return (signed short)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
+ const signed int neg_one = (signed int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(signed int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(signed int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to signed int" :
+ "value too large to convert to signed int");
+ }
+ return (signed int)-1;
+ }
+ return (signed int)val;
+ }
+ return (signed int)__Pyx_PyInt_AsSignedLong(x);
+}
+
+static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
+ const int neg_one = (int)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (sizeof(int) < sizeof(long)) {
+ long val = __Pyx_PyInt_AsLong(x);
+ if (unlikely(val != (long)(int)val)) {
+ if (!unlikely(val == -1 && PyErr_Occurred())) {
+ PyErr_SetString(PyExc_OverflowError,
+ (is_unsigned && unlikely(val < 0)) ?
+ "can't convert negative value to int" :
+ "value too large to convert to int");
+ }
+ return (int)-1;
+ }
+ return (int)val;
+ }
+ return (int)__Pyx_PyInt_AsLong(x);
+}
+
+static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
+ const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return (unsigned long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned long");
+ return (unsigned long)-1;
+ }
+ return (unsigned long)PyLong_AsUnsignedLong(x);
+ } else {
+ return (unsigned long)PyLong_AsLong(x);
+ }
+ } else {
+ unsigned long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned long)-1;
+ val = __Pyx_PyInt_AsUnsignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
+ const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return (unsigned PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to unsigned PY_LONG_LONG");
+ return (unsigned PY_LONG_LONG)-1;
+ }
+ return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
+ } else {
+ return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x);
+ }
+ } else {
+ unsigned PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (unsigned PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
+ const long neg_one = (long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return (long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long)-1;
+ }
+ return (long)PyLong_AsUnsignedLong(x);
+ } else {
+ return (long)PyLong_AsLong(x);
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (long)-1;
+ val = __Pyx_PyInt_AsLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
+ const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return (PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to PY_LONG_LONG");
+ return (PY_LONG_LONG)-1;
+ }
+ return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
+ } else {
+ return (PY_LONG_LONG)PyLong_AsLongLong(x);
+ }
+ } else {
+ PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
+ const signed long neg_one = (signed long)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return (signed long)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed long");
+ return (signed long)-1;
+ }
+ return (signed long)PyLong_AsUnsignedLong(x);
+ } else {
+ return (signed long)PyLong_AsLong(x);
+ }
+ } else {
+ signed long val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed long)-1;
+ val = __Pyx_PyInt_AsSignedLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
+ const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_VERSION_HEX < 0x03000000
+ if (likely(PyInt_Check(x))) {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return (signed PY_LONG_LONG)val;
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+ if (unlikely(Py_SIZE(x) < 0)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to signed PY_LONG_LONG");
+ return (signed PY_LONG_LONG)-1;
+ }
+ return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
+ } else {
+ return (signed PY_LONG_LONG)PyLong_AsLongLong(x);
+ }
+ } else {
+ signed PY_LONG_LONG val;
+ PyObject *tmp = __Pyx_PyNumber_Int(x);
+ if (!tmp) return (signed PY_LONG_LONG)-1;
+ val = __Pyx_PyInt_AsSignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+}
+
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ #if PY_VERSION_HEX < 0x02050000
+ return PyErr_Warn(NULL, message);
+ #else
+ return PyErr_WarnEx(NULL, message, 1);
+ #endif
+ }
+ return 0;
+}
+
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+
+static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
+ int __pyx_lineno, const char *__pyx_filename) {
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ PyObject *py_globals = 0;
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(__pyx_filename);
+ #else
+ py_srcfile = PyUnicode_FromString(__pyx_filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (__pyx_clineno) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_globals = PyModule_GetDict(__pyx_m);
+ if (!py_globals) goto bad;
+ py_code = PyCode_New(
+ 0, /*int argcount,*/
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*int kwonlyargcount,*/
+ #endif
+ 0, /*int nlocals,*/
+ 0, /*int stacksize,*/
+ 0, /*int flags,*/
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ __pyx_lineno, /*int firstlineno,*/
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ if (!py_code) goto bad;
+ py_frame = PyFrame_New(
+ PyThreadState_GET(), /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ py_globals, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ py_frame->f_lineno = __pyx_lineno;
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else /* Python 3+ has unicode identifiers */
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+/* Type Conversion Functions */
+
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
+ PyNumberMethods *m;
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_VERSION_HEX < 0x03000000
+ if (PyInt_Check(x) || PyLong_Check(x))
+#else
+ if (PyLong_Check(x))
+#endif
+ return Py_INCREF(x), x;
+ m = Py_TYPE(x)->tp_as_number;
+#if PY_VERSION_HEX < 0x03000000
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = PyNumber_Long(x);
+ }
+#else
+ if (m && m->nb_int) {
+ name = "int";
+ res = PyNumber_Long(x);
+ }
+#endif
+ if (res) {
+#if PY_VERSION_HEX < 0x03000000
+ if (!PyInt_Check(res) && !PyLong_Check(res)) {
+#else
+ if (!PyLong_Check(res)) {
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%s__ returned non-%s (type %.200s)",
+ name, name, Py_TYPE(res)->tp_name);
+ Py_DECREF(res);
+ return NULL;
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject* x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+#if PY_VERSION_HEX < 0x02050000
+ if (ival <= LONG_MAX)
+ return PyInt_FromLong((long)ival);
+ else {
+ unsigned char *bytes = (unsigned char *) &ival;
+ int one = 1; int little = (int)*(unsigned char*)&one;
+ return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
+ }
+#else
+ return PyInt_FromSize_t(ival);
+#endif
+}
+
+static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
+ unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
+ if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
+ return (size_t)-1;
+ } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to size_t");
+ return (size_t)-1;
+ }
+ return (size_t)val;
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/src/s3ql/_deltadump.pyx b/src/s3ql/_deltadump.pyx
new file mode 100644
index 0000000..adab2a3
--- /dev/null
+++ b/src/s3ql/_deltadump.pyx
@@ -0,0 +1,448 @@
+'''
+_deltadump.pyx - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+# Analysis of Cython code not really working yet
+#@PydevCodeAnalysisIgnore
+
+from __future__ import print_function, division
+
+from cpython.long cimport PyLong_AsVoidPtr
+from cpython.exc cimport PyErr_NoMemory
+from libc.stdio cimport (FILE, const_char, const_void, fclose as fclose_c,
+ fwrite as fwrite_c, fread as fread_c, ftell)
+from libc.string cimport strerror
+from libc.errno cimport errno
+from libc.stdlib cimport calloc as calloc_c, free as free_c
+from libc.stdint cimport (int64_t, uint8_t, uint16_t, uint32_t, uint64_t)
+from posix.unistd cimport dup, lseek, SEEK_SET
+
+cdef extern from 'stdint.h' nogil:
+ enum: UINT8_MAX
+ enum: UINT16_MAX
+ enum: UINT32_MAX
+
+cdef extern from 'stdio.h' nogil:
+ FILE *fdopen(int fd, const_char *mode)
+ int fileno(FILE *stream)
+
+cdef extern from 'endian.h' nogil:
+ uint64_t htole64(uint64_t host_64bits)
+ uint64_t le64toh(uint64_t little_endian_64bits)
+
+cdef extern from 'sqlite3.h' nogil:
+ ctypedef int sqlite3
+ ctypedef int sqlite3_stmt
+ ctypedef int64_t sqlite3_int64
+
+ int sqlite3_prepare_v2(sqlite3 *db,
+ char *zSql,
+ int nByte,
+ sqlite3_stmt **ppStmt,
+ char **pzTail)
+ int sqlite3_step(sqlite3_stmt*)
+ sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol)
+ const_void *sqlite3_column_blob(sqlite3_stmt*, int iCol)
+ int sqlite3_column_bytes(sqlite3_stmt*, int iCol)
+ int sqlite3_bind_blob(sqlite3_stmt*, int iCol, const_void*, int n, void(*)(void*))
+ int sqlite3_bind_int64(sqlite3_stmt*, int iCol, sqlite3_int64)
+ int sqlite3_reset(sqlite3_stmt *pStmt)
+ int sqlite3_finalize(sqlite3_stmt *pStmt)
+ int sqlite3_column_type(sqlite3_stmt*, int iCol)
+ double sqlite3_column_double(sqlite3_stmt*, int iCol)
+ int sqlite3_bind_double(sqlite3_stmt*, int, double)
+
+ void SQLITE_TRANSIENT(void*)
+
+ enum:
+ SQLITE_OK
+ SQLITE_DONE
+ SQLITE_ROW
+ SQLITE_NULL
+
+from .cleanup_manager import CleanupManager
+import apsw
+import os
+import logging
+
+log = logging.getLogger('deltadump')
+
+# Column types
+cdef int _INTEGER = 1
+cdef int _BLOB = 2
+cdef int _TIME = 3
+
+# Make column types available as Python objects
+INTEGER = _INTEGER
+BLOB = _BLOB
+TIME = _TIME
+
+# Integer length codes
+cdef uint8_t INT8 = 127
+cdef uint8_t INT16 = 126
+cdef uint8_t INT32 = 125
+cdef uint8_t INT64 = 124
+
+# Maximum size of BLOBs
+MAX_BLOB_SIZE = 4096
+
+# Scale factor from time floats to integers
+# 1e9 would be perfect, but introduces rounding errors
+cdef double time_scale = 1<<30
+
+cdef inline int fwrite(const_void* buf, size_t len_, FILE* fp) except -1:
+ '''Call libc's fwrite() and raise exception on failure'''
+
+ if fwrite_c(buf, len_, 1, fp) != 1:
+ raise IOError(errno, strerror(errno))
+
+cdef inline int fread(void* buf, size_t len_, FILE* fp) except -1:
+ '''Call libc's fread() and raise exception on failure'''
+
+ if fread_c(buf, len_, 1, fp) != 1:
+ raise IOError(errno, strerror(errno))
+
+cdef int free(void* ptr) except -1:
+ '''Call libc.free() and return None'''
+
+ free_c(ptr)
+
+cdef int sqlite3_finalize_p(sqlite3_stmt* stmt) except -1:
+ '''Call sqlite3_finalize and raise exception on failure'''
+
+ rc = sqlite3_finalize(stmt)
+ if rc != SQLITE_OK:
+ raise apsw.exceptionfor(rc)
+
+cdef int fclose(FILE* fp) except -1:
+ '''Call libc.fclose() and raise exception on failure'''
+
+ cdef ssize_t off
+
+ # Reposition FD to position of FILE*, otherwise next read from FD will miss
+ # data currently in stream buffer. It seems that a call to fflush() achieves
+ # the same thing, but this does not seem to be documented so we don't rely
+ # on it.
+ off = ftell(fp)
+ if off == -1:
+ raise OSError(errno, strerror(errno))
+
+ if lseek(fileno(fp), off, SEEK_SET) != off:
+ raise OSError(errno, strerror(errno))
+
+ if fclose_c(fp) != 0:
+ raise OSError(errno, strerror(errno))
+
+cdef void* calloc(size_t cnt, size_t size) except NULL:
+ '''Call libc.calloc and raise exception on failure'''
+
+ cdef void* ptr
+
+ ptr = calloc_c(cnt, size)
+
+ if ptr is NULL:
+ PyErr_NoMemory()
+
+ return ptr
+
+def dump_table(table, order, columns, db, fh):
+ '''Dump *columns* of *table* into *fh*
+
+ *order* specifies the order in which the rows are written and must be a
+ string that can be inserted after the "ORDER BY" clause in an SQL SELECT
+ statement.
+
+ *db* is an `s3ql.Connection` instance for the database.
+
+ *columns* must be a list of 3-tuples, one for each column that should be
+ stored. The first element of the tuple must contain the column name and the
+ second element the type of data stored in the column (`INTEGER`, `TIME`
+ or `BLOB`). Times will be converted to nanosecond integers.
+
+ For integers and times, the third tuple element specifies the expected
+ change of the values between rows. For blobs it can be either zero
+ (indicating variable length columns) or an integer specifying the length of
+ the column values in bytes.
+ '''
+
+ return _dump_or_load(table, order, columns, db, fh)
+
+def load_table(table, columns, db, fh):
+ '''Load *columns* of *table* from *fh*
+
+ Parameters are described in the docstring of the `dump_table` function.
+ '''
+
+ return _dump_or_load(table, None, columns, db, fh)
+
+
+def _dump_or_load(table, order, columns, db, fh):
+ '''Dump or load *columns* of *table*
+
+ If *order* is None, load data from *fh* into *db*.
+
+ If *order* is not None, data will be read from *db* and written
+ into *fh*. In this case, *order* specifies the order in which
+ the rows are written and must be a string that can be inserted
+ after the "ORDER BY" clause in an SQL SELECT statement.
+
+ *db* is an `s3ql.Connection` instance for the database.
+
+ *columns* must be a list of 3-tuples, one for each column that should be stored
+ or retrieved. The first element of the tuple must contain the column name
+ and the second element the type of data stored in the column (`INTEGER`,
+ `TIME` or `BLOB`). Times will be converted to nanosecond integers.
+
+ For integers and times, the third tuple element specifies the expected
+ change of the values between rows. For blobs it can be either zero
+ (indicating variable length columns) or an integer specifying the length of
+ the column values in bytes.
+ '''
+
+ cdef sqlite3 *sqlite3_db
+ cdef sqlite3_stmt *stmt
+ cdef int *col_types, *col_args, col_count, rc, fd
+ cdef int64_t *int64_prev
+ cdef FILE *fp
+ cdef void *buf
+ cdef int64_t row_count
+
+ sqlite3_db = <sqlite3*> PyLong_AsVoidPtr(db.conn.sqlite3pointer())
+
+ with CleanupManager(log) as cleanup:
+ fd = dup(fh.fileno())
+ if fd == -1:
+ raise OSError(errno, strerror(errno))
+ fp = fdopen(fd, 'r+b')
+ if fp == NULL:
+ raise OSError(errno, strerror(errno))
+ cleanup.register(lambda: fclose(fp))
+
+ # Allocate col_args and col_types
+ col_count = len(columns)
+ col_types = <int*> calloc(col_count, sizeof(int))
+ cleanup.register(lambda: free(col_types))
+ col_args = <int*> calloc(col_count, sizeof(int))
+ cleanup.register(lambda: free(col_args))
+
+ # Initialize col_args and col_types
+ for i in range(col_count):
+ if columns[i][1] not in (BLOB, INTEGER, TIME):
+ raise ValueError("Invalid type for column %d" % i)
+ col_types[i] = columns[i][1]
+
+ if len(columns[i]) == 3:
+ col_args[i] = columns[i][2]
+ else:
+ col_args[i] = 0
+
+ # Allocate int64_prev
+ int64_prev = <int64_t*> calloc(len(columns), sizeof(int64_t))
+ cleanup.register(lambda: free(int64_prev))
+
+ # Prepare statement
+ col_names = [ x[0] for x in columns ]
+ if order is None:
+ query = ("INSERT INTO %s (%s) VALUES(%s)"
+ % (table,
+ ', '.join(col_names),
+ ', '.join('?' * col_count)))
+ else:
+ query = ("SELECT %s FROM %s ORDER BY %s " %
+ (', '.join(col_names), table, order))
+ rc = sqlite3_prepare_v2(sqlite3_db, query, -1, &stmt, NULL)
+ if rc != SQLITE_OK:
+ raise apsw.exceptionfor(rc)
+ cleanup.register(lambda: sqlite3_finalize_p(stmt))
+
+ # Dump or load data as requested
+ if order is None:
+ buf = calloc(MAX_BLOB_SIZE, 1)
+ cleanup.register(lambda: free(buf))
+ read_integer(&row_count, fp)
+ log.debug('_dump_or_load(%s): reading %d rows', table, row_count)
+ _load_table(col_types, col_args, int64_prev, col_count,
+ row_count, stmt, fp, buf)
+ else:
+ row_count = db.get_val("SELECT COUNT(rowid) FROM %s" % table)
+ log.debug('_dump_or_load(%s): writing %d rows', table, row_count)
+ write_integer(row_count, fp)
+ _dump_table(col_types, col_args, int64_prev, col_count, stmt, fp)
+
+
+cdef _dump_table(int* col_types, int* col_args, int64_t* int64_prev,
+ int col_count, sqlite3_stmt* stmt, FILE* fp):
+
+ cdef const_void *buf
+ cdef int rc, i, len_
+ cdef int64_t int64, tmp
+
+ # Iterate through rows
+ while True:
+ rc = sqlite3_step(stmt)
+ if rc == SQLITE_DONE:
+ break
+ elif rc != SQLITE_ROW:
+ raise apsw.exceptionfor(rc)
+
+ for i in range(col_count):
+ if sqlite3_column_type(stmt, i) is SQLITE_NULL:
+ raise ValueError("Can't dump NULL values")
+
+ if col_types[i] == _INTEGER:
+ int64 = sqlite3_column_int64(stmt, i)
+ tmp = int64
+ int64 -= int64_prev[i] + col_args[i]
+ int64_prev[i] = tmp
+ write_integer(int64, fp)
+
+ elif col_types[i] == _TIME:
+ int64 = <int64_t> (sqlite3_column_double(stmt, i) * time_scale)
+ tmp = int64
+ int64 -= int64_prev[i] + col_args[i]
+ int64_prev[i] = tmp
+ write_integer(int64, fp)
+
+ elif col_types[i] == _BLOB:
+ buf = sqlite3_column_blob(stmt, i)
+ len_ = sqlite3_column_bytes(stmt, i)
+ if len_ > MAX_BLOB_SIZE:
+ raise ValueError('Can not dump BLOB of size %d (max: %d)',
+ len_, MAX_BLOB_SIZE)
+ if col_args[i] == 0:
+ write_integer(len_ - int64_prev[i], fp)
+ int64_prev[i] = len_
+ elif len_ != col_args[i]:
+ raise ValueError("Length %d != %d in column %d" % (len_, col_args[i], i))
+
+ if len_ != 0:
+ fwrite(buf, len_, fp)
+
+cdef _load_table(int* col_types, int* col_args, int64_t* int64_prev,
+ int col_count, int row_count, sqlite3_stmt* stmt,
+ FILE* fp, void* buf):
+
+ cdef int64_t int64
+ cdef int rc, len_, i, j
+
+ # Iterate through rows
+ for i in range(row_count):
+ for j in range(col_count):
+ if col_types[j] == _INTEGER:
+ read_integer(&int64, fp)
+ int64 += col_args[j] + int64_prev[j]
+ int64_prev[j] = int64
+ rc = sqlite3_bind_int64(stmt, j+1, int64)
+ if rc != SQLITE_OK:
+ raise apsw.exceptionfor(rc)
+
+ if col_types[j] == _TIME:
+ read_integer(&int64, fp)
+ int64 += col_args[j] + int64_prev[j]
+ int64_prev[j] = int64
+ rc = sqlite3_bind_double(stmt, j+1, int64 / time_scale)
+ if rc != SQLITE_OK:
+ raise apsw.exceptionfor(rc)
+
+ elif col_types[j] == _BLOB:
+ if col_args[j] == 0:
+ read_integer(&int64, fp)
+ len_ = int64_prev[j] + int64
+ int64_prev[j] = len_
+ else:
+ len_ = col_args[j]
+
+ if len_ > MAX_BLOB_SIZE:
+ raise RuntimeError('BLOB too large to read (%d vs %d)', len_, MAX_BLOB_SIZE)
+
+ if len_ != 0:
+ fread(buf, len_, fp)
+
+ rc = sqlite3_bind_blob(stmt, j+1, buf, len_, SQLITE_TRANSIENT)
+ if rc != SQLITE_OK:
+ raise apsw.exceptionfor(rc)
+
+ rc = sqlite3_step(stmt)
+ if rc != SQLITE_DONE:
+ raise apsw.exceptionfor(rc)
+
+ rc = sqlite3_reset(stmt)
+ if rc != SQLITE_OK:
+ raise apsw.exceptionfor(rc)
+
+cdef inline int write_integer(int64_t int64, FILE* fp) except -1:
+ '''Write *int64* into *fp*, using as little space as possible'''
+
+ cdef uint8_t int8
+ cdef size_t len_
+ cdef uint64_t uint64
+
+ if int64 < 0:
+ uint64 = <uint64_t> -int64
+ int8 = <uint8_t> 0x80 # Highest bit set
+ else:
+ uint64 = <uint64_t> int64
+ int8 = 0
+
+ if uint64 < 0x80 and uint64 not in (INT8, INT16, INT32, INT64):
+ len_ = 0
+ int8 += <uint8_t> uint64
+ elif uint64 < UINT8_MAX:
+ len_ = 1
+ int8 += INT8
+ elif uint64 < UINT16_MAX:
+ len_ = 2
+ int8 += INT16
+ elif uint64 < UINT32_MAX:
+ len_ = 4
+ int8 += INT32
+ else:
+ len_ = 8
+ int8 += INT64
+
+ fwrite(&int8, 1, fp)
+ if len_ != 0:
+ uint64 = htole64(uint64)
+ fwrite(&uint64, len_, fp)
+
+cdef inline int read_integer(int64_t* out, FILE* fp) except -1:
+ '''Read integer written using `write_integer` from *fp*'''
+
+ cdef uint8_t int8
+ cdef size_t len_
+ cdef uint64_t uint64
+ cdef char negative
+
+ fread(&int8, 1, fp)
+
+ if int8 & 0x80 != 0:
+ negative = 1
+ int8 = int8 & (~ 0x80)
+ else:
+ negative = 0
+
+ if int8 == INT8:
+ len_ = 1
+ elif int8 == INT16:
+ len_ = 2
+ elif int8 == INT32:
+ len_ = 4
+ elif int8 == INT64:
+ len_ = 8
+ else:
+ len_ = 0
+ uint64 = int8
+
+ if len_ != 0:
+ uint64 = 0
+ fread(&uint64, len_, fp)
+ uint64 = le64toh(uint64)
+
+ if negative == 1:
+ out[0] = - <int64_t> uint64
+ else:
+ out[0] = <int64_t> uint64
diff --git a/src/s3ql/adm.py b/src/s3ql/adm.py
new file mode 100644
index 0000000..56a13cd
--- /dev/null
+++ b/src/s3ql/adm.py
@@ -0,0 +1,507 @@
+'''
+adm.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+from __future__ import division, print_function, absolute_import
+from . import CURRENT_FS_REV, REV_VER_MAP
+from .backends.common import BetterBucket, get_bucket
+from .common import (QuietError, BUFSIZE, setup_logging, get_bucket_cachedir,
+ get_seq_no, stream_write_bz2, CTRL_INODE)
+from .database import Connection, NoSuchRowError
+from .fsck import Fsck
+from .metadata import (restore_metadata, cycle_metadata, dump_metadata,
+ create_tables)
+from .parse_args import ArgumentParser
+from datetime import datetime as Datetime
+from getpass import getpass
+from llfuse import ROOT_INODE
+from s3ql.backends.common import NoSuchBucket
+import cPickle as pickle
+import logging
+import lzma
+import os
+import shutil
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+
+log = logging.getLogger("adm")
+
+def parse_args(args):
+ '''Parse command line'''
+
+ parser = ArgumentParser(
+ description="Manage S3QL Buckets.",
+ epilog=textwrap.dedent('''\
+ Hint: run `%(prog)s <action> --help` to get help on the additional
+ arguments that the different actions take.'''))
+
+ pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\
+ Hint: run `%(prog)s --help` to get help on other available actions and
+ optional arguments that can be used with all actions.'''))
+ pparser.add_storage_url()
+
+ subparsers = parser.add_subparsers(metavar='<action>', dest='action',
+ help='may be either of')
+ subparsers.add_parser("passphrase", help="change bucket passphrase",
+ parents=[pparser])
+ subparsers.add_parser("upgrade", help="upgrade file system to newest revision",
+ parents=[pparser])
+ subparsers.add_parser("clear", help="delete all S3QL data from the bucket",
+ parents=[pparser])
+ subparsers.add_parser("download-metadata",
+ help="Interactively download metadata backups. "
+ "Use only if you know what you are doing.",
+ parents=[pparser])
+
+ parser.add_debug_modules()
+ parser.add_quiet()
+ parser.add_log()
+ parser.add_authfile()
+ parser.add_cachedir()
+ parser.add_version()
+
+ options = parser.parse_args(args)
+
+ return options
+
+def main(args=None):
+ '''Change or show S3QL file system parameters'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ # Check if fs is mounted on this computer
+ # This is not foolproof but should prevent common mistakes
+ match = options.storage_url + ' /'
+ with open('/proc/mounts', 'r') as fh:
+ for line in fh:
+ if line.startswith(match):
+ raise QuietError('Can not work on mounted file system.')
+
+ if options.action == 'clear':
+ try:
+ bucket = get_bucket(options, plain=True)
+ except NoSuchBucket as exc:
+ raise QuietError(str(exc))
+ return clear(bucket,
+ get_bucket_cachedir(options.storage_url, options.cachedir))
+
+ try:
+ bucket = get_bucket(options)
+ except NoSuchBucket as exc:
+ raise QuietError(str(exc))
+
+ if options.action == 'upgrade':
+ return upgrade(bucket, get_bucket_cachedir(options.storage_url,
+ options.cachedir))
+
+ if options.action == 'passphrase':
+ return change_passphrase(bucket)
+
+ if options.action == 'download-metadata':
+ return download_metadata(bucket, options.storage_url)
+
+
+def download_metadata(bucket, storage_url):
+ '''Download old metadata backups'''
+
+ backups = sorted(bucket.list('s3ql_metadata_bak_'))
+
+ if not backups:
+ raise QuietError('No metadata backups found.')
+
+ log.info('The following backups are available:')
+ log.info('%3s %-23s %-15s', 'No', 'Name', 'Date')
+ for (i, name) in enumerate(backups):
+ params = bucket.lookup(name)
+ if 'last-modified' in params:
+ date = Datetime.fromtimestamp(params['last-modified']).strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ # (metadata might be from an older fs revision)
+ date = '(unknown)'
+
+ log.info('%3d %-23s %-15s', i, name, date)
+
+ name = None
+ while name is None:
+ buf = raw_input('Enter no to download: ')
+ try:
+ name = backups[int(buf.strip())]
+ except:
+ log.warn('Invalid input')
+
+ log.info('Downloading %s...', name)
+
+ cachepath = get_bucket_cachedir(storage_url, '.')
+ for i in ('.db', '.params'):
+ if os.path.exists(cachepath + i):
+ raise QuietError('%s already exists, aborting.' % cachepath+i)
+
+ param = bucket.lookup(name)
+ try:
+ log.info('Reading metadata...')
+ def do_read(fh):
+ os.close(os.open(cachepath + '.db', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR))
+ db = Connection(cachepath + '.db', fast_mode=True)
+ try:
+ restore_metadata(fh, db)
+ finally:
+ db.close()
+ bucket.perform_read(do_read, name)
+ except:
+ # Don't keep file if it doesn't contain anything sensible
+ os.unlink(cachepath + '.db')
+ raise
+
+ # Raise sequence number so that fsck.s3ql actually uses the
+ # downloaded backup
+ seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]
+ param['seq_no'] = max(seq_nos) + 1
+ pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
+
+def change_passphrase(bucket):
+ '''Change bucket passphrase'''
+
+ if not isinstance(bucket, BetterBucket) and bucket.passphrase:
+ raise QuietError('Bucket is not encrypted.')
+
+ data_pw = bucket.passphrase
+
+ if sys.stdin.isatty():
+ wrap_pw = getpass("Enter new encryption password: ")
+ if not wrap_pw == getpass("Confirm new encryption password: "):
+ raise QuietError("Passwords don't match")
+ else:
+ wrap_pw = sys.stdin.readline().rstrip()
+
+ bucket.passphrase = wrap_pw
+ bucket['s3ql_passphrase'] = data_pw
+ bucket.passphrase = data_pw
+
+def clear(bucket, cachepath):
+ print('I am about to delete the S3QL file system in %s.' % bucket,
+ 'Please enter "yes" to continue.', '> ', sep='\n', end='')
+
+ if sys.stdin.readline().strip().lower() != 'yes':
+ raise QuietError()
+
+ log.info('Deleting...')
+
+ for suffix in ('.db', '.params'):
+ name = cachepath + suffix
+ if os.path.exists(name):
+ os.unlink(name)
+
+ name = cachepath + '-cache'
+ if os.path.exists(name):
+ shutil.rmtree(name)
+
+ bucket.clear()
+
+ print('File system deleted.')
+
+ if not bucket.is_get_consistent():
+ log.info('Note: it may take a while for the removals to propagate through the backend.')
+
+def get_old_rev_msg(rev, prog):
+ return textwrap.dedent('''\
+ The last S3QL version that supported this file system revision
+ was %(version)s. You can run this version's %(prog)s by executing:
+
+ $ wget http://s3ql.googlecode.com/files/s3ql-%(version)s.tar.bz2
+ $ tar xjf s3ql-%(version)s.tar.bz2
+ $ s3ql-%(version)s/bin/%(prog)s <options>
+ ''' % { 'version': REV_VER_MAP[rev],
+ 'prog': prog })
+
+def upgrade(bucket, cachepath):
+ '''Upgrade file system to newest revision'''
+
+ log.info('Getting file system parameters..')
+
+ seq_nos = list(bucket.list('s3ql_seq_no_'))
+ if (seq_nos[0].endswith('.meta')
+ or seq_nos[0].endswith('.dat')):
+ print(textwrap.dedent('''
+ File system revision too old to upgrade!
+
+ You need to use an older S3QL version to upgrade to a more recent
+ revision before you can use this version to upgrade to the newest
+ revision.
+ '''))
+ print(get_old_rev_msg(11+1, 's3qladm'))
+ raise QuietError()
+ seq_no = get_seq_no(bucket)
+
+ # Check for cached metadata
+ db = None
+ if os.path.exists(cachepath + '.params'):
+ param = pickle.load(open(cachepath + '.params', 'rb'))
+ if param['seq_no'] < seq_no:
+ log.info('Ignoring locally cached metadata (outdated).')
+ param = bucket.lookup('s3ql_metadata')
+ else:
+ log.info('Using cached metadata.')
+ db = Connection(cachepath + '.db')
+ else:
+ param = bucket.lookup('s3ql_metadata')
+
+ # Check for unclean shutdown
+ if param['seq_no'] < seq_no:
+ if bucket.is_get_consistent():
+ print(textwrap.fill(textwrap.dedent('''\
+ It appears that the file system is still mounted somewhere else. If this is not
+ the case, the file system may have not been unmounted cleanly and you should try
+ to run fsck on the computer where the file system has been mounted most recently.
+ ''')))
+ else:
+ print(textwrap.fill(textwrap.dedent('''\
+ It appears that the file system is still mounted somewhere else. If this is not the
+ case, the file system may have not been unmounted cleanly or the data from the
+ most-recent mount may have not yet propagated through the backend. In the later case,
+ waiting for a while should fix the problem, in the former case you should try to run
+ fsck on the computer where the file system has been mounted most recently.
+ ''')))
+
+ print(get_old_rev_msg(param['revision'], 'fsck.s3ql'))
+ raise QuietError()
+
+ # Check that the fs itself is clean
+ if param['needs_fsck']:
+ raise QuietError("File system damaged, run fsck first!")
+
+ # Check revision
+ if param['revision'] < CURRENT_FS_REV - 1:
+ print(textwrap.dedent('''
+ File system revision too old to upgrade!
+
+ You need to use an older S3QL version to upgrade to a more recent
+ revision before you can use this version to upgrade to the newest
+ revision.
+ '''))
+ print(get_old_rev_msg(param['revision']+1, 's3qladm'))
+ raise QuietError()
+
+ elif param['revision'] >= CURRENT_FS_REV:
+ print('File system already at most-recent revision')
+ return
+
+ print(textwrap.dedent('''
+ I am about to update the file system to the newest revision.
+ You will not be able to access the file system with any older version
+ of S3QL after this operation.
+
+ You should make very sure that this command is not interrupted and
+ that no one else tries to mount, fsck or upgrade the file system at
+ the same time.
+
+ '''))
+
+ print('Please enter "yes" to continue.', '> ', sep='\n', end='')
+
+ if sys.stdin.readline().strip().lower() != 'yes':
+ raise QuietError()
+
+ # Download metadata
+ if not db:
+ log.info("Downloading & uncompressing metadata...")
+ def do_read(fh):
+ os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR))
+ db = Connection(cachepath + '.db.tmp', fast_mode=True)
+ try:
+ restore_legacy_metadata(fh, db)
+ finally:
+ # If metadata reading has to be retried, we don't want to hold
+ # a lock on the database.
+ db.close()
+ bucket.perform_read(do_read, "s3ql_metadata")
+ os.rename(cachepath + '.db.tmp', cachepath + '.db')
+ db = Connection(cachepath + '.db')
+
+ log.info('Upgrading from revision %d to %d...', param['revision'],
+ CURRENT_FS_REV)
+
+ db.execute("""
+ CREATE TABLE ext_attributes_new (
+ inode INTEGER NOT NULL REFERENCES inodes(id),
+ name_id INTEGER NOT NULL REFERENCES names(id),
+ value BLOB NOT NULL,
+
+ PRIMARY KEY (inode, name_id)
+ )""")
+ for (inode, name, val) in db.query('SELECT inode, name, value FROM ext_attributes'):
+ db.execute('INSERT INTO ext_attributes_new (inode, name_id, value) VALUES(?,?,?)',
+ (inode, _add_name(db, name), val))
+ db.execute('DROP TABLE ext_attributes')
+ db.execute('ALTER TABLE ext_attributes_new RENAME TO ext_attributes')
+ db.execute("""
+ CREATE VIEW ext_attributes_v AS
+ SELECT * FROM ext_attributes JOIN names ON names.id = name_id
+ """)
+
+ renumber_inodes(db)
+
+ # fsck required to make sure that dump will work
+ fsck = Fsck(cachepath + '-cache', bucket, param, db)
+ fsck.check()
+
+ if fsck.uncorrectable_errors:
+ raise QuietError("Uncorrectable errors found, aborting.")
+
+ param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
+ param['inode_gen'] = 1
+ param['revision'] = CURRENT_FS_REV
+ param['last-modified'] = time.time() - time.timezone
+
+ cycle_metadata(bucket)
+ log.info('Dumping metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(db, fh)
+ def do_write(obj_fh):
+ fh.seek(0)
+ stream_write_bz2(fh, obj_fh)
+ return obj_fh
+
+ log.info("Compressing and uploading metadata...")
+ bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')
+ obj_fh = bucket.perform_write(do_write, "s3ql_metadata", metadata=param,
+ is_compressed=True)
+ log.info('Wrote %.2f MB of compressed metadata.', obj_fh.get_obj_size() / 1024**2)
+ pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
+
+ db.execute('ANALYZE')
+ db.execute('VACUUM')
+
+def _add_name(db, name):
+ '''Get id for *name* and increase refcount
+
+ Name is inserted in table if it does not yet exist.
+ '''
+
+ try:
+ name_id = db.get_val('SELECT id FROM names WHERE name=?', (name,))
+ except NoSuchRowError:
+ name_id = db.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
+ (name, 1))
+ else:
+ db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))
+
+ return name_id
+
+def renumber_inodes(db):
+ '''Renumber inodes'''
+
+ log.info('Renumbering inodes...')
+ for table in ('inodes', 'inode_blocks', 'symlink_targets',
+ 'contents', 'names', 'blocks', 'objects', 'ext_attributes'):
+ db.execute('ALTER TABLE %s RENAME TO %s_old' % (table, table))
+
+ for table in ('contents_v', 'ext_attributes_v'):
+ db.execute('DROP VIEW %s' % table)
+
+ create_tables(db)
+ for table in ('names', 'blocks', 'objects'):
+ db.execute('DROP TABLE %s' % table)
+ db.execute('ALTER TABLE %s_old RENAME TO %s' % (table, table))
+
+ log.info('..mapping..')
+ db.execute('CREATE TEMPORARY TABLE inode_map (rowid INTEGER PRIMARY KEY AUTOINCREMENT, id INTEGER UNIQUE)')
+ db.execute('INSERT INTO inode_map (rowid, id) VALUES(?,?)', (ROOT_INODE, ROOT_INODE))
+ db.execute('INSERT INTO inode_map (rowid, id) VALUES(?,?)', (CTRL_INODE, CTRL_INODE))
+ db.execute('INSERT INTO inode_map (id) SELECT id FROM inodes_old WHERE id > ? ORDER BY ctime ASC',
+ (CTRL_INODE,))
+
+ log.info('..inodes..')
+ db.execute('INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size,locked,rdev) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = inodes_old.id), '
+ ' mode,uid,gid,mtime,atime,ctime,refcount,size,locked,rdev FROM inodes_old')
+
+ log.info('..inode_blocks..')
+ db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = inode_blocks_old.inode), '
+ ' blockno, block_id FROM inode_blocks_old')
+
+ log.info('..contents..')
+ db.execute('INSERT INTO contents (inode, parent_inode, name_id) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = contents_old.inode), '
+ ' (SELECT rowid FROM inode_map WHERE inode_map.id = contents_old.parent_inode), '
+ ' name_id FROM contents_old')
+
+ log.info('..symlink_targets..')
+ db.execute('INSERT INTO symlink_targets (inode, target) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = symlink_targets_old.inode), '
+ ' target FROM symlink_targets_old')
+
+ log.info('..ext_attributes..')
+ db.execute('INSERT INTO ext_attributes (inode, name_id, value) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = ext_attributes_old.inode), '
+ ' name_id, value FROM ext_attributes_old')
+
+ for table in ('inodes', 'inode_blocks', 'symlink_targets',
+ 'contents', 'ext_attributes'):
+ db.execute('DROP TABLE %s_old' % table)
+
+ db.execute('DROP TABLE inode_map')
+
+def restore_legacy_metadata(ifh, conn):
+
+ # Note: unpickling is terribly slow if fh is not a real file object, so
+ # uncompressing to a temporary file also gives a performance boost
+ log.info('Downloading and decompressing metadata...')
+ tmp = tempfile.TemporaryFile()
+ decompressor = lzma.LZMADecompressor()
+ while True:
+ buf = ifh.read(BUFSIZE)
+ if not buf:
+ break
+ buf = decompressor.decompress(buf)
+ if buf:
+ tmp.write(buf)
+ del decompressor
+ tmp.seek(0)
+
+ log.info("Reading metadata...")
+ unpickler = pickle.Unpickler(tmp)
+ (to_dump, columns) = unpickler.load()
+ create_tables(conn)
+ conn.execute("""
+ DROP VIEW ext_attributes_v;
+ DROP TABLE ext_attributes;
+ CREATE TABLE ext_attributes (
+ inode INTEGER NOT NULL REFERENCES inodes(id),
+ name BLUB NOT NULL,
+ value BLOB NOT NULL,
+
+ PRIMARY KEY (inode, name)
+ )""")
+
+ for (table, _) in to_dump:
+ log.info('..%s..', table)
+ col_str = ', '.join(columns[table])
+ val_str = ', '.join('?' for _ in columns[table])
+ sql_str = 'INSERT INTO %s (%s) VALUES(%s)' % (table, col_str, val_str)
+ while True:
+ buf = unpickler.load()
+ if not buf:
+ break
+ for row in buf:
+ conn.execute(sql_str, row)
+
+ tmp.close()
+ conn.execute('ANALYZE')
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
+
diff --git a/src/s3ql/backends/common.py b/src/s3ql/backends/common.py
index e7ed5fc..72a58a9 100644
--- a/src/s3ql/backends/common.py
+++ b/src/s3ql/backends/common.py
@@ -246,6 +246,11 @@ class AbstractBucket(object):
pass
@abstractmethod
+ def get_size(self, key):
+ '''Return size of object stored under *key*'''
+ pass
+
+ @abstractmethod
def open_read(self, key):
"""Open object for reading
@@ -371,6 +376,15 @@ class BetterBucket(AbstractBucket):
convert_legacy_metadata(metadata)
return self._unwrap_meta(metadata)
+ def get_size(self, key):
+ '''Return size of object stored under *key*
+
+ This method returns the compressed size, i.e. the storage space
+ that's actually occupied by the object.
+ '''
+
+ return self.bucket.get_size(key)
+
def is_temp_failure(self, exc):
'''Return true if exc indicates a temporary error
diff --git a/src/s3ql/backends/local.py b/src/s3ql/backends/local.py
index 46aabad..443af9b 100644
--- a/src/s3ql/backends/local.py
+++ b/src/s3ql/backends/local.py
@@ -66,6 +66,11 @@ class Bucket(AbstractBucket):
raise ChecksumError('Invalid metadata')
raise
+ def get_size(self, key):
+ '''Return size of object stored under *key*'''
+
+ return os.path.getsize(self._key_to_path(key))
+
def open_read(self, key):
"""Open object for reading
diff --git a/src/s3ql/backends/s3.py b/src/s3ql/backends/s3.py
index ae1c306..70d417d 100644
--- a/src/s3ql/backends/s3.py
+++ b/src/s3ql/backends/s3.py
@@ -242,6 +242,27 @@ class Bucket(AbstractBucket):
return extractmeta(resp)
@retry
+ def get_size(self, key):
+ '''Return size of object stored under *key*'''
+
+ log.debug('get_size(%s)', key)
+
+ try:
+ resp = self._do_request('HEAD', '/%s%s' % (self.prefix, key))
+ assert resp.length == 0
+ except HTTPError as exc:
+ if exc.status == 404:
+ raise NoSuchObject(key)
+ else:
+ raise
+
+ for (name, val) in resp.getheaders():
+ if name.lower() == 'content-length':
+ return int(val)
+ raise RuntimeError('HEAD request did not return Content-Length')
+
+
+ @retry
def open_read(self, key):
''''Open object for reading
@@ -403,7 +424,9 @@ class Bucket(AbstractBucket):
False.
"""
- for (no, s3key) in enumerate(self):
+ # We have to cache keys, because otherwise we can't use the
+ # http connection to delete keys.
+ for (no, s3key) in enumerate(list(self)):
if no != 0 and no % 1000 == 0:
log.info('clear(): deleted %d objects so far..', no)
diff --git a/src/s3ql/block_cache.py b/src/s3ql/block_cache.py
index 86658f2..380c9e2 100644
--- a/src/s3ql/block_cache.py
+++ b/src/s3ql/block_cache.py
@@ -374,6 +374,13 @@ class BlockCache(object):
'VALUES(?,?,?,?)', (1, obj_id, hash_, el.size))
log.debug('upload(%s): created new block %d', el, block_id)
log.debug('upload(%s): adding to upload queue', el)
+
+ # Note: we must finish all db transactions before adding to
+ # in_transit, otherwise commit() may return before all blocks
+ # are available in db.
+ self.db.execute('INSERT OR REPLACE INTO inode_blocks (block_id, inode, blockno) '
+ 'VALUES(?,?,?)', (block_id, el.inode, el.blockno))
+
self.in_transit.add(obj_id)
with lock_released:
if not self.upload_threads:
@@ -393,15 +400,14 @@ class BlockCache(object):
log.debug('upload(%s): (re)linking to %d', el, block_id)
self.db.execute('UPDATE blocks SET refcount=refcount+1 WHERE id=?',
(block_id,))
+ self.db.execute('INSERT OR REPLACE INTO inode_blocks (block_id, inode, blockno) '
+ 'VALUES(?,?,?)', (block_id, el.inode, el.blockno))
el.dirty = False
self.in_transit.remove((el.inode, el.blockno))
except:
self.in_transit.remove((el.inode, el.blockno))
raise
-
- self.db.execute('INSERT OR REPLACE INTO inode_blocks (block_id, inode, blockno) '
- 'VALUES(?,?,?)', (block_id, el.inode, el.blockno))
-
+
# Check if we have to remove an old block
if not old_block_id:
log.debug('upload(%s): no old block, returning', el)
diff --git a/src/s3ql/cleanup_manager.py b/src/s3ql/cleanup_manager.py
new file mode 100644
index 0000000..e4beee0
--- /dev/null
+++ b/src/s3ql/cleanup_manager.py
@@ -0,0 +1,40 @@
+'''
+cleanup_manager.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2011 Nikolaus Rath <Nikolaus@rath.org>
+Copyright (C) 2011 Jan Kaliszewski <zuo@chopin.edu.pl>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+
+class CleanupManager(object):
+
+ def __init__(self, logger=None, initial_callbacks=()):
+ self.cleanup_callbacks = list(initial_callbacks)
+ self.logger = logger
+
+ def register(self, callback, *args, **kwargs):
+ self.cleanup_callbacks.append((callback, args, kwargs))
+
+ def unregister(self, callback, *args, **kwargs):
+ self.cleanup_callbacks.remove((callback, args, kwargs))
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc, tb):
+ self._next_callback()
+
+ def _next_callback(self):
+ if self.cleanup_callbacks:
+ callback, args, kwargs = self.cleanup_callbacks.pop()
+ try:
+ callback(*args, **kwargs)
+ except:
+ if self.logger:
+ self.logger.exception('Exception during cleanup:')
+ finally:
+ # recurse so that every remaining cleanup callback still runs
+ # Py3.x: could report all errors via exception chaining
+ self._next_callback()
diff --git a/src/s3ql/cli/__init__.py b/src/s3ql/cli/__init__.py
deleted file mode 100644
index f716dc8..0000000
--- a/src/s3ql/cli/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-'''
-__init__.py - this file is part of S3QL (http://s3ql.googlecode.com)
-
-Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
-
-This program can be distributed under the terms of the GNU GPLv3.
-'''
-
-from __future__ import division, print_function, absolute_import
-
-__all__ = ['adm', 'cp', 'ctrl', 'fsck', 'lock', 'mkfs',
- 'mount', 'remove', 'statfs', 'umount' ]
diff --git a/src/s3ql/cli/adm.py b/src/s3ql/cli/adm.py
deleted file mode 100644
index a8fdd3e..0000000
--- a/src/s3ql/cli/adm.py
+++ /dev/null
@@ -1,945 +0,0 @@
-'''
-adm.py - this file is part of S3QL (http://s3ql.googlecode.com)
-
-Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
-
-This program can be distributed under the terms of the GNU GPLv3.
-'''
-
-from __future__ import division, print_function, absolute_import
-from Queue import Queue
-from datetime import datetime as Datetime
-from getpass import getpass
-from s3ql import CURRENT_FS_REV
-from s3ql.backends.common import (BetterBucket, get_bucket, NoSuchBucket,
- ChecksumError, AbstractBucket, NoSuchObject)
-from s3ql.backends.local import Bucket as LocalBucket, ObjectR, unescape, escape
-from s3ql.common import (QuietError, restore_metadata, cycle_metadata, BUFSIZE,
- dump_metadata, create_tables, setup_logging, get_bucket_cachedir)
-from s3ql.database import Connection
-from s3ql.fsck import Fsck
-from s3ql.parse_args import ArgumentParser
-from threading import Thread
-import ConfigParser
-import cPickle as pickle
-import errno
-import hashlib
-import logging
-import os
-import re
-import shutil
-import stat
-import sys
-import tempfile
-import textwrap
-import time
-
-log = logging.getLogger("adm")
-
-def parse_args(args):
- '''Parse command line'''
-
- parser = ArgumentParser(
- description="Manage S3QL Buckets.",
- epilog=textwrap.dedent('''\
- Hint: run `%(prog)s <action> --help` to get help on the additional
- arguments that the different actions take.'''))
-
- pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\
- Hint: run `%(prog)s --help` to get help on other available actions and
- optional arguments that can be used with all actions.'''))
- pparser.add_storage_url()
-
- subparsers = parser.add_subparsers(metavar='<action>', dest='action',
- help='may be either of')
- subparsers.add_parser("passphrase", help="change bucket passphrase",
- parents=[pparser])
- subparsers.add_parser("upgrade", help="upgrade file system to newest revision",
- parents=[pparser])
- subparsers.add_parser("clear", help="delete all S3QL data from the bucket",
- parents=[pparser])
- subparsers.add_parser("download-metadata",
- help="Interactively download metadata backups. "
- "Use only if you know what you are doing.",
- parents=[pparser])
-
- parser.add_debug_modules()
- parser.add_quiet()
- parser.add_log()
- parser.add_authfile()
- parser.add_cachedir()
- parser.add_version()
-
- options = parser.parse_args(args)
-
- return options
-
-def main(args=None):
- '''Change or show S3QL file system parameters'''
-
- if args is None:
- args = sys.argv[1:]
-
- options = parse_args(args)
- setup_logging(options)
-
- # Check if fs is mounted on this computer
- # This is not foolproof but should prevent common mistakes
- match = options.storage_url + ' /'
- with open('/proc/mounts', 'r') as fh:
- for line in fh:
- if line.startswith(match):
- raise QuietError('Can not work on mounted file system.')
-
- if options.action == 'clear':
- return clear(get_bucket(options, plain=True),
- get_bucket_cachedir(options.storage_url, options.cachedir))
-
- if options.action == 'upgrade':
- return upgrade(get_possibly_old_bucket(options),
- get_bucket_cachedir(options.storage_url, options.cachedir))
-
- bucket = get_bucket(options)
-
- if options.action == 'passphrase':
- return change_passphrase(bucket)
-
- if options.action == 'download-metadata':
- return download_metadata(bucket, options.storage_url)
-
-
-def download_metadata(bucket, storage_url):
- '''Download old metadata backups'''
-
- backups = sorted(bucket.list('s3ql_metadata_bak_'))
-
- if not backups:
- raise QuietError('No metadata backups found.')
-
- log.info('The following backups are available:')
- log.info('%3s %-23s %-15s', 'No', 'Name', 'Date')
- for (i, name) in enumerate(backups):
- params = bucket.lookup(name)
- if 'last-modified' in params:
- date = Datetime.fromtimestamp(params['last-modified']).strftime('%Y-%m-%d %H:%M:%S')
- else:
- # (metadata might from an older fs revision)
- date = '(unknown)'
-
- log.info('%3d %-23s %-15s', i, name, date)
-
- name = None
- while name is None:
- buf = raw_input('Enter no to download: ')
- try:
- name = backups[int(buf.strip())]
- except:
- log.warn('Invalid input')
-
- log.info('Downloading %s...', name)
-
- cachepath = get_bucket_cachedir(storage_url, '.')
- for i in ('.db', '.params'):
- if os.path.exists(cachepath + i):
- raise QuietError('%s already exists, aborting.' % cachepath+i)
-
- param = bucket.lookup(name)
- try:
- log.info('Reading metadata...')
- def do_read(fh):
- os.close(os.open(cachepath + '.db', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
- stat.S_IRUSR | stat.S_IWUSR))
- db = Connection(cachepath + '.db', fast_mode=True)
- try:
- restore_metadata(fh, db)
- finally:
- db.close()
- bucket.perform_read(do_read, name)
- except:
- # Don't keep file if it doesn't contain anything sensible
- os.unlink(cachepath + '.db')
- raise
-
- # Raise sequence number so that fsck.s3ql actually uses the
- # downloaded backup
- seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]
- param['seq_no'] = max(seq_nos) + 1
- pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
-
-def change_passphrase(bucket):
- '''Change bucket passphrase'''
-
- if not isinstance(bucket, BetterBucket) and bucket.passphrase:
- raise QuietError('Bucket is not encrypted.')
-
- data_pw = bucket.passphrase
-
- if sys.stdin.isatty():
- wrap_pw = getpass("Enter new encryption password: ")
- if not wrap_pw == getpass("Confirm new encryption password: "):
- raise QuietError("Passwords don't match")
- else:
- wrap_pw = sys.stdin.readline().rstrip()
-
- bucket.passphrase = wrap_pw
- bucket['s3ql_passphrase'] = data_pw
- bucket.passphrase = data_pw
-
-def clear(bucket, cachepath):
- print('I am about to delete the S3QL file system in %s.' % bucket,
- 'Please enter "yes" to continue.', '> ', sep='\n', end='')
-
- if sys.stdin.readline().strip().lower() != 'yes':
- raise QuietError()
-
- log.info('Deleting...')
-
- for suffix in ('.db', '.params'):
- name = cachepath + suffix
- if os.path.exists(name):
- os.unlink(name)
-
- name = cachepath + '-cache'
- if os.path.exists(name):
- shutil.rmtree(name)
-
- bucket.clear()
-
- print('File system deleted.')
-
- if not bucket.is_get_consistent():
- log.info('Note: it may take a while for the removals to propagate through the backend.')
-
-
-def get_possibly_old_bucket(options, plain=False):
- '''Return factory producing bucket objects for given storage-url
-
- If *plain* is true, don't attempt to unlock and don't wrap into
- BetterBucket.
- '''
-
- hit = re.match(r'^([a-zA-Z0-9]+)://(.+)$', options.storage_url)
- if not hit:
- raise QuietError('Unknown storage url: %s' % options.storage_url)
-
- backend_name = 's3ql.backends.%s' % hit.group(1)
- bucket_name = hit.group(2)
- try:
- __import__(backend_name)
- except ImportError:
- raise QuietError('No such backend: %s' % hit.group(1))
-
- bucket_class = getattr(sys.modules[backend_name], 'Bucket')
-
- # Read authfile
- config = ConfigParser.SafeConfigParser()
- if os.path.isfile(options.authfile):
- mode = os.stat(options.authfile).st_mode
- if mode & (stat.S_IRGRP | stat.S_IROTH):
- raise QuietError("%s has insecure permissions, aborting." % options.authfile)
- config.read(options.authfile)
-
- backend_login = None
- backend_pw = None
- bucket_passphrase = None
- for section in config.sections():
- def getopt(name):
- try:
- return config.get(section, name)
- except ConfigParser.NoOptionError:
- return None
-
- pattern = getopt('storage-url')
-
- if not pattern or not options.storage_url.startswith(pattern):
- continue
-
- backend_login = backend_login or getopt('backend-login')
- backend_pw = backend_pw or getopt('backend-password')
- bucket_passphrase = bucket_passphrase or getopt('bucket-passphrase')
-
- if not backend_login and bucket_class.needs_login:
- if sys.stdin.isatty():
- backend_login = getpass("Enter backend login: ")
- else:
- backend_login = sys.stdin.readline().rstrip()
-
- if not backend_pw and bucket_class.needs_login:
- if sys.stdin.isatty():
- backend_pw = getpass("Enter backend password: ")
- else:
- backend_pw = sys.stdin.readline().rstrip()
-
- bucket = bucket_class(bucket_name, backend_login, backend_pw)
- if bucket_class == LocalBucket and 's3ql_metadata.dat' in bucket:
- bucket_class = LegacyLocalBucket
-
- if plain:
- return lambda: bucket_class(bucket_name, backend_login, backend_pw)
-
- bucket = bucket_class(bucket_name, backend_login, backend_pw)
-
- try:
- encrypted = 's3ql_passphrase' in bucket
- except NoSuchBucket:
- raise QuietError('Bucket %d does not exist' % bucket_name)
-
- if encrypted and not bucket_passphrase:
- if sys.stdin.isatty():
- bucket_passphrase = getpass("Enter bucket encryption passphrase: ")
- else:
- bucket_passphrase = sys.stdin.readline().rstrip()
- elif not encrypted:
- bucket_passphrase = None
-
- if hasattr(options, 'compress'):
- compress = options.compress
- else:
- compress = 'zlib'
-
- if not encrypted:
- return lambda: BetterBucket(None, compress,
- bucket_class(bucket_name, backend_login, backend_pw))
-
- tmp_bucket = BetterBucket(bucket_passphrase, compress, bucket)
-
- try:
- data_pw = tmp_bucket['s3ql_passphrase']
- except ChecksumError:
- raise QuietError('Wrong bucket passphrase')
-
- return lambda: BetterBucket(data_pw, compress,
- bucket_class(bucket_name, backend_login, backend_pw))
-
-def upgrade(bucket_factory, cachepath):
- '''Upgrade file system to newest revision'''
-
- bucket = bucket_factory()
-
- log.info('Getting file system parameters..')
- seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]
- seq_no = max(seq_nos)
- if not seq_nos:
- raise QuietError(textwrap.dedent('''
- File system revision too old to upgrade!
-
- You need to use an older S3QL version to upgrade to a more recent
- revision before you can use this version to upgrade to the newest
- revision.
- '''))
-
- # Check for cached metadata
- db = None
- if os.path.exists(cachepath + '.params'):
- param = pickle.load(open(cachepath + '.params', 'rb'))
- if param['seq_no'] < seq_no:
- log.info('Ignoring locally cached metadata (outdated).')
- param = bucket.lookup('s3ql_metadata')
- else:
- log.info('Using cached metadata.')
- db = Connection(cachepath + '.db')
- else:
- param = bucket.lookup('s3ql_metadata')
-
- # Check for unclean shutdown
- if param['seq_no'] < seq_no:
- if bucket.is_get_consistent():
- raise QuietError(textwrap.fill(textwrap.dedent('''\
- It appears that the file system is still mounted somewhere else. If this is not
- the case, the file system may have not been unmounted cleanly and you should try
- to run fsck on the computer where the file system has been mounted most recently.
- ''')))
- else:
- raise QuietError(textwrap.fill(textwrap.dedent('''\
- It appears that the file system is still mounted somewhere else. If this is not the
- case, the file system may have not been unmounted cleanly or the data from the
- most-recent mount may have not yet propagated through the backend. In the later case,
- waiting for a while should fix the problem, in the former case you should try to run
- fsck on the computer where the file system has been mounted most recently.
- ''')))
-
- # Check that the fs itself is clean
- if param['needs_fsck']:
- raise QuietError("File system damaged, run fsck!")
-
- # Check revision
- if param['revision'] < CURRENT_FS_REV - 2:
- raise QuietError(textwrap.dedent('''
- File system revision too old to upgrade!
-
- You need to use an older S3QL version to upgrade to a more recent
- revision before you can use this version to upgrade to the newest
- revision.
- '''))
-
- elif param['revision'] >= CURRENT_FS_REV:
- print('File system already at most-recent revision')
- return
-
- print(textwrap.dedent('''
- I am about to update the file system to the newest revision.
- You will not be able to access the file system with any older version
- of S3QL after this operation.
-
- You should make very sure that this command is not interrupted and
- that no one else tries to mount, fsck or upgrade the file system at
- the same time.
-
- When using the local backend, metadata and data of each stored
- object will be merged into one file. This requires every object
- to be rewritten and may thus take some time.
- '''))
-
- print('Please enter "yes" to continue.', '> ', sep='\n', end='')
-
- if sys.stdin.readline().strip().lower() != 'yes':
- raise QuietError()
-
- log.info('Upgrading from revision %d to %d...', param['revision'],
- CURRENT_FS_REV)
-
- if param['revision'] == CURRENT_FS_REV - 1:
- upgrade_once(bucket, cachepath, db, param)
- else:
- upgrade_twice(bucket, cachepath, db, param, bucket_factory)
-
-def restore_legacy_metadata2(ifh, conn):
-
- # Unpickling is terribly slow if fh is not a real file object.
- if not hasattr(ifh, 'fileno'):
- with tempfile.TemporaryFile() as tmp:
- shutil.copyfileobj(ifh, tmp, BUFSIZE)
- tmp.seek(0)
- return restore_legacy_metadata2(tmp, conn)
-
- unpickler = pickle.Unpickler(ifh)
- (to_dump, columns) = unpickler.load()
- create_tables(conn)
- conn.execute('DROP TABLE inodes')
- conn.execute("""
- CREATE TABLE inodes (
- id INTEGER PRIMARY KEY,
- uid INT NOT NULL,
- gid INT NOT NULL,
- mode INT NOT NULL,
- mtime REAL NOT NULL,
- atime REAL NOT NULL,
- ctime REAL NOT NULL,
- refcount INT NOT NULL,
- size INT NOT NULL DEFAULT 0,
- rdev INT NOT NULL DEFAULT 0,
- locked BOOLEAN NOT NULL DEFAULT 0,
- block_id INT REFERENCES blocks(id)
- )""")
-
- for (table, _) in to_dump:
- log.info('Loading %s', table)
- if table == 'objects':
- columns[table][columns[table].index('compr_size')] = 'size'
- col_str = ', '.join(columns[table])
- val_str = ', '.join('?' for _ in columns[table])
- sql_str = 'INSERT INTO %s (%s) VALUES(%s)' % (table, col_str, val_str)
- while True:
- buf = unpickler.load()
- if not buf:
- break
- for row in buf:
- conn.execute(sql_str, row)
-
-def upgrade_once(bucket, cachepath, db, param):
-
- # Download metadata
- if not db:
- log.info("Downloading & uncompressing metadata...")
- def do_read(fh):
- os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
- stat.S_IRUSR | stat.S_IWUSR))
- db = Connection(cachepath + '.db.tmp', fast_mode=True)
- try:
- restore_legacy_metadata2(fh, db)
- finally:
- # If metata reading has to be retried, we don't want to hold
- # a lock on the database.
- db.close()
- bucket.perform_read(do_read, "s3ql_metadata")
- os.rename(cachepath + '.db.tmp', cachepath + '.db')
- db = Connection(cachepath + '.db')
- else:
- db.execute('ALTER TABLE objects RENAME TO leg_objects')
- db.execute("""
- CREATE TABLE objects (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- refcount INT NOT NULL,
- size INT
- )""")
- db.execute('INSERT INTO objects (id, refcount, size) '
- 'SELECT id, refcount, compr_size FROM leg_objects')
- db.execute('DROP TABLE leg_objects')
-
- db.execute('ALTER TABLE inodes RENAME TO leg_inodes')
- db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) '
- 'SELECT id, 0, block_id FROM leg_inodes WHERE block_id IS NOT NULL')
- db.execute("""
- CREATE TABLE inodes (
- id INTEGER PRIMARY KEY,
- uid INT NOT NULL,
- gid INT NOT NULL,
- mode INT NOT NULL,
- mtime REAL NOT NULL,
- atime REAL NOT NULL,
- ctime REAL NOT NULL,
- refcount INT NOT NULL,
- size INT NOT NULL DEFAULT 0,
- rdev INT NOT NULL DEFAULT 0,
- locked BOOLEAN NOT NULL DEFAULT 0
- )""")
- db.execute('insert into inodes (id,uid,gid,mode,mtime,atime,ctime,refcount,size,rdev,locked) '
- 'select id,uid,gid,mode,mtime,atime,ctime,refcount,size,rdev,locked '
- 'FROM leg_inodes')
- db.execute('DROP TABLE leg_inodes')
-
- param['seq_no'] += 1
- bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
- param['revision'] = CURRENT_FS_REV
- param['last-modified'] = time.time() - time.timezone
- pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
- cycle_metadata(bucket)
- bucket.perform_write(lambda fh: dump_metadata(fh, db) , "s3ql_metadata",
- metadata=param, is_compressed=True)
-
- db.execute('ANALYZE')
- db.execute('VACUUM')
-
-
-def upgrade_twice(bucket, cachepath, db, param, bucket_factory):
-
- if 's3ql_hash_check_status' not in bucket:
- if (isinstance(bucket, LegacyLocalBucket) or
- (isinstance(bucket, BetterBucket) and
- isinstance(bucket.bucket, LegacyLocalBucket))):
- log.info('Merging metadata into datafiles...')
- if isinstance(bucket, LegacyLocalBucket):
- bucketpath = bucket.name
- else:
- bucketpath = bucket.bucket.name
- i = 0
- for (path, _, filenames) in os.walk(bucketpath, topdown=True):
- for name in filenames:
- if not name.endswith('.meta'):
- continue
-
- basename = os.path.splitext(name)[0]
- if '=00' in basename:
- raise RuntimeError("No, seriously, you tried to break things, didn't you?")
-
- with open(os.path.join(path, name), 'r+b') as dst:
- dst.seek(0, os.SEEK_END)
- with open(os.path.join(path, basename + '.dat'), 'rb') as src:
- shutil.copyfileobj(src, dst, BUFSIZE)
-
- basename = basename.replace('#', '=23')
- os.rename(os.path.join(path, name),
- os.path.join(path, basename))
- os.unlink(os.path.join(path, basename + '.dat'))
-
- i += 1
- if i % 100 == 0:
- log.info('..processed %d objects so far..', i)
-
- print("Merging complete. Please restart s3qladm upgrade to complete the upgrade.")
- return
-
- # Download metadata
- if not db:
- log.info("Downloading & uncompressing metadata...")
- with tempfile.TemporaryFile() as tmp:
- def do_read(fh):
- tmp.seek(0)
- tmp.truncate()
- shutil.copyfileobj(fh, tmp, BUFSIZE)
- bucket.perform_read(do_read, "s3ql_metadata")
- os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
- stat.S_IRUSR | stat.S_IWUSR))
- db = Connection(cachepath + '.db.tmp', fast_mode=True)
- tmp.seek(0)
- restore_legacy_metadata(tmp, db)
- db.close()
- os.rename(cachepath + '.db.tmp', cachepath + '.db')
- db = Connection(cachepath + '.db')
-
- log.info('Upgrading metadata..')
- upgrade_metadata(db)
-
- param['seq_no'] += 1
- bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
- param['last-modified'] = time.time() - time.timezone
- pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
- cycle_metadata(bucket)
- bucket.perform_write(lambda fh: dump_metadata(fh, db) , "s3ql_metadata",
- metadata=param, is_compressed=True)
-
- db.execute('ANALYZE')
- db.execute('VACUUM')
-
- elif not db: # Metadata must have been already updated
- def do_read(fh):
- os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
- stat.S_IRUSR | stat.S_IWUSR))
- db = Connection(cachepath + '.db.tmp', fast_mode=True)
- restore_metadata(fh, db)
- db.close()
- bucket.perform_read(do_read, "s3ql_metadata")
- os.rename(cachepath + '.db.tmp', cachepath + '.db')
- db = Connection(cachepath + '.db')
-
- print(textwrap.dedent('''
- The following process may take a long time, but can be interrupted
- with Ctrl-C and resumed from this point by calling `s3qladm upgrade`
- again. Please see Changes.txt for why this is necessary.
- '''))
-
- if 's3ql_hash_check_status' not in bucket:
- log.info("Starting hash verification..")
- obj_id = 0
- bucket['s3ql_hash_check_status'] = '%d' % obj_id
- else:
- obj_id = int(bucket['s3ql_hash_check_status'])
- log.info("Resuming hash verification with object %d..", obj_id)
-
- try:
- total = db.get_val('SELECT COUNT(id) FROM objects WHERE id > ?', (obj_id,))
- i = 0
- queue = Queue(1)
- queue.error = None
- threads = []
- if (isinstance(bucket, LocalBucket) or
- (isinstance(bucket, BetterBucket) and
- isinstance(bucket.bucket,LocalBucket))):
- thread_count = 1
- else:
- thread_count = 25
-
- for _ in range(thread_count):
- t = Thread(target=check_hash, args=(queue, bucket_factory()))
- t.daemon = True
- t.start()
- threads.append(t)
-
- for (obj_id, hash_) in db.query('SELECT obj_id, hash FROM blocks JOIN objects '
- 'ON obj_id == objects.id WHERE obj_id > ? '
- 'ORDER BY obj_id ASC', (obj_id,)):
- queue.put((obj_id, hash_))
- i += 1
- if i % 100 == 0:
- log.info(' ..checked %d/%d objects..', i, total)
-
- if queue.error:
- raise queue.error[0], queue.error[1], queue.error[2]
-
- for t in threads:
- queue.put(None)
- for t in threads:
- t.join()
-
- except KeyboardInterrupt:
- log.info("Storing verification status...")
- for t in threads:
- queue.put(None)
- for t in threads:
- t.join()
- bucket['s3ql_hash_check_status'] = '%d' % obj_id
- raise QuietError('Aborting..')
-
- except:
- log.info("Storing verification status...")
- bucket['s3ql_hash_check_status'] = '%d' % obj_id
- raise
-
- log.info('Running fsck...')
- bucket['s3ql_hash_check_status'] = '%d' % obj_id
- fsck = Fsck(tempfile.mkdtemp(), bucket, param, db)
- fsck.check()
-
- if fsck.uncorrectable_errors:
- raise QuietError("Uncorrectable errors found, aborting.")
-
- param['revision'] = CURRENT_FS_REV
- param['seq_no'] += 1
- bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
- param['last-modified'] = time.time() - time.timezone
- pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
- bucket.perform_write(lambda fh: dump_metadata(fh, db) , "s3ql_metadata",
- metadata=param, is_compressed=True)
-
-def check_hash(queue, bucket):
-
- try:
- while True:
- tmp = queue.get()
- if tmp is None:
- break
-
- (obj_id, hash_) = tmp
-
-
- def do_read(fh):
- sha = hashlib.sha256()
- while True:
- buf = fh.read(BUFSIZE)
- if not buf:
- break
- sha.update(buf)
- return sha
- try:
- sha = bucket.perform_read(do_read, "s3ql_data_%d" % obj_id)
-
- except ChecksumError:
- log.warn('Object %d corrupted! Deleting..', obj_id)
- bucket.delete('s3ql_data_%d' % obj_id)
-
- except NoSuchObject:
- log.warn('Object %d seems to have disappeared', obj_id)
-
- else:
- if sha.digest() != hash_:
- log.warn('Object %d corrupted! Deleting..', obj_id)
- bucket.delete('s3ql_data_%d' % obj_id)
- except:
- queue.error = sys.exc_info()
- queue.get()
-
-
-def restore_legacy_metadata(ifh, conn):
- unpickler = pickle.Unpickler(ifh)
- (data_start, to_dump, sizes, columns) = unpickler.load()
- ifh.seek(data_start)
- create_legacy_tables(conn)
- for (table, _) in to_dump:
- log.info('Loading %s', table)
- col_str = ', '.join(columns[table])
- val_str = ', '.join('?' for _ in columns[table])
- sql_str = 'INSERT INTO %s (%s) VALUES(%s)' % (table, col_str, val_str)
- for _ in xrange(sizes[table]):
- buf = unpickler.load()
- for row in buf:
- conn.execute(sql_str, row)
-
-def upgrade_metadata(conn):
- for table in ('inodes', 'blocks', 'objects', 'contents', 'ext_attributes'):
- conn.execute('ALTER TABLE %s RENAME TO leg_%s' % (table, table))
-
- create_tables(conn)
-
- conn.execute('DROP TABLE ext_attributes')
- conn.execute('ALTER TABLE leg_ext_attributes RENAME TO ext_attributes')
-
- # Create a block for each object
- conn.execute('''
- INSERT INTO blocks (id, hash, refcount, obj_id, size)
- SELECT id, hash, refcount, id, size FROM leg_objects
- ''')
- conn.execute('''
- INSERT INTO objects (id, refcount, size)
- SELECT id, 1, compr_size FROM leg_objects
- ''')
- conn.execute('DROP TABLE leg_objects')
-
- # Create new inode_blocks table
- conn.execute('''
- INSERT INTO inode_blocks (inode, blockno, block_id)
- SELECT inode, blockno, obj_id
- FROM leg_blocks
- ''')
-
- # Create new inodes table
- conn.execute('''
- INSERT INTO inodes (id, uid, gid, mode, mtime, atime, ctime,
- refcount, size, rdev, locked)
- SELECT id, uid, gid, mode, mtime, atime, ctime,
- refcount, size, rdev, locked
- FROM leg_inodes
- ''')
-
- conn.execute('''
- INSERT INTO symlink_targets (inode, target)
- SELECT id, target FROM leg_inodes WHERE target IS NOT NULL
- ''')
-
- conn.execute('DROP TABLE leg_inodes')
- conn.execute('DROP TABLE leg_blocks')
-
- # Sort out names
- conn.execute('''
- INSERT INTO names (name, refcount)
- SELECT name, COUNT(name) FROM leg_contents GROUP BY name
- ''')
- conn.execute('''
- INSERT INTO contents (name_id, inode, parent_inode)
- SELECT names.id, inode, parent_inode
- FROM leg_contents JOIN names ON leg_contents.name == names.name
- ''')
- conn.execute('DROP TABLE leg_contents')
-
- conn.execute('ANALYZE')
-
-def create_legacy_tables(conn):
- conn.execute("""
- CREATE TABLE inodes (
- id INTEGER PRIMARY KEY,
- uid INT NOT NULL,
- gid INT NOT NULL,
- mode INT NOT NULL,
- mtime REAL NOT NULL,
- atime REAL NOT NULL,
- ctime REAL NOT NULL,
- refcount INT NOT NULL,
- target BLOB(256) ,
- size INT NOT NULL DEFAULT 0,
- rdev INT NOT NULL DEFAULT 0,
- locked BOOLEAN NOT NULL DEFAULT 0
- )
- """)
- conn.execute("""
- CREATE TABLE objects (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- refcount INT NOT NULL,
- hash BLOB(16) UNIQUE,
- size INT NOT NULL,
- compr_size INT
- )""")
- conn.execute("""
- CREATE TABLE blocks (
- inode INTEGER NOT NULL REFERENCES leg_inodes(id),
- blockno INT NOT NULL,
- obj_id INTEGER NOT NULL REFERENCES leg_objects(id),
- PRIMARY KEY (inode, blockno)
- )""")
- conn.execute("""
- CREATE TABLE contents (
- rowid INTEGER PRIMARY KEY AUTOINCREMENT,
- name BLOB(256) NOT NULL,
- inode INT NOT NULL REFERENCES leg_inodes(id),
- parent_inode INT NOT NULL REFERENCES leg_inodes(id),
-
- UNIQUE (name, parent_inode)
- )""")
- conn.execute("""
- CREATE TABLE ext_attributes (
- inode INTEGER NOT NULL REFERENCES inodes(id),
- name BLOB NOT NULL,
- value BLOB NOT NULL,
-
- PRIMARY KEY (inode, name)
- )""")
-
-
-class LegacyLocalBucket(AbstractBucket):
- needs_login = False
-
- def __init__(self, name, backend_login, backend_pw): #IGNORE:W0613
- super(LegacyLocalBucket, self).__init__()
- self.name = name
- if not os.path.exists(name):
- raise NoSuchBucket(name)
-
- def lookup(self, key):
- path = self._key_to_path(key) + '.meta'
- try:
- with open(path, 'rb') as src:
- return pickle.load(src)
- except IOError as exc:
- if exc.errno == errno.ENOENT:
- raise NoSuchObject(key)
- else:
- raise
-
- def is_temp_failure(self, exc):
- return False
-
- def open_read(self, key):
- path = self._key_to_path(key)
- try:
- fh = ObjectR(path + '.dat')
- except IOError as exc:
- if exc.errno == errno.ENOENT:
- raise NoSuchObject(key)
- else:
- raise
-
- fh.metadata = pickle.load(open(path + '.meta', 'rb'))
-
- return fh
-
- def open_write(self, key, metadata=None, is_compressed=False):
- raise RuntimeError('Not implemented')
-
- def clear(self):
- raise RuntimeError('Not implemented')
-
- def copy(self, src, dest):
- raise RuntimeError('Not implemented')
-
- def contains(self, key):
- path = self._key_to_path(key)
- try:
- os.lstat(path + '.meta')
- except OSError as exc:
- if exc.errno == errno.ENOENT:
- return False
- raise
- return True
-
- def delete(self, key, force=False):
- raise RuntimeError('Not implemented')
-
- def list(self, prefix=''):
- if prefix:
- base = os.path.dirname(self._key_to_path(prefix))
- else:
- base = self.name
-
- for (path, dirnames, filenames) in os.walk(base, topdown=True):
-
- # Do not look in wrong directories
- if prefix:
- rpath = path[len(self.name):] # path relative to base
- prefix_l = ''.join(rpath.split('/'))
-
- dirs_to_walk = list()
- for name in dirnames:
- prefix_ll = unescape(prefix_l + name)
- if prefix_ll.startswith(prefix[:len(prefix_ll)]):
- dirs_to_walk.append(name)
- dirnames[:] = dirs_to_walk
-
- for name in filenames:
- key = unescape(name)
-
- if not prefix or key.startswith(prefix):
- if key.endswith('.meta'):
- yield key[:-5]
-
- def _key_to_path(self, key):
- key = escape(key)
-
- if not key.startswith('s3ql_data_'):
- return os.path.join(self.name, key)
-
- no = key[10:]
- path = [ self.name, 's3ql_data_']
- for i in range(0, len(no), 3):
- path.append(no[:i])
- path.append(key)
-
- return os.path.join(*path)
-
- def is_get_consistent(self):
- return True
-
- def is_list_create_consistent(self):
- return True
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
-
diff --git a/src/s3ql/cli/fsck.py b/src/s3ql/cli/fsck.py
deleted file mode 100644
index f5d2c55..0000000
--- a/src/s3ql/cli/fsck.py
+++ /dev/null
@@ -1,245 +0,0 @@
-'''
-fsck.py - this file is part of S3QL (http://s3ql.googlecode.com)
-
-Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
-
-This program can be distributed under the terms of the GNU GPLv3.
-'''
-
-from __future__ import division, print_function, absolute_import
-from s3ql import CURRENT_FS_REV
-from s3ql.backends.common import get_bucket
-from s3ql.common import (get_bucket_cachedir, cycle_metadata, setup_logging,
- QuietError, get_seq_no, restore_metadata, dump_metadata, CTRL_INODE)
-from s3ql.database import Connection
-from s3ql.fsck import Fsck
-from s3ql.parse_args import ArgumentParser
-import apsw
-import cPickle as pickle
-import logging
-import os
-import stat
-import sys
-import textwrap
-import time
-
-log = logging.getLogger("fsck")
-
-def parse_args(args):
-
- parser = ArgumentParser(
- description="Checks and repairs an S3QL filesystem.")
-
- parser.add_log('~/.s3ql/fsck.log')
- parser.add_cachedir()
- parser.add_authfile()
- parser.add_debug_modules()
- parser.add_quiet()
- parser.add_version()
- parser.add_storage_url()
-
- parser.add_argument("--batch", action="store_true", default=False,
- help="If user input is required, exit without prompting.")
- parser.add_argument("--force", action="store_true", default=False,
- help="Force checking even if file system is marked clean.")
- parser.add_argument("--renumber-inodes", action="store_true", default=False,
- help="Renumber inodes to be stricly sequential starting from %d"
- % (CTRL_INODE+1))
- options = parser.parse_args(args)
-
- return options
-
-def main(args=None):
-
- if args is None:
- args = sys.argv[1:]
-
- options = parse_args(args)
- setup_logging(options)
-
- # Check if fs is mounted on this computer
- # This is not foolproof but should prevent common mistakes
- match = options.storage_url + ' /'
- with open('/proc/mounts', 'r') as fh:
- for line in fh:
- if line.startswith(match):
- raise QuietError('Can not check mounted file system.')
-
- bucket = get_bucket(options)
-
- cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)
- seq_no = get_seq_no(bucket)
- param_remote = bucket.lookup('s3ql_metadata')
- db = None
-
- if os.path.exists(cachepath + '.params'):
- assert os.path.exists(cachepath + '.db')
- param = pickle.load(open(cachepath + '.params', 'rb'))
- if param['seq_no'] < seq_no:
- log.info('Ignoring locally cached metadata (outdated).')
- param = bucket.lookup('s3ql_metadata')
- else:
- log.info('Using cached metadata.')
- db = Connection(cachepath + '.db')
- assert not os.path.exists(cachepath + '-cache') or param['needs_fsck']
-
- if param_remote['seq_no'] != param['seq_no']:
- log.warn('Remote metadata is outdated.')
- param['needs_fsck'] = True
-
- else:
- param = param_remote
- assert not os.path.exists(cachepath + '-cache')
- # .db might exist if mount.s3ql is killed at exactly the right instant
- # and should just be ignored.
-
- # Check revision
- if param['revision'] < CURRENT_FS_REV:
- raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
- elif param['revision'] > CURRENT_FS_REV:
- raise QuietError('File system revision too new, please update your '
- 'S3QL installation.')
-
- if param['seq_no'] < seq_no:
- if bucket.is_get_consistent():
- print(textwrap.fill(textwrap.dedent('''\
- Up to date metadata is not available. Probably the file system has not
- been properly unmounted and you should try to run fsck on the computer
- where the file system has been mounted most recently.
- ''')))
- else:
- print(textwrap.fill(textwrap.dedent('''\
- Up to date metadata is not available. Either the file system has not
- been unmounted cleanly or the data has not yet propagated through the backend.
- In the later case, waiting for a while should fix the problem, in
- the former case you should try to run fsck on the computer where
- the file system has been mounted most recently
- ''')))
-
- print('Enter "continue" to use the outdated data anyway:',
- '> ', sep='\n', end='')
- if options.batch:
- raise QuietError('(in batch mode, exiting)')
- if sys.stdin.readline().strip() != 'continue':
- raise QuietError()
-
- param['seq_no'] = seq_no
- param['needs_fsck'] = True
-
-
- if (not param['needs_fsck']
- and ((time.time() - time.timezone) - param['last_fsck'])
- < 60 * 60 * 24 * 31): # last check more than 1 month ago
- if options.force or options.renumber_inodes:
- log.info('File system seems clean, checking anyway.')
- else:
- log.info('File system is marked as clean. Use --force to force checking.')
- return
-
- # If using local metadata, check consistency
- if db:
- log.info('Checking DB integrity...')
- try:
- # get_list may raise CorruptError itself
- res = db.get_list('PRAGMA integrity_check(20)')
- if res[0][0] != u'ok':
- log.error('\n'.join(x[0] for x in res ))
- raise apsw.CorruptError()
- except apsw.CorruptError:
- raise QuietError('Local metadata is corrupted. Remove or repair the following '
- 'files manually and re-run fsck:\n'
- + cachepath + '.db (corrupted)\n'
- + cachepath + '.param (intact)')
- else:
- def do_read(fh):
- os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
- stat.S_IRUSR | stat.S_IWUSR))
- db = Connection(cachepath + '.db.tmp', fast_mode=True)
- try:
- restore_metadata(fh, db)
- finally:
- # If metata reading has to be retried, we don't want to hold
- # a lock on the database.
- db.close()
-
- bucket.perform_read(do_read, "s3ql_metadata")
- os.rename(cachepath + '.db.tmp', cachepath + '.db')
- db = Connection(cachepath + '.db')
-
- # Increase metadata sequence no
- param['seq_no'] += 1
- param['needs_fsck'] = True
- bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
- pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
-
- fsck = Fsck(cachepath + '-cache', bucket, param, db)
- fsck.check()
-
- if fsck.uncorrectable_errors:
- raise QuietError("Uncorrectable errors found, aborting.")
-
- if os.path.exists(cachepath + '-cache'):
- os.rmdir(cachepath + '-cache')
-
- if options.renumber_inodes:
- renumber_inodes(db)
-
- cycle_metadata(bucket)
- param['needs_fsck'] = False
- param['last_fsck'] = time.time() - time.timezone
- param['last-modified'] = time.time() - time.timezone
- bucket.perform_write(lambda fh: dump_metadata(fh, db) , "s3ql_metadata",
- metadata=param, is_compressed=True)
- pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
-
- db.execute('ANALYZE')
- db.execute('VACUUM')
- db.close()
-
- if options.renumber_inodes:
- print('',
- 'Inodes were renumbered. If this file system has been exported over NFS,',
- 'all NFS clients need to be restarted before mounting the S3QL file system ',
- 'again or data corruption may occur.', sep='\n')
-
-
-def renumber_inodes(db):
- '''Renumber inodes'''
-
- log.info('Renumbering inodes...')
- total = db.get_val('SELECT COUNT(id) FROM inodes')
- db.execute('CREATE TEMPORARY TABLE inode_ids AS '
- 'SELECT id FROM inodes WHERE id > ? ORDER BY id DESC',
- (max(total, CTRL_INODE),))
- db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')
- try:
- i = 0
- cur = CTRL_INODE+1
- for (id_,) in db.query("SELECT id FROM inode_ids"):
- while True:
- try:
- db.execute('UPDATE inodes SET id=? WHERE id=?', (cur, id_))
- except apsw.ConstraintError:
- cur += 1
- else:
- break
-
- db.execute('UPDATE contents SET inode=? WHERE inode=?', (cur, id_))
- db.execute('UPDATE contents SET parent_inode=? WHERE parent_inode=?', (cur, id_))
- db.execute('UPDATE inode_blocks SET inode=? WHERE inode=?', (cur, id_))
- db.execute('UPDATE symlink_targets SET inode=? WHERE inode=?', (cur, id_))
- db.execute('UPDATE ext_attributes SET inode=? WHERE inode=?', (cur, id_))
-
- cur += 1
- i += 1
- if i % 5000 == 0:
- log.info('..processed %d inodes so far..', i)
-
- finally:
- db.execute('DROP TABLE inode_ids')
- db.execute('DROP INDEX ix_contents_inode')
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
-
diff --git a/src/s3ql/common.py b/src/s3ql/common.py
index 6dfe902..dcc9c31 100644
--- a/src/s3ql/common.py
+++ b/src/s3ql/common.py
@@ -7,22 +7,24 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
+from cgitb import scanvars, __UNDEF__
from llfuse import ROOT_INODE
-import cPickle as pickle
+import bz2
import hashlib
+import inspect
+import linecache
import logging
import os
+import pydoc
import stat
import sys
-import tempfile
-import time
-import lzma
+import types
# Buffer size when writing objects
BUFSIZE = 256 * 1024
log = logging.getLogger('common')
-
+
def setup_logging(options):
root_logger = logging.getLogger()
if root_logger.handlers:
@@ -124,64 +126,12 @@ def get_seq_no(bucket):
return seq_no
-def cycle_metadata(bucket):
- from .backends.common import NoSuchObject
+def stream_write_bz2(ifh, ofh):
+ '''Compress *ifh* into *ofh* using bz2 compression'''
- log.info('Backing up old metadata...')
- for i in reversed(range(10)):
- try:
- bucket.copy("s3ql_metadata_bak_%d" % i, "s3ql_metadata_bak_%d" % (i + 1))
- except NoSuchObject:
- pass
-
- bucket.copy("s3ql_metadata", "s3ql_metadata_bak_0")
-
-def dump_metadata(ofh, conn):
-
- log.info('Dumping metadata...')
- # First write everything into temporary file
- tmp = tempfile.TemporaryFile()
- pickler = pickle.Pickler(tmp, 2)
- bufsize = 256
- buf = range(bufsize)
- tables_to_dump = [('objects', 'id'), ('blocks', 'id'),
- ('inode_blocks', 'inode, blockno'),
- ('inodes', 'id'), ('symlink_targets', 'inode'),
- ('names', 'id'), ('contents', 'parent_inode, name_id'),
- ('ext_attributes', 'inode, name')]
-
- columns = dict()
- for (table, _) in tables_to_dump:
- columns[table] = list()
- for row in conn.query('PRAGMA table_info(%s)' % table):
- columns[table].append(row[1])
-
- pickler.dump((tables_to_dump, columns))
-
- for (table, order) in tables_to_dump:
- log.info('..%s..' % table)
- pickler.clear_memo()
- i = 0
- for row in conn.query('SELECT %s FROM %s ORDER BY %s'
- % (','.join(columns[table]), table, order)):
- buf[i] = row
- i += 1
- if i == bufsize:
- pickler.dump(buf)
- pickler.clear_memo()
- i = 0
-
- if i != 0:
- pickler.dump(buf[:i])
-
- pickler.dump(None)
-
- # Then compress and send
- log.info("Compressing and uploading metadata...")
- compr = lzma.LZMACompressor(options={ 'level': 7 })
- tmp.seek(0)
+ compr = bz2.BZ2Compressor(9)
while True:
- buf = tmp.read(BUFSIZE)
+ buf = ifh.read(BUFSIZE)
if not buf:
break
buf = compr.compress(buf)
@@ -190,45 +140,19 @@ def dump_metadata(ofh, conn):
buf = compr.flush()
if buf:
ofh.write(buf)
- del compr # Free memory ASAP, LZMA level 7 needs 186 MB
- tmp.close()
-
-def restore_metadata(ifh, conn):
- # Note: unpickling is terribly slow if fh is not a real file object, so
- # uncompressing to a temporary file also gives a performance boost
- log.info('Downloading and decompressing metadata...')
- tmp = tempfile.TemporaryFile()
- decompressor = lzma.LZMADecompressor()
+def stream_read_bz2(ifh, ofh):
+ '''Uncompress bz2 compressed *ifh* into *ofh*'''
+
+ decompressor = bz2.BZ2Decompressor()
while True:
buf = ifh.read(BUFSIZE)
if not buf:
break
buf = decompressor.decompress(buf)
if buf:
- tmp.write(buf)
- del decompressor
- tmp.seek(0)
-
- log.info("Reading metadata...")
- unpickler = pickle.Unpickler(tmp)
- (to_dump, columns) = unpickler.load()
- create_tables(conn)
- for (table, _) in to_dump:
- log.info('..%s..', table)
- col_str = ', '.join(columns[table])
- val_str = ', '.join('?' for _ in columns[table])
- sql_str = 'INSERT INTO %s (%s) VALUES(%s)' % (table, col_str, val_str)
- while True:
- buf = unpickler.load()
- if not buf:
- break
- for row in buf:
- conn.execute(sql_str, row)
-
- tmp.close()
- conn.execute('ANALYZE')
-
+ ofh.write(buf)
+
class QuietError(Exception):
'''
QuietError is the base class for exceptions that should not result
@@ -246,6 +170,72 @@ class QuietError(Exception):
def __str__(self):
return self.msg
+# Adapted from cgitb.text, but less verbose
+def format_tb(einfo):
+ """Return a plain text document describing a given traceback."""
+
+ etype, evalue, etb = einfo
+ if type(etype) is types.ClassType:
+ etype = etype.__name__
+
+ frames = [ 'Traceback (most recent call last):' ]
+ records = inspect.getinnerframes(etb, context=7)
+ for (frame, file_, lnum, func, lines, index) in records:
+ (args, varargs, varkw, locals_) = inspect.getargvalues(frame)
+ sig = inspect.formatargvalues(args, varargs, varkw, locals_,
+ formatvalue=lambda value: '=' + pydoc.text.repr(value))
+
+ rows = [' File %r, line %d, in %s%s' % (file_, lnum, func, sig) ]
+
+ # To print just current line
+ if index is not None:
+ rows.append(' %s' % lines[index].strip())
+
+# # To print with context:
+# if index is not None:
+# i = lnum - index
+# for line in lines:
+# num = '%5d ' % i
+# rows.append(num+line.rstrip())
+# i += 1
+
+ def reader(lnum=[lnum]): #pylint: disable=W0102
+ try:
+ return linecache.getline(file_, lnum[0])
+ finally:
+ lnum[0] += 1
+
+ printed = set()
+ rows.append(' Current bindings:')
+ for (name, where, value) in scanvars(reader, frame, locals_):
+ if name in printed:
+ continue
+ printed.add(name)
+ if value is not __UNDEF__:
+ if where == 'global':
+ where = '(global)'
+ elif where != 'local':
+ name = where + name.split('.')[-1]
+ where = '(local)'
+ else:
+ where = ''
+ rows.append(' %s = %s %s' % (name, pydoc.text.repr(value), where))
+ else:
+ rows.append(name + ' undefined')
+
+ rows.append('')
+ frames.extend(rows)
+
+ exception = ['Exception: %s: %s' % (etype.__name__, evalue)]
+ if isinstance(evalue, BaseException):
+ for name in dir(evalue):
+ if name.startswith('__'):
+ continue
+ value = pydoc.text.repr(getattr(evalue, name))
+ exception.append(' %s = %s' % (name, value))
+
+ return '%s\n%s' % ('\n'.join(frames), '\n'.join(exception))
+
def setup_excepthook():
'''Modify sys.excepthook to log exceptions
@@ -258,9 +248,14 @@ def setup_excepthook():
if isinstance(val, QuietError):
root_logger.error(val.msg)
else:
- root_logger.error('Uncaught top-level exception',
- exc_info=(type_, val, tb))
-
+ try:
+ msg = format_tb((type_, val, tb))
+ except:
+ root_logger.error('Uncaught top-level exception -- and tb handler failed!',
+ exc_info=(type_, val, tb))
+ else:
+ root_logger.error('Uncaught top-level exception. %s', msg)
+
sys.excepthook = excepthook
def inode_for_path(path, conn):
@@ -352,123 +347,4 @@ def sha256_fh(fh):
return sha.digest()
-def init_tables(conn):
- # Insert root directory
- timestamp = time.time() - time.timezone
- conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount) "
- "VALUES (?,?,?,?,?,?,?,?)",
- (ROOT_INODE, stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
- | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
- os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 1))
-
- # Insert control inode, the actual values don't matter that much
- conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount) "
- "VALUES (?,?,?,?,?,?,?,?)",
- (CTRL_INODE, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
- 0, 0, timestamp, timestamp, timestamp, 42))
-
- # Insert lost+found directory
- inode = conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
- "VALUES (?,?,?,?,?,?,?)",
- (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,
- os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 1))
- name_id = conn.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
- (b'lost+found', 1))
- conn.execute("INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)",
- (name_id, inode, ROOT_INODE))
-
-def create_tables(conn):
- # Table of storage objects
- # Refcount is included for performance reasons
- conn.execute("""
- CREATE TABLE objects (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- refcount INT NOT NULL,
- size INT
- )""")
-
- # Table of known data blocks
- # Refcount is included for performance reasons
- conn.execute("""
- CREATE TABLE blocks (
- id INTEGER PRIMARY KEY,
- hash BLOB(16) UNIQUE,
- refcount INT NOT NULL,
- size INT NOT NULL,
- obj_id INTEGER NOT NULL REFERENCES objects(id)
- )""")
-
- # Table with filesystem metadata
- # The number of links `refcount` to an inode can in theory
- # be determined from the `contents` table. However, managing
- # this separately should be significantly faster (the information
- # is required for every getattr!)
- conn.execute("""
- CREATE TABLE inodes (
- -- id has to specified *exactly* as follows to become
- -- an alias for the rowid.
- id INTEGER PRIMARY KEY,
- uid INT NOT NULL,
- gid INT NOT NULL,
- mode INT NOT NULL,
- mtime REAL NOT NULL,
- atime REAL NOT NULL,
- ctime REAL NOT NULL,
- refcount INT NOT NULL,
- size INT NOT NULL DEFAULT 0,
- rdev INT NOT NULL DEFAULT 0,
- locked BOOLEAN NOT NULL DEFAULT 0
- )""")
-
- # Further Blocks used by inode (blockno >= 1)
- conn.execute("""
- CREATE TABLE inode_blocks (
- inode INTEGER NOT NULL REFERENCES inodes(id),
- blockno INT NOT NULL,
- block_id INTEGER NOT NULL REFERENCES blocks(id),
- PRIMARY KEY (inode, blockno)
- )""")
-
- # Symlinks
- conn.execute("""
- CREATE TABLE symlink_targets (
- inode INTEGER PRIMARY KEY REFERENCES inodes(id),
- target BLOB NOT NULL
- )""")
-
- # Names of file system objects
- conn.execute("""
- CREATE TABLE names (
- id INTEGER PRIMARY KEY,
- name BLOB NOT NULL,
- refcount INT NOT NULL,
- UNIQUE (name)
- )""")
-
- # Table of filesystem objects
- # rowid is used by readdir() to restart at the correct position
- conn.execute("""
- CREATE TABLE contents (
- rowid INTEGER PRIMARY KEY AUTOINCREMENT,
- name_id INT NOT NULL REFERENCES names(id),
- inode INT NOT NULL REFERENCES inodes(id),
- parent_inode INT NOT NULL REFERENCES inodes(id),
-
- UNIQUE (parent_inode, name_id)
- )""")
-
- # Extended attributes
- conn.execute("""
- CREATE TABLE ext_attributes (
- inode INTEGER NOT NULL REFERENCES inodes(id),
- name BLOB NOT NULL,
- value BLOB NOT NULL,
-
- PRIMARY KEY (inode, name)
- )""")
-
- # Shortcurts
- conn.execute("""
- CREATE VIEW contents_v AS
- SELECT * FROM contents JOIN names ON names.id = name_id
- """) \ No newline at end of file
+ \ No newline at end of file
diff --git a/src/s3ql/cli/cp.py b/src/s3ql/cp.py
index 1c3a3d2..e5a6ebe 100644
--- a/src/s3ql/cli/cp.py
+++ b/src/s3ql/cp.py
@@ -7,17 +7,16 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-
+from .common import setup_logging, CTRL_NAME, QuietError
+from .parse_args import ArgumentParser
+import cPickle as pickle
+import errno
import llfuse
-import os
import logging
-from s3ql.common import (setup_logging, CTRL_NAME, QuietError)
-from s3ql.parse_args import ArgumentParser
-import cPickle as pickle
+import os
import stat
-import textwrap
-import errno
import sys
+import textwrap
log = logging.getLogger("cp")
diff --git a/src/s3ql/cli/ctrl.py b/src/s3ql/ctrl.py
index e975fc2..bfa00f9 100644
--- a/src/s3ql/cli/ctrl.py
+++ b/src/s3ql/ctrl.py
@@ -7,15 +7,14 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-
+from .common import CTRL_NAME, QuietError, setup_logging
+from .parse_args import ArgumentParser
+import cPickle as pickle
import llfuse
-import os
import logging
-from s3ql.common import (CTRL_NAME, QuietError, setup_logging)
-from s3ql.parse_args import ArgumentParser
-import textwrap
+import os
import sys
-import cPickle as pickle
+import textwrap
log = logging.getLogger("ctrl")
diff --git a/src/s3ql/daemonize.py b/src/s3ql/daemonize.py
index a43bf81..eeae9a7 100644
--- a/src/s3ql/daemonize.py
+++ b/src/s3ql/daemonize.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-
'''
daemonize.py - this file is part of S3QL (http://s3ql.googlecode.com)
@@ -33,9 +32,6 @@ import logging
log = logging.getLogger('daemonize')
-__all__ = [ 'daemonize' ]
-
-
def daemonize(workdir='/'):
'''Daemonize the process'''
diff --git a/src/s3ql/database.py b/src/s3ql/database.py
index aedfd45..c334e38 100644
--- a/src/s3ql/database.py
+++ b/src/s3ql/database.py
@@ -22,8 +22,6 @@ import os
import types
from .common import QuietError
-__all__ = ['Connection', 'NoUniqueValueError', 'NoSuchRowError' ]
-
log = logging.getLogger("database")
sqlite_ver = tuple([ int(x) for x in apsw.sqlitelibversion().split('.') ])
diff --git a/src/s3ql/deltadump.py b/src/s3ql/deltadump.py
new file mode 100644
index 0000000..49cfaf5
--- /dev/null
+++ b/src/s3ql/deltadump.py
@@ -0,0 +1,66 @@
+'''
+deltadump.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+# This is a wrapper for _deltadump to work around
+# http://trac.cython.org/cython_trac/ticket/475
+
+#@PydevCodeAnalysisIgnore
+
+from __future__ import division, print_function, absolute_import
+
+import apsw
+from . import _deltadump
+from ._deltadump import *
+import subprocess
+import re
+
+def get_libraries(pathname):
+ '''Return shared libraries required for *pathname*'''
+
+ libs = dict()
+ ldd = subprocess.Popen(['ldd', pathname], stdout=subprocess.PIPE)
+ for line in ldd.stdout:
+ if '=>' in line:
+ (soname, path) = line.split('=>')
+ else:
+ path = line.strip()
+ soname = None
+
+ hit = re.match(r'^\s*(.+)\s+\(0x[0-9a-fA-F]+\)$', path)
+ if hit:
+ path = hit.group(1).strip()
+ else:
+ path = path.strip()
+
+ if path == 'not found':
+ path = None
+
+ if not soname:
+ soname = path
+
+ libs[soname.strip()] = path
+
+ if ldd.wait() != 0:
+ raise ImportError('ldd call failed')
+
+ return libs
+
+
+# We need to make sure that apsw and _deltadump are linked against the same
+# sqlite library.
+apsw_libs = get_libraries(apsw.__file__)
+s3ql_libs = get_libraries(_deltadump.__file__)
+
+if 'libsqlite3.so.0' not in apsw_libs:
+ raise ImportError("python-apsw must be linked dynamically to sqlite3")
+
+if 'libsqlite3.so.0' not in s3ql_libs:
+ raise ImportError('s3ql._deltadump must be linked dynamically to sqlite3')
+
+if apsw_libs['libsqlite3.so.0'] != s3ql_libs['libsqlite3.so.0']:
+ raise ImportError('python-apsw and s3ql._deltadump not linked against same sqlite3 library')
diff --git a/src/s3ql/fs.py b/src/s3ql/fs.py
index 9f9139e..11210f3 100644
--- a/src/s3ql/fs.py
+++ b/src/s3ql/fs.py
@@ -10,7 +10,8 @@ from __future__ import division, print_function, absolute_import
from .backends.common import NoSuchObject, ChecksumError
from .common import (get_path, CTRL_NAME, CTRL_INODE, LoggerFilter)
from .database import NoSuchRowError
-from .inode_cache import InodeCache, OutOfInodesError
+from .inode_cache import OutOfInodesError
+from . import deltadump
from cStringIO import StringIO
from llfuse import FUSEError
import cPickle as pickle
@@ -68,10 +69,11 @@ class Operations(llfuse.Operations):
explicitly checks the st_mode attribute.
"""
- def __init__(self, block_cache, db, blocksize, upload_event=None):
+ def __init__(self, block_cache, db, blocksize, inode_cache,
+ upload_event=None):
super(Operations, self).__init__()
- self.inodes = InodeCache(db)
+ self.inodes = inode_cache
self.db = db
self.upload_event = upload_event
self.open_inodes = collections.defaultdict(lambda: 0)
@@ -172,7 +174,7 @@ class Operations(llfuse.Operations):
else:
try:
- value = self.db.get_val('SELECT value FROM ext_attributes WHERE inode=? AND name=?',
+ value = self.db.get_val('SELECT value FROM ext_attributes_v WHERE inode=? AND name=?',
(id_, name))
except NoSuchRowError:
raise llfuse.FUSEError(llfuse.ENOATTR)
@@ -180,7 +182,7 @@ class Operations(llfuse.Operations):
def listxattr(self, id_):
names = list()
- for (name,) in self.db.query('SELECT name FROM ext_attributes WHERE inode=?', (id_,)):
+ for (name,) in self.db.query('SELECT name FROM ext_attributes_v WHERE inode=?', (id_,)):
names.append(name)
return names
@@ -211,8 +213,11 @@ class Operations(llfuse.Operations):
if self.inodes[id_].locked:
raise FUSEError(errno.EPERM)
- self.db.execute('INSERT OR REPLACE INTO ext_attributes (inode, name, value) '
- 'VALUES(?, ?, ?)', (id_, name, value))
+ if len(value) > deltadump.MAX_BLOB_SIZE:
+ raise FUSEError(errno.EINVAL)
+
+ self.db.execute('INSERT OR REPLACE INTO ext_attributes (inode, name_id, value) '
+ 'VALUES(?, ?, ?)', (id_, self._add_name(name), value))
self.inodes[id_].ctime = time.time()
def removexattr(self, id_, name):
@@ -220,10 +225,16 @@ class Operations(llfuse.Operations):
if self.inodes[id_].locked:
raise FUSEError(errno.EPERM)
- changes = self.db.execute('DELETE FROM ext_attributes WHERE inode=? AND name=?',
- (id_, name))
+ try:
+ name_id = self._del_name(name)
+ except NoSuchRowError:
+ raise llfuse.FUSEError(llfuse.ENOATTR)
+
+ changes = self.db.execute('DELETE FROM ext_attributes WHERE inode=? AND name_id=?',
+ (id_, name_id))
if changes == 0:
raise llfuse.FUSEError(llfuse.ENOATTR)
+
self.inodes[id_].ctime = time.time()
def lock_tree(self, id0):
@@ -371,14 +382,21 @@ class Operations(llfuse.Operations):
db.execute('INSERT INTO symlink_targets (inode, target) '
'SELECT ?, target FROM symlink_targets WHERE inode=?',
(id_new, id_))
-
+
+ db.execute('INSERT INTO ext_attributes (inode, name_id, value) '
+ 'SELECT ?, name_id, value FROM ext_attributes WHERE inode=?',
+ (id_new, id_))
+ db.execute('UPDATE names SET refcount = refcount + 1 WHERE '
+ 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)',
+ (id_,))
+
processed += db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) '
'SELECT ?, blockno, block_id FROM inode_blocks '
'WHERE inode=?', (id_new, id_))
db.execute('REPLACE INTO blocks (id, hash, refcount, size, obj_id) '
- 'SELECT id, hash, refcount+1, size, obj_id '
+ 'SELECT id, hash, refcount+COUNT(id), size, obj_id '
'FROM inode_blocks JOIN blocks ON block_id = id '
- 'WHERE inode = ?', (id_new,))
+ 'WHERE inode = ? GROUP BY id', (id_new,))
if db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)):
queue.append((id_, id_new, 0))
@@ -474,6 +492,10 @@ class Operations(llfuse.Operations):
self.cache.remove(id_, 0, int(math.ceil(inode.size / self.blocksize)))
# Since the inode is not open, it's not possible that new blocks
# get created at this point and we can safely delete the inode
+ self.db.execute('UPDATE names SET refcount = refcount - 1 WHERE '
+ 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)',
+ (id_,))
+ self.db.execute('DELETE FROM names WHERE refcount=0')
self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_,))
self.db.execute('DELETE FROM symlink_targets WHERE inode=?', (id_,))
del self.inodes[id_]
@@ -613,6 +635,10 @@ class Operations(llfuse.Operations):
int(math.ceil(inode_new.size / self.blocksize)))
# Since the inode is not open, it's not possible that new blocks
# get created at this point and we can safely delete the inode
+ self.db.execute('UPDATE names SET refcount = refcount - 1 WHERE '
+ 'id IN (SELECT name_id FROM ext_attributes WHERE inode=?)',
+ (id_new,))
+ self.db.execute('DELETE FROM names WHERE refcount=0')
self.db.execute('DELETE FROM ext_attributes WHERE inode=?', (id_new,))
self.db.execute('DELETE FROM symlink_targets WHERE inode=?', (id_new,))
del self.inodes[id_new]
diff --git a/src/s3ql/fsck.py b/src/s3ql/fsck.py
index 7d63dd0..e0be5ce 100644
--- a/src/s3ql/fsck.py
+++ b/src/s3ql/fsck.py
@@ -1,26 +1,36 @@
'''
fsck.py - this file is part of S3QL (http://s3ql.googlecode.com)
-Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-from .backends.common import NoSuchObject
-from .common import ROOT_INODE, CTRL_INODE, inode_for_path, sha256_fh, get_path, BUFSIZE
-from .database import NoSuchRowError
+from . import CURRENT_FS_REV
+from .backends.common import NoSuchObject, get_bucket
+from .common import (ROOT_INODE, inode_for_path, sha256_fh, get_path, BUFSIZE,
+ get_bucket_cachedir, setup_logging, QuietError, get_seq_no, CTRL_INODE,
+ stream_write_bz2, stream_read_bz2)
+from .database import NoSuchRowError, Connection
+from .metadata import (restore_metadata, cycle_metadata, dump_metadata,
+ create_tables)
+from .parse_args import ArgumentParser
from os.path import basename
-from random import randint
-from .inode_cache import MIN_INODE, MAX_INODE, OutOfInodesError
+from s3ql.backends.common import NoSuchBucket
import apsw
+import cPickle as pickle
import logging
import os
import re
import shutil
import stat
+import sys
+import tempfile
+import textwrap
import time
+
log = logging.getLogger("fsck")
S_IFMT = (stat.S_IFDIR | stat.S_IFREG | stat.S_IFSOCK | stat.S_IFBLK |
@@ -45,6 +55,10 @@ class Fsck(object):
# Similarly for objects
self.unlinked_objects = set()
+
+ # Set of inodes that have been moved to lost+found (so that we
+ # don't move them there repeatedly)
+ self.moved_inodes = set()
def check(self):
"""Check file system
@@ -54,30 +68,48 @@ class Fsck(object):
# Create indices required for reference checking
log.info('Creating temporary extra indices...')
- self.conn.execute('DROP INDEX IF EXISTS tmp1')
- self.conn.execute('DROP INDEX IF EXISTS tmp2')
- self.conn.execute('DROP INDEX IF EXISTS tmp3')
+ for idx in ('tmp1', 'tmp2', 'tmp3', 'tmp4', 'tmp5'):
+ self.conn.execute('DROP INDEX IF EXISTS %s' % idx)
self.conn.execute('CREATE INDEX tmp1 ON blocks(obj_id)')
self.conn.execute('CREATE INDEX tmp2 ON inode_blocks(block_id)')
self.conn.execute('CREATE INDEX tmp3 ON contents(inode)')
+ self.conn.execute('CREATE INDEX tmp4 ON contents(name_id)')
+ self.conn.execute('CREATE INDEX tmp5 ON ext_attributes(name_id)')
try:
- self.check_foreign_keys()
- self.check_cache()
self.check_lof()
- self.check_name_refcount()
- self.check_contents()
- self.check_inode_refcount()
+ self.check_cache()
+ self.check_names_refcount()
+
+ self.check_contents_name()
+ self.check_contents_inode()
+ self.check_contents_parent_inode()
+
+ self.check_objects_refcount()
+ self.check_objects_id()
+ self.check_objects_size()
+
+ self.check_blocks_obj_id()
+ self.check_blocks_refcount()
+
+ self.check_inode_blocks_block_id()
+ self.check_inode_blocks_inode()
+
+ self.check_inodes_refcount()
+ self.check_inodes_size()
+
+ self.check_ext_attributes_name()
+ self.check_ext_attributes_inode()
+
+ self.check_symlinks_inode()
+
self.check_loops()
- self.check_inode_sizes()
- self.check_inode_unix()
- self.check_block_refcount()
- self.check_obj_refcounts()
- self.check_keylist()
+ self.check_unix()
+ self.check_foreign_keys()
finally:
log.info('Dropping temporary indices...')
- for idx in ('tmp1', 'tmp2', 'tmp3'):
+ for idx in ('tmp1', 'tmp2', 'tmp3', 'tmp4', 'tmp5'):
self.conn.execute('DROP INDEX %s' % idx)
-
+
def log_error(self, *a, **kw):
'''Log file system error if not expected'''
@@ -90,38 +122,33 @@ class Fsck(object):
Checks that all foreign keys in the SQLite tables actually resolve.
This is necessary, because we disable runtime checking by SQLite
for performance reasons.
+
+ Note: any problems should have already been caught by the more
+ specific checkers.
'''
log.info("Checking referential integrity...")
-
- errors_found = True
- while errors_found:
- errors_found = False
-
- for (table,) in self.conn.query("SELECT name FROM sqlite_master WHERE type='table'"):
- for row in self.conn.query('PRAGMA foreign_key_list(%s)' % table):
- sql_objs = { 'src_table': table,
- 'dst_table': row[2],
- 'src_col': row[3],
- 'dst_col': row[4] }
- to_delete = []
- for (val,) in self.conn.query('SELECT %(src_table)s.%(src_col)s '
- 'FROM %(src_table)s LEFT JOIN %(dst_table)s '
- 'ON %(src_table)s.%(src_col)s = %(dst_table)s.%(dst_col)s '
- 'WHERE %(dst_table)s.%(dst_col)s IS NULL '
- 'AND %(src_table)s.%(src_col)s IS NOT NULL'
- % sql_objs):
- self.found_errors = True
- sql_objs['val'] = val
- self.log_error('%(src_table)s.%(src_col)s refers to non-existing key %(val)s '
- 'in %(dst_table)s.%(dst_col)s, deleting.', sql_objs)
- to_delete.append(val)
-
- for val in to_delete:
- self.conn.execute('DELETE FROM %(src_table)s WHERE %(src_col)s = ?'
- % sql_objs, (val,))
- if to_delete:
- errors_found = True
+
+ for (table,) in self.conn.query("SELECT name FROM sqlite_master WHERE type='table'"):
+ for row in self.conn.query('PRAGMA foreign_key_list(%s)' % table):
+ sql_objs = { 'src_table': table,
+ 'dst_table': row[2],
+ 'src_col': row[3],
+ 'dst_col': row[4] }
+
+ for (val,) in self.conn.query('SELECT %(src_table)s.%(src_col)s '
+ 'FROM %(src_table)s LEFT JOIN %(dst_table)s '
+ 'ON %(src_table)s.%(src_col)s = %(dst_table)s.%(dst_col)s '
+ 'WHERE %(dst_table)s.%(dst_col)s IS NULL '
+ 'AND %(src_table)s.%(src_col)s IS NOT NULL'
+ % sql_objs):
+ self.found_errors = True
+ sql_objs['val'] = val
+ self.log_error('%(src_table)s.%(src_col)s refers to non-existing key %(val)s '
+ 'in %(dst_table)s.%(dst_col)s, deleting.', sql_objs)
+ log.error('This should not happen, please report a bug.')
+ self.uncorrectable_errors = True
+
def check_cache(self):
"""Commit uncommitted cache files"""
@@ -140,8 +167,7 @@ class Fsck(object):
else:
raise RuntimeError('Strange file in cache directory: %s' % filename)
- self.log_error("Committing block %d of inode %d to backend",
- blockno, inode)
+ self.log_error("Committing block %d of inode %d to backend", blockno, inode)
fh = open(os.path.join(self.cachedir, filename), "rb")
size = os.fstat(fh.fileno()).st_size
@@ -161,13 +187,11 @@ class Fsck(object):
obj_size = self.bucket.perform_write(do_write, 's3ql_data_%d' % obj_id).get_obj_size()
- self.conn.execute('UPDATE objects SET size=? WHERE id=?',
- (obj_size, obj_id))
+ self.conn.execute('UPDATE objects SET size=? WHERE id=?', (obj_size, obj_id))
else:
self.conn.execute('UPDATE blocks SET refcount=refcount+1 WHERE id=?', (block_id,))
-
try:
old_block_id = self.conn.get_val('SELECT block_id FROM inode_blocks '
'WHERE inode=? AND blockno=?', (inode, blockno))
@@ -220,52 +244,175 @@ class Fsck(object):
self.conn.execute('UPDATE contents SET inode=? WHERE name_id=? AND parent_inode=?',
(inode_l, name_id, ROOT_INODE))
- def check_contents(self):
- """Check direntry names"""
+ def check_contents_name(self):
+ """Check contents.name_id"""
- log.info('Checking directory entry names...')
-
- for (name, id_p) in self.conn.query('SELECT name, parent_inode FROM contents_v '
- 'WHERE LENGTH(name) > 255'):
- path = get_path(id_p, self.conn, name)
- self.log_error('Entry name %s... in %s has more than 255 characters, '
- 'this could cause problems', name[:40], path[:-len(name)])
- self.found_errors = True
+ log.info('Checking contents (names)...')
+
+ for (rowid, name_id, inode_p,
+ inode) in self.conn.query('SELECT contents.rowid, name_id, parent_inode, inode '
+ 'FROM contents LEFT JOIN names '
+ 'ON name_id = names.id WHERE names.id IS NULL'):
+ self.found_errors = True
+ try:
+ path = get_path(inode_p, self.conn)[1:]
+ except NoSuchRowError:
+ newname = '-%d' % inode
+ else:
+ newname = path.replace('_', '__').replace('/', '_') + '-%d' % inode
+ (id_p_new, newname) = self.resolve_free(b"/lost+found", newname)
+
+ self.log_error('Content entry for inode %d refers to non-existing name with id %d, '
+ 'moving to /lost+found/%s', inode, name_id, newname)
+
+ self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? WHERE rowid=?',
+ (self._add_name(newname), id_p_new, rowid))
+
+
+ def check_contents_parent_inode(self):
+ """Check contents.parent_inode"""
+
+ log.info('Checking contents (parent inodes)...')
+
+ for (rowid, inode_p,
+ name_id) in self.conn.query('SELECT contents.rowid, parent_inode, name_id '
+ 'FROM contents LEFT JOIN inodes '
+ 'ON parent_inode = inodes.id WHERE inodes.id IS NULL'):
+ self.found_errors = True
+ name = self.conn.get_val('SELECT name FROM names WHERE id = ?', (name_id,))
+ (id_p_new, newname) = self.resolve_free(b"/lost+found", '[%d]-%s' % (inode_p, name))
+
+ self.log_error('Parent inode %d for "%s" vanished, moving to /lost+found', inode_p, name)
+ self._del_name(name_id)
+ self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? WHERE rowid=?',
+ (self._add_name(newname), id_p_new, rowid))
+
+
+ def check_contents_inode(self):
+ """Check contents.inode"""
+
+ log.info('Checking contents (inodes)...')
+
+ to_delete = list()
+ for (rowid, inode_p, inode, name_id) in self.conn.query('SELECT contents.rowid, parent_inode, inode, '
+ 'name_id FROM contents LEFT JOIN inodes '
+ 'ON inode = inodes.id WHERE inodes.id IS NULL'):
+ self.found_errors = True
+ try:
+ path = get_path(inode, self.conn)[1:]
+ except NoSuchRowError:
+ path = '[inode %d, parent %d]' % (inode, inode_p)
+
+ self.log_error('Inode for %s vanished, deleting', path)
+ self._del_name(name_id)
+ to_delete.append(rowid)
-
+ for rowid in to_delete:
+ self.conn.execute('DELETE FROM contents WHERE rowid=?', (rowid,))
+
+ def check_ext_attributes_name(self):
+ """Check ext_attributes.name_id"""
+
+ log.info('Checking extended attributes (names)...')
+
+ for (rowid, name_id, inode) in self.conn.query('SELECT ext_attributes.rowid, name_id, inode '
+ 'FROM ext_attributes LEFT JOIN names '
+ 'ON name_id = names.id WHERE names.id IS NULL'):
+
+ self.found_errors = True
+ for (name, id_p) in self.conn.query('SELECT name, parent_inode '
+ 'FROM contents_v WHERE inode=?', (inode,)):
+ path = get_path(id_p, self.conn, name)
+ self.log_error('Extended attribute %d of %s refers to non-existing name %d, renaming..',
+ rowid, path, name_id)
+
+ while True:
+ name_id = self._add_name('lost+found_%d' % rowid)
+ if not self.conn.has_val("SELECT 1 FROM ext_attributes WHERE name_id=? AND inode=?",
+ (name_id, inode)):
+ self.conn.execute('UPDATE ext_attributes SET name_id=? WHERE rowid=?',
+ (name_id, rowid))
+ break
+ self._del_name('lost+found_%d' % rowid)
+ rowid += 1
+
+ def check_ext_attributes_inode(self):
+ """Check ext_attributes.inode"""
+
+ log.info('Checking extended attributes (inodes)...')
+
+ to_delete = list()
+ for (rowid, inode, name_id) in self.conn.query('SELECT ext_attributes.rowid, inode, name_id '
+ 'FROM ext_attributes LEFT JOIN inodes '
+ 'ON inode = inodes.id WHERE inodes.id IS NULL'):
+ self.found_errors = True
+ self.log_error('Extended attribute %d refers to non-existing inode %d, deleting',
+ rowid, inode)
+ to_delete.append(rowid)
+ self._del_name(name_id)
+
+ for rowid in to_delete:
+ self.conn.execute('DELETE FROM ext_attributes WHERE rowid=?', (rowid,))
+
def check_loops(self):
"""Ensure that all directories can be reached from root"""
log.info('Checking directory reachability...')
self.conn.execute('CREATE TEMPORARY TABLE loopcheck (inode INTEGER PRIMARY KEY, '
- 'parent_inode INTEGER)')
+ 'parent_inode INTEGER)')
self.conn.execute('CREATE INDEX ix_loopcheck_parent_inode ON loopcheck(parent_inode)')
- self.conn.execute('INSERT INTO loopcheck (inode, parent_inode) '
- 'SELECT inode, parent_inode FROM contents JOIN inodes ON inode == id '
- 'WHERE mode & ? == ?', (S_IFMT, stat.S_IFDIR))
+ self.conn.execute('INSERT INTO loopcheck (inode) '
+ 'SELECT parent_inode FROM contents GROUP BY parent_inode')
+ self.conn.execute('UPDATE loopcheck SET parent_inode = '
+ '(SELECT contents.parent_inode FROM contents '
+ ' WHERE contents.inode = loopcheck.inode LIMIT 1)')
self.conn.execute('CREATE TEMPORARY TABLE loopcheck2 (inode INTEGER PRIMARY KEY)')
self.conn.execute('INSERT INTO loopcheck2 (inode) SELECT inode FROM loopcheck')
+
def delete_tree(inode_p):
for (inode,) in self.conn.query("SELECT inode FROM loopcheck WHERE parent_inode=?",
- (inode_p,)):
+ (inode_p,)):
delete_tree(inode)
self.conn.execute('DELETE FROM loopcheck2 WHERE inode=?', (inode_p,))
- delete_tree(ROOT_INODE)
+ root = ROOT_INODE
+ while True:
+ delete_tree(root)
- if self.conn.has_val("SELECT 1 FROM loopcheck2"):
+ if not self.conn.has_val("SELECT 1 FROM loopcheck2"):
+ break
+
self.found_errors = True
- self.uncorrectable_errors = True
- self.log_error("Found unreachable filesystem entries!\n"
- "This problem cannot be corrected automatically yet.")
-
+
+ # Try obvious culprits first
+ try:
+ inode = self.conn.get_val('SELECT loopcheck2.inode FROM loopcheck2 JOIN contents '
+ 'ON loopcheck2.inode = contents.inode '
+ 'WHERE parent_inode = contents.inode LIMIT 1')
+ except NoSuchRowError:
+ inode = self.conn.get_val("SELECT inode FROM loopcheck2 ORDER BY inode ASC LIMIT 1")
+
+ (name, name_id) = self.conn.get_row("SELECT name, name_id FROM contents_v "
+ "WHERE inode=? LIMIT 1", (inode,))
+ (id_p, name) = self.resolve_free(b"/lost+found", name)
+
+ self.log_error("Found unreachable filesystem entries, re-anchoring %s [%d] "
+ "in /lost+found", name, inode)
+ self.conn.execute('UPDATE contents SET parent_inode=?, name_id=? '
+ 'WHERE inode=? AND name_id=?',
+ (id_p, self._add_name(name), inode, name_id))
+ self._del_name(name_id)
+ self.conn.execute('UPDATE loopcheck SET parent_inode=? WHERE inode=?',
+ (id_p, inode))
+ root = inode
+
self.conn.execute("DROP TABLE loopcheck")
self.conn.execute("DROP TABLE loopcheck2")
- def check_inode_sizes(self):
- """Check if inode sizes agree with blocks"""
+ def check_inodes_size(self):
+ """Check inodes.size"""
log.info('Checking inodes (sizes)...')
@@ -294,30 +441,9 @@ class Fsck(object):
finally:
self.conn.execute('DROP TABLE min_sizes')
self.conn.execute('DROP TABLE IF EXISTS wrong_sizes')
-
- def _add_name(self, name):
- '''Get id for *name* and increase refcount
-
- Name is inserted in table if it does not yet exist.
- '''
-
- try:
- name_id = self.conn.get_val('SELECT id FROM names WHERE name=?', (name,))
- except NoSuchRowError:
- name_id = self.conn.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
- (name, 1))
- else:
- self.conn.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))
- return name_id
-
- def _del_name(self, name_id):
- '''Decrease refcount for name_id, remove if it reaches 0'''
-
- self.conn.execute('UPDATE names SET refcount=refcount-1 WHERE id=?', (name_id,))
- self.conn.execute('DELETE FROM names WHERE refcount=0 AND id=?', (name_id,))
-
- def check_inode_refcount(self):
- """Check inode reference counters"""
+
+ def check_inodes_refcount(self):
+ """Check inodes.refcount"""
log.info('Checking inodes (refcounts)...')
@@ -355,9 +481,106 @@ class Fsck(object):
self.conn.execute('DROP TABLE refcounts')
self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts')
-
- def check_block_refcount(self):
- """Check block reference counters"""
+ def check_blocks_obj_id(self):
+ """Check blocks.obj_id"""
+
+ log.info('Checking blocks (referenced objects)...')
+
+ for (block_id, obj_id) in self.conn.query('SELECT blocks.id, obj_id FROM blocks LEFT JOIN objects '
+ 'ON obj_id = objects.id WHERE objects.id IS NULL'):
+ self.found_errors = True
+ self.log_error('Block %d refers to non-existing object %d', block_id, obj_id)
+ for (inode,) in self.conn.query('SELECT inode FROM inode_blocks WHERE block_id = ? ',
+ (block_id,)):
+ if inode in self.moved_inodes:
+ continue
+ self.moved_inodes.add(inode)
+
+ for (name, name_id, id_p) in self.conn.query('SELECT name, name_id, parent_inode '
+ 'FROM contents_v WHERE inode=?', (inode,)):
+ path = get_path(id_p, self.conn, name)
+ self.log_error("File may lack data, moved to /lost+found: %s", path)
+ (lof_id, newname) = self.resolve_free(b"/lost+found",
+ path[1:].replace('_', '__').replace('/', '_'))
+
+ self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? '
+ 'WHERE name_id=? AND parent_inode=?',
+ (self._add_name(newname), lof_id, name_id, id_p))
+ self._del_name(name_id)
+
+ self.conn.execute('DELETE FROM inode_blocks WHERE block_id=?', (block_id,))
+ self.conn.execute("DELETE FROM blocks WHERE id=?", (block_id,))
+
+
+ def check_inode_blocks_inode(self):
+ """Check inode_blocks.inode"""
+
+ log.info('Checking inode-block mapping (inodes)...')
+
+ to_delete = list()
+ for (rowid, inode, block_id) in self.conn.query('SELECT inode_blocks.rowid, inode, block_id '
+ 'FROM inode_blocks LEFT JOIN inodes '
+ 'ON inode = inodes.id WHERE inodes.id IS NULL'):
+ self.found_errors = True
+ self.log_error('Inode-block mapping %d refers to non-existing inode %d, deleting',
+ rowid, inode)
+ to_delete.append(rowid)
+ self.unlinked_blocks.add(block_id)
+
+ for rowid in to_delete:
+ self.conn.execute('DELETE FROM inode_blocks WHERE rowid=?', (rowid,))
+
+ def check_inode_blocks_block_id(self):
+ """Check inode_blocks.block_id"""
+
+ log.info('Checking inode-block mapping (blocks)...')
+
+ to_delete = list()
+ for (rowid, block_id, inode) in self.conn.query('SELECT inode_blocks.rowid, block_id, inode FROM inode_blocks '
+ 'LEFT JOIN blocks ON block_id = blocks.id '
+ 'WHERE blocks.id IS NULL'):
+ self.found_errors = True
+ self.log_error('Inode-block mapping for inode %d refers to non-existing block %d',
+ inode, block_id)
+ to_delete.append(rowid)
+
+ if inode in self.moved_inodes:
+ continue
+ self.moved_inodes.add(inode)
+
+ for (name, name_id, id_p) in self.conn.query('SELECT name, name_id, parent_inode '
+ 'FROM contents_v WHERE inode=?', (inode,)):
+ path = get_path(id_p, self.conn, name)
+ self.log_error("File may lack data, moved to /lost+found: %s", path)
+ (lof_id, newname) = self.resolve_free(b"/lost+found",
+ path[1:].replace('_', '__').replace('/', '_'))
+
+ self.conn.execute('UPDATE contents SET name_id=?, parent_inode=? '
+ 'WHERE name_id=? AND parent_inode=?',
+ (self._add_name(newname), lof_id, name_id, id_p))
+ self._del_name(name_id)
+
+ for rowid in to_delete:
+ self.conn.execute('DELETE FROM inode_blocks WHERE rowid=?', (rowid,))
+
+ def check_symlinks_inode(self):
+ """Check symlinks.inode"""
+
+ log.info('Checking symlinks (inodes)...')
+
+ to_delete = list()
+ for (rowid, inode) in self.conn.query('SELECT symlink_targets.rowid, inode FROM symlink_targets '
+ 'LEFT JOIN inodes ON inode = inodes.id WHERE inodes.id IS NULL'):
+ self.found_errors = True
+ self.log_error('Symlink %d refers to non-existing inode %d, deleting',
+ rowid, inode)
+ to_delete.append(rowid)
+
+ for rowid in to_delete:
+ self.conn.execute('DELETE FROM symlink_targets WHERE rowid=?', (rowid,))
+
+ def check_blocks_refcount(self):
+ """Check blocks.refcount"""
log.info('Checking blocks (refcounts)...')
@@ -379,7 +602,7 @@ class Fsck(object):
OR refcounts.refcount IS NULL''')
for (id_, cnt, cnt_old, obj_id) in self.conn.query('SELECT * FROM wrong_refcounts'):
- if cnt is None and id_ in self.unlinked_blocks and cnt_old == 0:
+ if cnt is None and id_ in self.unlinked_blocks:
# Block was unlinked by check_cache and can now really be
# removed (since we have checked that there are truly no
# other references)
@@ -418,35 +641,29 @@ class Fsck(object):
def create_inode(self, mode, uid=os.getuid(), gid=os.getgid(),
mtime=None, atime=None, ctime=None, refcount=None,
size=0):
- '''Create inode with id fitting into 32bit'''
+ '''Create inode'''
- for _ in range(100):
- id_ = randint(MIN_INODE, MAX_INODE)
- try:
- self.conn.execute('INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,'
- 'refcount,size) VALUES (?,?,?,?,?,?,?,?,?)',
- (id_, mode, uid, gid, mtime, atime, ctime, refcount, size))
- except apsw.ConstraintError:
- pass
- else:
- break
- else:
- raise OutOfInodesError()
-
+ id_ = self.conn.rowid('INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,'
+ 'refcount,size) VALUES (?,?,?,?,?,?,?,?)',
+ (mode, uid, gid, mtime, atime, ctime, refcount, size))
+
return id_
- def check_name_refcount(self):
- """Check name reference counters"""
+ def check_names_refcount(self):
+ """Check names.refcount"""
log.info('Checking names (refcounts)...')
self.conn.execute('CREATE TEMPORARY TABLE refcounts '
'(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)')
try:
- self.conn.execute('''
- INSERT INTO refcounts (id, refcount)
- SELECT name_id, COUNT(name_id) FROM contents GROUP BY name_id''')
-
+ self.conn.execute('INSERT INTO refcounts (id, refcount) '
+ 'SELECT id, 0 FROM names')
+ self.conn.execute('UPDATE refcounts SET refcount='
+ '(SELECT COUNT(name_id) FROM contents WHERE name_id = refcounts.id)'
+ '+ (SELECT COUNT(name_id) FROM ext_attributes '
+ ' WHERE name_id = refcounts.id)')
+
self.conn.execute('''
CREATE TEMPORARY TABLE wrong_refcounts AS
SELECT id, refcounts.refcount, names.refcount
@@ -468,8 +685,8 @@ class Fsck(object):
self.conn.execute('DROP TABLE refcounts')
self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts')
- def check_inode_unix(self):
- """Check inode attributes for agreement with UNIX conventions
+ def check_unix(self):
+ """Check the file system for agreement with UNIX conventions
This means:
- Only directories should have child entries
@@ -477,14 +694,15 @@ class Fsck(object):
- Only symlinks should have a target
- Only devices should have a device number
- symlink size is length of target
+ - names are not longer than 255 bytes
- Note that none of this is enforced by S3QL. However, as long
- as S3QL only communicates with the UNIX FUSE module, none of
- the above should happen (and if it does, it would probably
- confuse the system quite a lot).
+ Note that none of this is enforced by S3QL. However, as long as S3QL
+ only communicates with the UNIX FUSE module, none of the above should
+ happen (and if it does, it would probably confuse the system quite a
+ lot).
"""
- log.info('Checking inodes (types)...')
+ log.info('Checking unix conventions...')
for (inode, mode, size, target, rdev) \
in self.conn.query("SELECT id, mode, size, target, rdev "
@@ -537,11 +755,17 @@ class Fsck(object):
'This is probably going to confuse your system!',
inode, get_path(inode, self.conn))
+ for (name, id_p) in self.conn.query('SELECT name, parent_inode FROM contents_v '
+ 'WHERE LENGTH(name) > 255'):
+ path = get_path(id_p, self.conn, name)
+ self.log_error('Entry name %s... in %s has more than 255 characters, '
+ 'this could cause problems', name[:40], path[:-len(name)])
+ self.found_errors = True
+
+ def check_objects_refcount(self):
+ """Check objects.refcount"""
- def check_obj_refcounts(self):
- """Check object reference counts"""
-
- log.info('Checking object reference counts...')
+ log.info('Checking objects (reference counts)...')
self.conn.execute('CREATE TEMPORARY TABLE refcounts '
'(id INTEGER PRIMARY KEY, refcount INTEGER NOT NULL)')
@@ -576,17 +800,16 @@ class Fsck(object):
self.conn.execute('DROP TABLE refcounts')
self.conn.execute('DROP TABLE IF EXISTS wrong_refcounts')
+ # Delete objects which (correctly) had refcount=0
+ for obj_id in self.conn.query('SELECT id FROM objects WHERE refcount = 0'):
+ del self.bucket['s3ql_data_%d' % obj_id]
+ self.conn.execute("DELETE FROM objects WHERE refcount = 0")
+
- def check_keylist(self):
- """Check the list of objects.
-
- Checks that:
- - all objects are referred in the object table
- - all objects in the object table exist
- - object has correct hash
- """
-
- log.info('Checking object list...')
+ def check_objects_id(self):
+ """Check objects.id"""
+
+ log.info('Checking objects (backend)...')
lof_id = self.conn.get_val("SELECT inode FROM contents_v "
"WHERE name=? AND parent_inode=?", (b"lost+found", ROOT_INODE))
@@ -623,7 +846,6 @@ class Fsck(object):
self.conn.execute('CREATE TEMPORARY TABLE missing AS '
'SELECT id FROM objects EXCEPT SELECT id FROM obj_ids')
- moved_inodes = set()
for (obj_id,) in self.conn.query('SELECT * FROM missing'):
if (not self.bucket.is_list_create_consistent()
and ('s3ql_data_%d' % obj_id) in self.bucket):
@@ -638,9 +860,9 @@ class Fsck(object):
# Same file may lack several blocks, but we want to move it
# only once
- if id_ in moved_inodes:
+ if id_ in self.moved_inodes:
continue
- moved_inodes.add(id_)
+ self.moved_inodes.add(id_)
for (name, name_id, id_p) in self.conn.query('SELECT name, name_id, parent_inode '
'FROM contents_v WHERE inode=?', (id_,)):
@@ -664,7 +886,20 @@ class Fsck(object):
self.conn.execute('DROP TABLE obj_ids')
self.conn.execute('DROP TABLE IF EXISTS missing')
+
+ def check_objects_size(self):
+ """Check objects.size"""
+
+ log.info('Checking objects (sizes)...')
+ for (obj_id,) in self.conn.query('SELECT id FROM objects WHERE size IS NULL'):
+ self.found_errors = True
+ self.log_error("Object %d has no size information, retrieving from backend...", obj_id)
+
+ self.conn.execute('UPDATE objects SET size=? WHERE id=?',
+ (self.bucket.get_size('s3ql_data_%d' % obj_id), obj_id))
+
+
def resolve_free(self, path, name):
'''Return parent inode and name of an unused directory entry
@@ -696,3 +931,341 @@ class Fsck(object):
pass
return (inode_p, newname)
+
+ def _add_name(self, name):
+ '''Get id for *name* and increase refcount
+
+ Name is inserted in table if it does not yet exist.
+ '''
+
+ try:
+ name_id = self.conn.get_val('SELECT id FROM names WHERE name=?', (name,))
+ except NoSuchRowError:
+ name_id = self.conn.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
+ (name, 1))
+ else:
+ self.conn.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))
+ return name_id
+
+ def _del_name(self, name_id):
+ '''Decrease refcount for name_id, remove if it reaches 0'''
+
+ self.conn.execute('UPDATE names SET refcount=refcount-1 WHERE id=?', (name_id,))
+ self.conn.execute('DELETE FROM names WHERE refcount=0 AND id=?', (name_id,))
+
+
+class ROFsck(Fsck):
+ '''
+ Check file system database only, and don't correct any errors.
+ '''
+
+ def __init__(self, path):
+
+ db = Connection(path + '.db')
+ db.execute('PRAGMA journal_mode = WAL')
+
+ param = pickle.load(open(path + '.params', 'rb'))
+ super(ROFsck, self).__init__(None, None, param, db)
+
+ def check(self):
+
+ self.conn.execute('BEGIN TRANSACTION')
+ try:
+ log.info('Creating temporary indices...')
+ for idx in ('tmp1', 'tmp2', 'tmp3', 'tmp4', 'tmp5'):
+ self.conn.execute('DROP INDEX IF EXISTS %s' % idx)
+ self.conn.execute('CREATE INDEX tmp1 ON blocks(obj_id)')
+ self.conn.execute('CREATE INDEX tmp2 ON inode_blocks(block_id)')
+ self.conn.execute('CREATE INDEX tmp3 ON contents(inode)')
+ self.conn.execute('CREATE INDEX tmp4 ON contents(name_id)')
+ self.conn.execute('CREATE INDEX tmp5 ON ext_attributes(name_id)')
+
+ self.check_lof()
+ self.check_names_refcount()
+
+ self.check_contents_name()
+ self.check_contents_inode()
+ self.check_contents_parent_inode()
+
+ self.check_objects_refcount()
+ self.check_objects_size()
+
+ self.check_blocks_obj_id()
+ self.check_blocks_refcount()
+
+ self.check_inode_blocks_block_id()
+ self.check_inode_blocks_inode()
+
+ self.check_inodes_refcount()
+ self.check_inodes_size()
+
+ self.check_ext_attributes_name()
+ self.check_ext_attributes_inode()
+
+ self.check_symlinks_inode()
+
+ self.check_loops()
+ self.check_unix()
+ self.check_foreign_keys()
+
+ finally:
+ log.info('Dropping temporary indices...')
+ self.conn.execute('ROLLBACK')
+
+ def check_objects_size(self):
+ """Check objects.size"""
+
+ log.info('Checking objects (sizes)...')
+
+ for (obj_id,) in self.conn.query('SELECT id FROM objects WHERE size IS NULL'):
+ self.found_errors = True
+ self.log_error("Object %d has no size information, setting to zero...", obj_id)
+
+ self.conn.execute('UPDATE objects SET size=? WHERE id=?', (0, obj_id))
+
+
+def parse_args(args):
+
+ parser = ArgumentParser(
+ description="Checks and repairs an S3QL filesystem.")
+
+ parser.add_log('~/.s3ql/fsck.log')
+ parser.add_cachedir()
+ parser.add_authfile()
+ parser.add_debug_modules()
+ parser.add_quiet()
+ parser.add_version()
+ parser.add_storage_url()
+
+ parser.add_argument("--batch", action="store_true", default=False,
+ help="If user input is required, exit without prompting.")
+ parser.add_argument("--force", action="store_true", default=False,
+ help="Force checking even if file system is marked clean.")
+ options = parser.parse_args(args)
+
+ return options
+
+def main(args=None):
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ # Check if fs is mounted on this computer
+ # This is not foolproof but should prevent common mistakes
+ match = options.storage_url + ' /'
+ with open('/proc/mounts', 'r') as fh:
+ for line in fh:
+ if line.startswith(match):
+ raise QuietError('Can not check mounted file system.')
+
+ try:
+ bucket = get_bucket(options)
+ except NoSuchBucket as exc:
+ raise QuietError(str(exc))
+
+ cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)
+ seq_no = get_seq_no(bucket)
+ param_remote = bucket.lookup('s3ql_metadata')
+ db = None
+
+ if os.path.exists(cachepath + '.params'):
+ assert os.path.exists(cachepath + '.db')
+ param = pickle.load(open(cachepath + '.params', 'rb'))
+ if param['seq_no'] < seq_no:
+ log.info('Ignoring locally cached metadata (outdated).')
+ param = bucket.lookup('s3ql_metadata')
+ else:
+ log.info('Using cached metadata.')
+ db = Connection(cachepath + '.db')
+ assert not os.path.exists(cachepath + '-cache') or param['needs_fsck']
+
+ if param_remote['seq_no'] != param['seq_no']:
+ log.warn('Remote metadata is outdated.')
+ param['needs_fsck'] = True
+
+ else:
+ param = param_remote
+ assert not os.path.exists(cachepath + '-cache')
+ # .db might exist if mount.s3ql is killed at exactly the right instant
+ # and should just be ignored.
+
+ # Check revision
+ if param['revision'] < CURRENT_FS_REV:
+ raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
+ elif param['revision'] > CURRENT_FS_REV:
+ raise QuietError('File system revision too new, please update your '
+ 'S3QL installation.')
+
+ if param['seq_no'] < seq_no:
+ if bucket.is_get_consistent():
+ print(textwrap.fill(textwrap.dedent('''\
+ Up to date metadata is not available. Probably the file system has not
+ been properly unmounted and you should try to run fsck on the computer
+ where the file system has been mounted most recently.
+ ''')))
+ else:
+ print(textwrap.fill(textwrap.dedent('''\
+ Up to date metadata is not available. Either the file system has not
+ been unmounted cleanly or the data has not yet propagated through the backend.
+ In the latter case, waiting for a while should fix the problem, in
+ the former case you should try to run fsck on the computer where
+ the file system has been mounted most recently
+ ''')))
+
+ print('Enter "continue" to use the outdated data anyway:',
+ '> ', sep='\n', end='')
+ if options.batch:
+ raise QuietError('(in batch mode, exiting)')
+ if sys.stdin.readline().strip() != 'continue':
+ raise QuietError()
+
+ param['seq_no'] = seq_no
+ param['needs_fsck'] = True
+
+
+ if (not param['needs_fsck']
+ and param['max_inode'] < 2**31
+ and ((time.time() - time.timezone) - param['last_fsck'])
+ < 60 * 60 * 24 * 31): # last check less than 1 month ago
+ if options.force:
+ log.info('File system seems clean, checking anyway.')
+ else:
+ log.info('File system is marked as clean. Use --force to force checking.')
+ return
+
+ # If using local metadata, check consistency
+ if db:
+ log.info('Checking DB integrity...')
+ try:
+ # get_list may raise CorruptError itself
+ res = db.get_list('PRAGMA integrity_check(20)')
+ if res[0][0] != u'ok':
+ log.error('\n'.join(x[0] for x in res ))
+ raise apsw.CorruptError()
+ except apsw.CorruptError:
+ raise QuietError('Local metadata is corrupted. Remove or repair the following '
+ 'files manually and re-run fsck:\n'
+ + cachepath + '.db (corrupted)\n'
+ + cachepath + '.params (intact)')
+ else:
+ def do_read(fh):
+ tmpfh = tempfile.TemporaryFile()
+ stream_read_bz2(fh, tmpfh)
+ return tmpfh
+ log.info('Downloading and decompressing metadata...')
+ tmpfh = bucket.perform_read(do_read, "s3ql_metadata")
+ os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR))
+ db = Connection(cachepath + '.db.tmp', fast_mode=True)
+ log.info("Reading metadata...")
+ tmpfh.seek(0)
+ restore_metadata(tmpfh, db)
+ db.close()
+ os.rename(cachepath + '.db.tmp', cachepath + '.db')
+ db = Connection(cachepath + '.db')
+
+ # Increase metadata sequence no
+ param['seq_no'] += 1
+ param['needs_fsck'] = True
+ bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
+ pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
+
+ fsck = Fsck(cachepath + '-cache', bucket, param, db)
+ fsck.check()
+ param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
+
+ if fsck.uncorrectable_errors:
+ raise QuietError("Uncorrectable errors found, aborting.")
+
+ if os.path.exists(cachepath + '-cache'):
+ os.rmdir(cachepath + '-cache')
+
+ if param['max_inode'] >= 2**31:
+ renumber_inodes(db)
+ param['inode_gen'] += 1
+ param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
+
+ cycle_metadata(bucket)
+ param['needs_fsck'] = False
+ param['last_fsck'] = time.time() - time.timezone
+ param['last-modified'] = time.time() - time.timezone
+
+ log.info('Dumping metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(db, fh)
+ def do_write(obj_fh):
+ fh.seek(0)
+ stream_write_bz2(fh, obj_fh)
+ return obj_fh
+
+ log.info("Compressing and uploading metadata...")
+ obj_fh = bucket.perform_write(do_write, "s3ql_metadata", metadata=param,
+ is_compressed=True)
+ log.info('Wrote %.2f MB of compressed metadata.', obj_fh.get_obj_size() / 1024**2)
+ pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
+
+ db.execute('ANALYZE')
+ db.execute('VACUUM')
+ db.close()
+
+def renumber_inodes(db):
+ '''Renumber inodes'''
+
+ log.info('Renumbering inodes...')
+ for table in ('inodes', 'inode_blocks', 'symlink_targets',
+ 'contents', 'names', 'blocks', 'objects', 'ext_attributes'):
+ db.execute('ALTER TABLE %s RENAME TO %s_old' % (table, table))
+
+ for table in ('contents_v', 'ext_attributes_v'):
+ db.execute('DROP VIEW %s' % table)
+
+ create_tables(db)
+ for table in ('names', 'blocks', 'objects'):
+ db.execute('DROP TABLE %s' % table)
+ db.execute('ALTER TABLE %s_old RENAME TO %s' % (table, table))
+
+ log.info('..mapping..')
+ db.execute('CREATE TEMPORARY TABLE inode_map (rowid INTEGER PRIMARY KEY AUTOINCREMENT, id INTEGER UNIQUE)')
+ db.execute('INSERT INTO inode_map (rowid, id) VALUES(?,?)', (ROOT_INODE, ROOT_INODE))
+ db.execute('INSERT INTO inode_map (rowid, id) VALUES(?,?)', (CTRL_INODE, CTRL_INODE))
+ db.execute('INSERT INTO inode_map (id) SELECT id FROM inodes_old WHERE id > ? ORDER BY id ASC',
+ (CTRL_INODE,))
+
+ log.info('..inodes..')
+ db.execute('INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size,locked,rdev) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = inodes_old.id), '
+ ' mode,uid,gid,mtime,atime,ctime,refcount,size,locked,rdev FROM inodes_old')
+
+ log.info('..inode_blocks..')
+ db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = inode_blocks_old.inode), '
+ ' blockno, block_id FROM inode_blocks_old')
+
+ log.info('..contents..')
+ db.execute('INSERT INTO contents (inode, parent_inode, name_id) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = contents_old.inode), '
+ ' (SELECT rowid FROM inode_map WHERE inode_map.id = contents_old.parent_inode), '
+ ' name_id FROM contents_old')
+
+ log.info('..symlink_targets..')
+ db.execute('INSERT INTO symlink_targets (inode, target) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = symlink_targets_old.inode), '
+ ' target FROM symlink_targets_old')
+
+ log.info('..ext_attributes..')
+ db.execute('INSERT INTO ext_attributes (inode, name_id, value) '
+ 'SELECT (SELECT rowid FROM inode_map WHERE inode_map.id = ext_attributes_old.inode), '
+ ' name_id, value FROM ext_attributes_old')
+
+ for table in ('inodes', 'inode_blocks', 'symlink_targets',
+ 'contents', 'ext_attributes'):
+ db.execute('DROP TABLE %s_old' % table)
+
+ db.execute('DROP TABLE inode_map')
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/src/s3ql/inode_cache.py b/src/s3ql/inode_cache.py
index d3ec5f0..0d856e8 100644
--- a/src/s3ql/inode_cache.py
+++ b/src/s3ql/inode_cache.py
@@ -10,12 +10,8 @@ from __future__ import division, print_function, absolute_import
import time
import logging
-from random import randint
-import apsw
from .database import NoSuchRowError
-from s3ql.common import CTRL_INODE
-__all__ = [ 'InodeCache', 'OutOfInodesError' ]
log = logging.getLogger('inode_cache')
CACHE_SIZE = 100
@@ -28,16 +24,16 @@ UPDATE_STR = ', '.join('%s=?' % x for x in UPDATE_ATTRS)
TIMEZONE = time.timezone
MAX_INODE = 2**32 - 1
-MIN_INODE = CTRL_INODE+1
class _Inode(object):
'''An inode with its attributes'''
- __slots__ = ATTRIBUTES + ('dirty',)
+ __slots__ = ATTRIBUTES + ('dirty', 'generation')
- def __init__(self):
+ def __init__(self, generation):
super(_Inode, self).__init__()
self.dirty = False
+ self.generation = generation
# This allows access to all st_* attributes, even if they're
# not defined in the table
@@ -46,7 +42,7 @@ class _Inode(object):
return self.refcount
elif key == 'st_blocks':
- return self.size // 512
+ return (self.size+511) // 512
elif key == 'st_ino':
return self.id
@@ -61,10 +57,6 @@ class _Inode(object):
elif key == 'st_blksize':
return 128 * 1024
- # Our inodes are already unique
- elif key == 'generation':
- return 1
-
elif key.startswith('st_'):
return getattr(self, key[3:])
@@ -80,7 +72,7 @@ class _Inode(object):
def copy(self):
- copy = _Inode()
+ copy = _Inode(self.generation)
for attr in ATTRIBUTES:
setattr(copy, attr, getattr(self, attr))
@@ -130,10 +122,11 @@ class InodeCache(object):
to the effects of the current method call.
'''
- def __init__(self, db):
+ def __init__(self, db, inode_gen):
self.attrs = dict()
self.cached_rows = list()
self.db = db
+ self.generation = inode_gen
# Fill the cache with dummy data, so that we don't have to
# check if the cache is full or not (it will always be full)
@@ -175,10 +168,10 @@ class InodeCache(object):
self.attrs[id_] = inode
return inode
- def getattr(self, id_):
+ def getattr(self, id_): #@ReservedAssignment
attrs = self.db.get_row("SELECT %s FROM inodes WHERE id=? " % ATTRIBUTE_STR,
(id_,))
- inode = _Inode()
+ inode = _Inode(self.generation)
for (i, id_) in enumerate(ATTRIBUTES):
setattr(inode, id_, attrs[i])
@@ -203,19 +196,10 @@ class InodeCache(object):
columns = ', '.join(x for x in ATTRIBUTES if x in kw)
values = ', '.join('?' * len(kw))
- # We want to restrict inodes to 32bit, and we do not want to
- # immediately reuse deleted inodes (so that the lack of generation
- # numbers isn't too likely to cause problems with NFS)
- sql = 'INSERT INTO inodes (id, %s) VALUES(?, %s)' % (columns, values)
- for _ in range(100):
- id_ = randint(MIN_INODE, MAX_INODE)
- try:
- self.db.execute(sql, (id_,) + bindings)
- except apsw.ConstraintError:
- pass
- else:
- break
- else:
+ id_ = self.db.rowid('INSERT INTO inodes (%s) VALUES(%s)' % (columns, values),
+ bindings)
+ if id_ > MAX_INODE-1:
+ self.db.execute('DELETE FROM inodes WHERE id=?', (id_,))
raise OutOfInodesError()
return self[id_]
diff --git a/src/s3ql/cli/lock.py b/src/s3ql/lock.py
index 683d458..e02f7eb 100644
--- a/src/s3ql/cli/lock.py
+++ b/src/s3ql/lock.py
@@ -7,15 +7,14 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-
+from .common import setup_logging, CTRL_NAME, QuietError
+from .parse_args import ArgumentParser
+import cPickle as pickle
import llfuse
-import os
import logging
-from s3ql.common import (setup_logging, CTRL_NAME, QuietError)
-from s3ql.parse_args import ArgumentParser
-import cPickle as pickle
-import textwrap
+import os
import sys
+import textwrap
log = logging.getLogger("lock")
diff --git a/src/s3ql/metadata.py b/src/s3ql/metadata.py
new file mode 100644
index 0000000..7424e44
--- /dev/null
+++ b/src/s3ql/metadata.py
@@ -0,0 +1,199 @@
+'''
+metadata.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from .deltadump import (INTEGER, BLOB, TIME, dump_table, load_table)
+import logging
+
+log = logging.getLogger('metadata')
+
+# Has to be kept in sync with create_tables()!
+DUMP_SPEC = [
+ ('objects', 'id', (('id', INTEGER, 1),
+ ('size', INTEGER),
+ ('refcount', INTEGER))),
+
+ ('blocks', 'id', (('id', INTEGER, 1),
+ ('hash', BLOB, 32),
+ ('size', INTEGER),
+ ('obj_id', INTEGER, 1),
+ ('refcount', INTEGER))),
+
+ ('inodes', 'id', (('id', INTEGER, 1),
+ ('uid', INTEGER),
+ ('gid', INTEGER),
+ ('mode', INTEGER),
+ ('mtime', TIME),
+ ('atime', TIME),
+ ('ctime', TIME),
+ ('size', INTEGER),
+ ('rdev', INTEGER),
+ ('locked', INTEGER),
+ ('refcount', INTEGER))),
+
+ ('inode_blocks', 'inode, blockno',
+ (('inode', INTEGER),
+ ('blockno', INTEGER, 1),
+ ('block_id', INTEGER, 1))),
+
+ ('symlink_targets', 'inode', (('inode', INTEGER, 1),
+ ('target', BLOB))),
+
+ ('names', 'id', (('id', INTEGER, 1),
+ ('name', BLOB),
+ ('refcount', INTEGER))),
+
+ ('contents', 'parent_inode, name_id',
+ (('name_id', INTEGER, 1),
+ ('inode', INTEGER, 1),
+ ('parent_inode', INTEGER))),
+
+ ('ext_attributes', 'inode', (('inode', INTEGER),
+ ('name_id', INTEGER),
+ ('value', BLOB))),
+]
+
+
+
+def restore_metadata(fh, db):
+ '''Read metadata from *fh* and write into *db*
+
+ *fh* must be able to return an actual file descriptor from
+ its `fileno` method.
+ '''
+
+ create_tables(db)
+ for (table, _, columns) in DUMP_SPEC:
+ log.info('..%s..', table)
+ load_table(table, columns, db=db, fh=fh)
+ db.execute('ANALYZE')
+
+def cycle_metadata(bucket):
+ from .backends.common import NoSuchObject
+
+ log.info('Backing up old metadata...')
+ for i in reversed(range(10)):
+ try:
+ bucket.copy("s3ql_metadata_bak_%d" % i, "s3ql_metadata_bak_%d" % (i + 1))
+ except NoSuchObject:
+ pass
+
+ bucket.copy("s3ql_metadata", "s3ql_metadata_bak_0")
+
+def dump_metadata(db, fh):
+ '''Dump metadata into fh
+
+ *fh* must be able to return an actual file descriptor from
+ its `fileno` method.
+ '''
+
+ for (table, order, columns) in DUMP_SPEC:
+ log.info('..%s..', table)
+ dump_table(table, order, columns, db=db, fh=fh)
+
+def create_tables(conn):
+ # Table of storage objects
+ # Refcount is included for performance reasons
+ conn.execute("""
+ CREATE TABLE objects (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ refcount INT NOT NULL,
+ size INT
+ )""")
+
+ # Table of known data blocks
+ # Refcount is included for performance reasons
+ conn.execute("""
+ CREATE TABLE blocks (
+ id INTEGER PRIMARY KEY,
+ hash BLOB(16) UNIQUE,
+ refcount INT,
+ size INT NOT NULL,
+ obj_id INTEGER NOT NULL REFERENCES objects(id)
+ )""")
+
+ # Table with filesystem metadata
+ # The number of links `refcount` to an inode can in theory
+ # be determined from the `contents` table. However, managing
+ # this separately should be significantly faster (the information
+ # is required for every getattr!)
+ conn.execute("""
+ CREATE TABLE inodes (
+ -- id has to be specified *exactly* as follows to become
+ -- an alias for the rowid.
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ uid INT NOT NULL,
+ gid INT NOT NULL,
+ mode INT NOT NULL,
+ mtime REAL NOT NULL,
+ atime REAL NOT NULL,
+ ctime REAL NOT NULL,
+ refcount INT NOT NULL,
+ size INT NOT NULL DEFAULT 0,
+ rdev INT NOT NULL DEFAULT 0,
+ locked BOOLEAN NOT NULL DEFAULT 0
+ )""")
+
+ # Further Blocks used by inode (blockno >= 1)
+ conn.execute("""
+ CREATE TABLE inode_blocks (
+ inode INTEGER NOT NULL REFERENCES inodes(id),
+ blockno INT NOT NULL,
+ block_id INTEGER NOT NULL REFERENCES blocks(id),
+ PRIMARY KEY (inode, blockno)
+ )""")
+
+ # Symlinks
+ conn.execute("""
+ CREATE TABLE symlink_targets (
+ inode INTEGER PRIMARY KEY REFERENCES inodes(id),
+ target BLOB NOT NULL
+ )""")
+
+ # Names of file system objects
+ conn.execute("""
+ CREATE TABLE names (
+ id INTEGER PRIMARY KEY,
+ name BLOB NOT NULL,
+ refcount INT NOT NULL,
+ UNIQUE (name)
+ )""")
+
+ # Table of filesystem objects
+ # rowid is used by readdir() to restart at the correct position
+ conn.execute("""
+ CREATE TABLE contents (
+ rowid INTEGER PRIMARY KEY AUTOINCREMENT,
+ name_id INT NOT NULL REFERENCES names(id),
+ inode INT NOT NULL REFERENCES inodes(id),
+ parent_inode INT NOT NULL REFERENCES inodes(id),
+
+ UNIQUE (parent_inode, name_id)
+ )""")
+
+ # Extended attributes
+ conn.execute("""
+ CREATE TABLE ext_attributes (
+ inode INTEGER NOT NULL REFERENCES inodes(id),
+ name_id INTEGER NOT NULL REFERENCES names(id),
+ value BLOB NOT NULL,
+
+ PRIMARY KEY (inode, name_id)
+ )""")
+
+ # Shortcuts
+ conn.execute("""
+ CREATE VIEW contents_v AS
+ SELECT * FROM contents JOIN names ON names.id = name_id
+ """)
+ conn.execute("""
+ CREATE VIEW ext_attributes_v AS
+ SELECT * FROM ext_attributes JOIN names ON names.id = name_id
+ """)
+ \ No newline at end of file
diff --git a/src/s3ql/cli/mkfs.py b/src/s3ql/mkfs.py
index 80abb5e..51afb26 100644
--- a/src/s3ql/cli/mkfs.py
+++ b/src/s3ql/mkfs.py
@@ -7,21 +7,25 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
+from . import CURRENT_FS_REV
+from .backends.common import get_bucket, BetterBucket
+from .common import (get_bucket_cachedir, setup_logging, QuietError,
+ stream_write_bz2, CTRL_INODE)
+from .database import Connection
+from .metadata import dump_metadata, create_tables
+from .parse_args import ArgumentParser
from getpass import getpass
-from s3ql import CURRENT_FS_REV
-from s3ql.backends.common import get_bucket, BetterBucket
-from s3ql.common import (get_bucket_cachedir, setup_logging, QuietError,
- dump_metadata, create_tables, init_tables)
-from s3ql.database import Connection
-from s3ql.parse_args import ArgumentParser
+from llfuse import ROOT_INODE
+from s3ql.backends.common import NoSuchBucket
import cPickle as pickle
import logging
import os
import shutil
+import stat
import sys
+import tempfile
import time
-
log = logging.getLogger("mkfs")
def parse_args(args):
@@ -49,6 +53,31 @@ def parse_args(args):
return options
+def init_tables(conn):
+ # Insert root directory
+ timestamp = time.time() - time.timezone
+ conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (ROOT_INODE, stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
+ os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 1))
+
+ # Insert control inode, the actual values don't matter that much
+ conn.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (CTRL_INODE, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, timestamp, timestamp, timestamp, 42))
+
+ # Insert lost+found directory
+ inode = conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,
+ os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 1))
+ name_id = conn.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
+ (b'lost+found', 1))
+ conn.execute("INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)",
+ (name_id, inode, ROOT_INODE))
+
def main(args=None):
if args is None:
@@ -57,7 +86,10 @@ def main(args=None):
options = parse_args(args)
setup_logging(options)
- plain_bucket = get_bucket(options, plain=True)
+ try:
+ plain_bucket = get_bucket(options, plain=True)
+ except NoSuchBucket as exc:
+ raise QuietError(str(exc))
if 's3ql_metadata' in plain_bucket:
if not options.force:
@@ -111,6 +143,8 @@ def main(args=None):
param['label'] = options.label
param['blocksize'] = options.blocksize * 1024
param['needs_fsck'] = False
+ param['inode_gen'] = 0
+ param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
param['last_fsck'] = time.time() - time.timezone
param['last-modified'] = time.time() - time.timezone
@@ -118,9 +152,19 @@ def main(args=None):
# in BetterBucket is not required for this file system.
param['bucket_revision'] = 1
+ log.info('Dumping metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(db, fh)
+ def do_write(obj_fh):
+ fh.seek(0)
+ stream_write_bz2(fh, obj_fh)
+ return obj_fh
+
+ log.info("Compressing and uploading metadata...")
bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')
- bucket.perform_write(lambda fh: dump_metadata(fh, db) , "s3ql_metadata",
- metadata=param, is_compressed=True)
+ obj_fh = bucket.perform_write(do_write, "s3ql_metadata", metadata=param,
+ is_compressed=True)
+ log.info('Wrote %.2f MB of compressed metadata.', obj_fh.get_obj_size() / 1024**2)
pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
diff --git a/src/s3ql/cli/mount.py b/src/s3ql/mount.py
index 2c3fd48..3f9084e 100644
--- a/src/s3ql/cli/mount.py
+++ b/src/s3ql/mount.py
@@ -7,21 +7,22 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-from s3ql import fs, CURRENT_FS_REV
-from s3ql.backends.common import get_bucket_factory, BucketPool
-from s3ql.block_cache import BlockCache
-from s3ql.common import (setup_logging, get_bucket_cachedir, get_seq_no,
- QuietError, cycle_metadata, dump_metadata, BUFSIZE,
- restore_metadata)
-from s3ql.daemonize import daemonize
-from s3ql.database import Connection
-from s3ql.parse_args import ArgumentParser
+from . import fs, CURRENT_FS_REV
+from .backends.common import get_bucket_factory, BucketPool
+from .block_cache import BlockCache
+from .common import (setup_logging, get_bucket_cachedir, get_seq_no, QuietError,
+ stream_write_bz2, stream_read_bz2)
+from .daemonize import daemonize
+from .database import Connection
+from .inode_cache import InodeCache
+from .metadata import cycle_metadata, dump_metadata, restore_metadata
+from .parse_args import ArgumentParser
+from s3ql.backends.common import NoSuchBucket
from threading import Thread
import cPickle as pickle
import llfuse
import logging
import os
-import shutil
import signal
import stat
import sys
@@ -88,9 +89,12 @@ def main(args=None):
cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)
# Retrieve metadata
- with bucket_pool() as bucket:
- (param, db) = get_metadata(bucket, cachepath)
-
+ try:
+ with bucket_pool() as bucket:
+ (param, db) = get_metadata(bucket, cachepath)
+ except NoSuchBucket as exc:
+ raise QuietError(str(exc))
+
if options.nfs:
# NFS may try to look up '..', so we have to speed up this kind of query
log.info('Creating NFS indices...')
@@ -105,6 +109,7 @@ def main(args=None):
options.cachesize * 1024, options.max_cache_entries)
commit_thread = CommitThread(block_cache)
operations = fs.Operations(block_cache, db, blocksize=param['blocksize'],
+ inode_cache=InodeCache(db, param['inode_gen']),
upload_event=metadata_upload_thread.event)
log.info('Mounting filesystem...')
@@ -193,7 +198,7 @@ def main(args=None):
# Do not update .params yet, dump_metadata() may fail if the database is
# corrupted, in which case we want to force an fsck.
-
+ param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
with bucket_pool() as bucket:
seq_no = get_seq_no(bucket)
if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime:
@@ -204,8 +209,19 @@ def main(args=None):
elif seq_no == param['seq_no']:
cycle_metadata(bucket)
param['last-modified'] = time.time() - time.timezone
- bucket.perform_write(lambda fh: dump_metadata(fh, db) , "s3ql_metadata",
- metadata=param, is_compressed=True)
+
+ log.info('Dumping metadata...')
+ fh = tempfile.TemporaryFile()
+ dump_metadata(db, fh)
+ def do_write(obj_fh):
+ fh.seek(0)
+ stream_write_bz2(fh, obj_fh)
+ return obj_fh
+
+ log.info("Compressing and uploading metadata...")
+ obj_fh = bucket.perform_write(do_write, "s3ql_metadata", metadata=param,
+ is_compressed=True)
+ log.info('Wrote %.2f MB of compressed metadata.', obj_fh.get_obj_size() / 1024**2)
pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
else:
log.error('Remote metadata is newer than local (%d vs %d), '
@@ -262,14 +278,7 @@ def determine_threads(options):
def get_metadata(bucket, cachepath):
- '''Retrieve metadata
-
- Checks:
- - Revision
- - Unclean mounts
-
- Locally cached metadata is used if up-to-date.
- '''
+ '''Retrieve metadata'''
seq_no = get_seq_no(bucket)
@@ -317,22 +326,29 @@ def get_metadata(bucket, cachepath):
log.warn('Last file system check was more than 1 month ago, '
'running fsck.s3ql is recommended.')
+ if param['max_inode'] > 2**32 - 50000:
+ raise QuietError('Insufficient free inodes, fsck run required.')
+ elif param['max_inode'] > 2**31:
+ log.warn('Few free inodes remaining, running fsck is recommended')
+
# Download metadata
if not db:
def do_read(fh):
- os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
- stat.S_IRUSR | stat.S_IWUSR))
- db = Connection(cachepath + '.db.tmp', fast_mode=True)
- try:
- restore_metadata(fh, db)
- finally:
- # If metata reading has to be retried, we don't want to hold
- # a lock on the database.
- db.close()
- bucket.perform_read(do_read, "s3ql_metadata")
+ tmpfh = tempfile.TemporaryFile()
+ stream_read_bz2(fh, tmpfh)
+ return tmpfh
+ log.info('Downloading and decompressing metadata...')
+ tmpfh = bucket.perform_read(do_read, "s3ql_metadata")
+ os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
+ stat.S_IRUSR | stat.S_IWUSR))
+ db = Connection(cachepath + '.db.tmp', fast_mode=True)
+ log.info("Reading metadata...")
+ tmpfh.seek(0)
+ restore_metadata(tmpfh, db)
+ db.close()
os.rename(cachepath + '.db.tmp', cachepath + '.db')
db = Connection(cachepath + '.db')
-
+
# Increase metadata sequence no
param['seq_no'] += 1
param['needs_fsck'] = True
@@ -506,11 +522,10 @@ class MetadataUploadThread(Thread):
log.info('File system unchanged, not uploading metadata.')
continue
- # We dump to a file first, so that we don't hold the
- # lock for quite so long.
- log.info('Saving metadata...')
+ log.info('Dumping metadata...')
fh = tempfile.TemporaryFile()
- dump_metadata(fh, self.db)
+ dump_metadata(self.db, fh)
+
with self.bucket_pool() as bucket:
seq_no = get_seq_no(bucket)
@@ -528,9 +543,12 @@ class MetadataUploadThread(Thread):
self.param['seq_no'] -= 1
def do_write(obj_fh):
fh.seek(0)
- shutil.copyfileobj(fh, obj_fh, BUFSIZE)
- bucket.perform_write(do_write, "s3ql_metadata", metadata=self.param,
- is_compressed=True)
+ stream_write_bz2(fh, obj_fh)
+ return obj_fh
+ log.info("Compressing and uploading metadata...")
+ obj_fh = bucket.perform_write(do_write, "s3ql_metadata", metadata=self.param,
+ is_compressed=True)
+ log.info('Wrote %.2f MB of compressed metadata.', obj_fh.get_obj_size() / 1024**2)
self.param['seq_no'] += 1
fh.close()
diff --git a/src/s3ql/ordered_dict.py b/src/s3ql/ordered_dict.py
index a6eb534..47cf5b9 100644
--- a/src/s3ql/ordered_dict.py
+++ b/src/s3ql/ordered_dict.py
@@ -10,9 +10,6 @@ from __future__ import division, print_function
import collections
-
-__all__ = [ "OrderedDict" ]
-
class OrderedDictElement(object):
"""An element in an OrderedDict
diff --git a/src/s3ql/parse_args.py b/src/s3ql/parse_args.py
index 21bdc7a..320cf38 100644
--- a/src/s3ql/parse_args.py
+++ b/src/s3ql/parse_args.py
@@ -36,15 +36,13 @@ are:
#pylint: disable-all
from __future__ import division, print_function, absolute_import
-
-import s3ql
+from . import VERSION
import argparse
-import re
-import os
import logging.handlers
+import os
+import re
import sys
-__all__ = [ 'ArgumentParser', 'DEFAULT_USAGE']
DEFAULT_USAGE = object()
@@ -137,7 +135,7 @@ class ArgumentParser(argparse.ArgumentParser):
def add_version(self):
self.add_argument('--version', action='version',
help="just print program version and exit",
- version='S3QL %s' % s3ql.VERSION)
+ version='S3QL %s' % VERSION)
def add_quiet(self):
self.add_argument("--quiet", action="store_true", default=False,
diff --git a/src/s3ql/cli/remove.py b/src/s3ql/remove.py
index d70017b..3a775c5 100644
--- a/src/s3ql/cli/remove.py
+++ b/src/s3ql/remove.py
@@ -7,15 +7,14 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-
+from .common import setup_logging, CTRL_NAME, QuietError
+from .parse_args import ArgumentParser
+import cPickle as pickle
import llfuse
-import os
import logging
-from s3ql.common import (setup_logging, CTRL_NAME, QuietError)
-from s3ql.parse_args import ArgumentParser
-import cPickle as pickle
-import textwrap
+import os
import sys
+import textwrap
log = logging.getLogger("remove")
diff --git a/src/s3ql/cli/statfs.py b/src/s3ql/statfs.py
index ad55b0c..c83dd6f 100644
--- a/src/s3ql/cli/statfs.py
+++ b/src/s3ql/statfs.py
@@ -7,16 +7,16 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-
+from .common import CTRL_NAME, QuietError, setup_logging
+from .parse_args import ArgumentParser
import llfuse
-import os
import logging
-from s3ql.common import (CTRL_NAME, QuietError, setup_logging)
-from s3ql.parse_args import ArgumentParser
+import os
import posixpath
import struct
import sys
+
log = logging.getLogger("stat")
def parse_args(args):
diff --git a/src/s3ql/cli/umount.py b/src/s3ql/umount.py
index 57cc792..1a141a1 100644
--- a/src/s3ql/cli/umount.py
+++ b/src/s3ql/umount.py
@@ -7,17 +7,16 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-
+from .common import CTRL_NAME, QuietError, setup_logging
+from .parse_args import ArgumentParser
import llfuse
-import sys
-import os
import logging
-from s3ql.common import (CTRL_NAME, QuietError, setup_logging)
-from s3ql.parse_args import ArgumentParser
+import os
import posixpath
import subprocess
-import time
+import sys
import textwrap
+import time
log = logging.getLogger("umount")
diff --git a/tests/t1_dump.py b/tests/t1_dump.py
new file mode 100644
index 0000000..3fae6c2
--- /dev/null
+++ b/tests/t1_dump.py
@@ -0,0 +1,213 @@
+'''
+t1_dump.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (c) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+from __future__ import division, print_function, absolute_import
+
+from _common import TestCase
+import unittest2 as unittest
+from s3ql import deltadump
+import tempfile
+from s3ql.database import Connection
+import random
+import time
+
+class DumpTests(TestCase):
+ def setUp(self):
+ self.src = Connection(":memory:")
+ self.dst = Connection(":memory:")
+ self.fh = tempfile.TemporaryFile()
+
+ self.create_table(self.src)
+ self.create_table(self.dst)
+
+ def test_1_vals_1(self):
+ self.fill_vals(self.src)
+ dumpspec = (('id', deltadump.INTEGER, 0),)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_1_vals_2(self):
+ self.fill_vals(self.src)
+ dumpspec = (('id', deltadump.INTEGER, 1),)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_1_vals_3(self):
+ self.fill_vals(self.src)
+ dumpspec = (('id', deltadump.INTEGER, -1),)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_2_buf_auto(self):
+ self.fill_vals(self.src)
+ self.fill_buf(self.src)
+ dumpspec = (('id', deltadump.INTEGER),
+ ('buf', deltadump.BLOB))
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_2_buf_fixed(self):
+ BUFLEN=32
+ self.fill_vals(self.src)
+ self.fill_buf(self.src, BUFLEN)
+ dumpspec = (('id', deltadump.INTEGER),
+ ('buf', deltadump.BLOB, BUFLEN))
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_3_deltas_1(self):
+ self.fill_deltas(self.src)
+ dumpspec = (('id', deltadump.INTEGER, 0),)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_3_deltas_2(self):
+ self.fill_deltas(self.src)
+ dumpspec = (('id', deltadump.INTEGER, 1),)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_3_deltas_3(self):
+ self.fill_deltas(self.src)
+ dumpspec = (('id', deltadump.INTEGER, -1),)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+ def test_4_time(self):
+ self.fill_vals(self.src)
+
+ t1 = 0.5 * time.time()
+ t2 = 2 * time.time()
+ for (id_,) in self.src.query('SELECT id FROM test'):
+ val = random.uniform(t1, t2)
+ self.src.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))
+
+ dumpspec = (('id', deltadump.INTEGER),
+ ('buf', deltadump.TIME))
+
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+
+ self.compare_tables(self.src, self.dst)
+
+
+ def test_5_multi(self):
+ self.fill_vals(self.src)
+ dumpspec = (('id', deltadump.INTEGER, 0),)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ deltadump.dump_table(table='test', order='id', columns=dumpspec,
+ db=self.src, fh=self.fh)
+ self.fh.seek(0)
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.dst.execute('DELETE FROM test')
+ deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
+ fh=self.fh)
+ self.compare_tables(self.src, self.dst)
+
+
+ def compare_tables(self, db1, db2):
+ i1 = db1.query('SELECT id, buf FROM test ORDER BY id')
+ i2 = db2.query('SELECT id, buf FROM test ORDER BY id')
+
+ for (id1, buf1) in i1:
+ (id2, buf2) = i2.next()
+
+ self.assertEqual(id1, id2)
+ if isinstance(buf1, float):
+ self.assertAlmostEqual(buf1, buf2, places=9)
+ else:
+ self.assertEqual(buf1, buf2)
+
+ self.assertRaises(StopIteration, i2.next)
+
+ def fill_buf(self, db, len_=None):
+ rfh = open('/dev/urandom', 'rb')
+
+ first = True
+ for (id_,) in db.query('SELECT id FROM test'):
+ if len_ is None and first:
+ val = '' # We always want to check this case
+ first = False
+ elif len_ is None:
+ val = rfh.read(random.randint(0, 140))
+ else:
+ val = rfh.read(len_)
+
+ db.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))
+
+ def fill_vals(self, db):
+ vals = []
+ for exp in [7,8,9,15,16,17,31,32,33,62]:
+ vals += range(2**exp - 5, 2**exp+6)
+ vals += range(2**63 - 5, 2**63)
+ vals += [ -v for v in vals ]
+ vals.append(-(2**63))
+
+ vals = range(5)
+ for val in vals:
+ db.execute('INSERT INTO test (id) VALUES(?)', (val,))
+
+ def fill_deltas(self, db):
+ deltas = []
+ for exp in [7,8,9,15,16,17,31,32,33]:
+ deltas += range(2**exp - 5, 2**exp+6)
+ deltas += [ -v for v in deltas ]
+
+ last = 0
+ for delta in deltas:
+ val = last + delta
+ last = val
+ db.execute('INSERT INTO test (id) VALUES(?)', (val,))
+
+ def create_table(self, db):
+ db.execute('''CREATE TABLE test (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ buf BLOB)''')
+
+
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(DumpTests)
diff --git a/tests/t2_block_cache.py b/tests/t2_block_cache.py
index 79bfb85..daf3853 100644
--- a/tests/t2_block_cache.py
+++ b/tests/t2_block_cache.py
@@ -12,7 +12,8 @@ from contextlib import contextmanager
from s3ql.backends import local
from s3ql.backends.common import BucketPool, AbstractBucket
from s3ql.block_cache import BlockCache
-from s3ql.common import create_tables, init_tables
+from s3ql.mkfs import init_tables
+from s3ql.metadata import create_tables
from s3ql.database import Connection
import llfuse
import os
@@ -356,7 +357,11 @@ class TestBucketPool(AbstractBucket):
"""
return self.bucket.rename(src, dest)
-
+ def get_size(self, key):
+ '''Return size of object stored under *key*'''
+
+ return self.bucket.get_size(key)
+
def commit(cache, inode, block=None):
"""Upload data for `inode`
diff --git a/tests/t3_fs_api.py b/tests/t3_fs_api.py
index dd9ee46..db689c4 100644
--- a/tests/t3_fs_api.py
+++ b/tests/t3_fs_api.py
@@ -14,9 +14,12 @@ from s3ql import fs
from s3ql.backends import local
from s3ql.backends.common import BucketPool
from s3ql.block_cache import BlockCache
-from s3ql.common import ROOT_INODE, create_tables, init_tables
+from s3ql.common import ROOT_INODE
+from s3ql.mkfs import init_tables
+from s3ql.metadata import create_tables
from s3ql.database import Connection
from s3ql.fsck import Fsck
+from s3ql.inode_cache import InodeCache
import errno
import llfuse
import os
@@ -64,7 +67,8 @@ class fs_api_tests(TestCase):
self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir + "/cache",
self.blocksize * 5)
- self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
+ self.server = fs.Operations(self.block_cache, self.db, self.blocksize,
+ InodeCache(self.db, 0))
self.server.init()
@@ -473,7 +477,28 @@ class fs_api_tests(TestCase):
self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')
self.fsck()
+
+ def test_names(self):
+ name1 = self.newname()
+ name2 = self.newname()
+ (fh, _) = self.server.create(ROOT_INODE, name1, self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ (fh, inode) = self.server.create(ROOT_INODE, name2, self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.server.setxattr(inode.id, name1, 'strabumm!')
+ self.fsck()
+
+ self.server.removexattr(inode.id, name1)
+ self.fsck()
+
+ self.server.setxattr(inode.id, name1, 'strabumm karacho!!')
+ self.server.unlink(ROOT_INODE, name1)
+ self.fsck()
+
+
def test_statfs(self):
# Test with zero contents
self.server.statfs()
@@ -613,7 +638,9 @@ class fs_api_tests(TestCase):
self.fsck()
def test_copy_tree(self):
-
+ ext_attr_name = 'system.foo.brazl'
+ ext_attr_val = 'schulla dku woumm bramp'
+
src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())
@@ -622,6 +649,7 @@ class fs_api_tests(TestCase):
self.file_mode(), Ctx())
self.server.write(fh, 0, 'file1 contents')
self.server.release(fh)
+ self.server.setxattr(f1_inode.id, ext_attr_name, ext_attr_val)
# Create hardlink
(fh, f2_inode) = self.server.create(src_inode.id, 'file2',
@@ -662,6 +690,8 @@ class fs_api_tests(TestCase):
self.assertEqual(self.server.read(fh, 0, 42), 'file1 contents')
self.server.release(fh)
self.assertNotEqual(f1_inode.id, f1_inode_c.id)
+ self.assertEqual(self.server.getxattr(f1_inode_c.id, ext_attr_name),
+ ext_attr_val)
# Check file2
fh = self.server.open(f2_inode_c.id, os.O_RDWR)
@@ -678,6 +708,21 @@ class fs_api_tests(TestCase):
self.fsck()
+ def test_copy_tree_2(self):
+ src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
+ dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())
+
+ # Create file
+ (fh, _) = self.server.create(src_inode.id, 'file1',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'block 1 contents')
+ self.server.write(fh, self.blocksize, 'block 1 contents')
+ self.server.release(fh)
+
+ self.server.copy_tree(src_inode.id, dst_inode.id)
+
+ self.fsck()
+
def test_lock_tree(self):
inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
diff --git a/tests/t3_fsck.py b/tests/t3_fsck.py
index 2750f63..8fedb71 100644
--- a/tests/t3_fsck.py
+++ b/tests/t3_fsck.py
@@ -9,7 +9,9 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function
from _common import TestCase
from s3ql.backends import local
-from s3ql.common import ROOT_INODE, create_tables, init_tables
+from s3ql.common import ROOT_INODE
+from s3ql.mkfs import init_tables
+from s3ql.metadata import create_tables
from s3ql.database import Connection, NoSuchRowError
from s3ql.fsck import Fsck
import os
@@ -96,7 +98,7 @@ class fsck_tests(TestCase):
def check():
self.fsck.check_lof()
- self.fsck.check_inode_refcount()
+ self.fsck.check_inodes_refcount()
self.assert_fsck(check)
@@ -120,7 +122,7 @@ class fsck_tests(TestCase):
0, 0, time.time(), time.time(), time.time(), 1, 0))
self._link('name1', inode)
self._link('name2', inode)
- self.assert_fsck(self.fsck.check_inode_refcount)
+ self.assert_fsck(self.fsck.check_inodes_refcount)
def test_orphaned_inode(self):
@@ -128,7 +130,7 @@ class fsck_tests(TestCase):
"VALUES (?,?,?,?,?,?,?,?)",
(stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
0, 0, time.time(), time.time(), time.time(), 1, 0))
- self.assert_fsck(self.fsck.check_inode_refcount)
+ self.assert_fsck(self.fsck.check_inodes_refcount)
def test_name_refcount(self):
@@ -141,22 +143,42 @@ class fsck_tests(TestCase):
self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?', ('name1',))
- self.assert_fsck(self.fsck.check_name_refcount)
+ self.assert_fsck(self.fsck.check_names_refcount)
def test_orphaned_name(self):
self._add_name('zupbrazl')
- self.assert_fsck(self.fsck.check_name_refcount)
+ self.assert_fsck(self.fsck.check_names_refcount)
- def test_ref_integrity(self):
-
+ def test_contents_inode(self):
+
self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
(self._add_name('foobar'), 124, ROOT_INODE))
- self.fsck.found_errors = False
- self.fsck.check_foreign_keys()
- self.assertTrue(self.fsck.found_errors)
-
+ self.assert_fsck(self.fsck.check_contents_inode)
+
+ def test_contents_inode_p(self):
+
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1, 0))
+ self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
+ (self._add_name('foobar'), inode, 123))
+
+ self.assert_fsck(self.fsck.check_contents_parent_inode)
+
+ def test_contents_name(self):
+
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1, 0))
+ self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
+ (42, inode, ROOT_INODE))
+
+ self.assert_fsck(self.fsck.check_contents_name)
+
def _add_name(self, name):
'''Get id for *name* and increase refcount
@@ -178,7 +200,7 @@ class fsck_tests(TestCase):
self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
(self._add_name(name), inode, ROOT_INODE))
- def test_inode_sizes(self):
+ def test_inodes_size(self):
id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
"VALUES (?,?,?,?,?,?,?,?)",
@@ -186,7 +208,7 @@ class fsck_tests(TestCase):
0, 0, time.time(), time.time(), time.time(), 1, 128))
self._link('test-entry', id_)
- obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
+ obj_id = self.db.rowid('INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36))
block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
(1, obj_id, 512))
self.bucket['s3ql_data_%d' % obj_id] = 'foo'
@@ -195,14 +217,14 @@ class fsck_tests(TestCase):
self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.blocksize + 120, id_))
self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
(id_, 1, block_id))
- self.assert_fsck(self.fsck.check_inode_sizes)
+ self.assert_fsck(self.fsck.check_inodes_size)
# Case 2
self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
(id_, 0, block_id))
self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_))
- self.assert_fsck(self.fsck.check_inode_sizes)
+ self.assert_fsck(self.fsck.check_inodes_size)
# Case 3
self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
@@ -211,21 +233,36 @@ class fsck_tests(TestCase):
(self.blocksize + 120, id_))
self.db.execute('UPDATE blocks SET refcount = refcount + 1 WHERE id = ?',
(block_id,))
- self.assert_fsck(self.fsck.check_inode_sizes)
+ self.assert_fsck(self.fsck.check_inodes_size)
- def test_keylist(self):
+ def test_objects_id(self):
# Create an object that only exists in the bucket
self.bucket['s3ql_data_4364'] = 'Testdata'
- self.assert_fsck(self.fsck.check_keylist)
+ self.assert_fsck(self.fsck.check_objects_id)
# Create an object that does not exist in the bucket
- self.db.execute('INSERT INTO objects (id, refcount) VALUES(?, ?)', (34, 1))
- self.assert_fsck(self.fsck.check_keylist)
+ self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)', (34, 1, 27))
+ self.assert_fsck(self.fsck.check_objects_id)
+ def test_blocks_obj_id(self):
+
+ block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
+ (1, 48, 128))
+
+ id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1, 128))
+ self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
+ (id_, 0, block_id))
+
+ self._link('test-entry', id_)
+ self.assert_fsck(self.fsck.check_blocks_obj_id)
+
def test_missing_obj(self):
- obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
(1, obj_id, 128))
@@ -237,8 +274,61 @@ class fsck_tests(TestCase):
(id_, 0, block_id))
self._link('test-entry', id_)
- self.assert_fsck(self.fsck.check_keylist)
+ self.assert_fsck(self.fsck.check_objects_id)
+
+ def test_inode_blocks_inode(self):
+
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)')
+ self.bucket['s3ql_data_%d' % obj_id] = 'foo'
+
+ block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
+ (1, obj_id, 34))
+
+ self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
+ (27, 0, block_id))
+
+ self.assert_fsck(self.fsck.check_inode_blocks_inode)
+
+ def test_inode_blocks_block_id(self):
+
+ id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1, 128))
+ self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
+ (id_, 0, 35))
+
+ self._link('test-entry', id_)
+ self.assert_fsck(self.fsck.check_inode_blocks_block_id)
+
+ def test_symlinks_inode(self):
+
+ self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
+ (42, b'somewhere else'))
+
+ self.assert_fsck(self.fsck.check_symlinks_inode)
+
+ def test_ext_attrs_inode(self):
+
+ self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
+ (self._add_name('some name'), 34, b'some value'))
+
+ self.assert_fsck(self.fsck.check_ext_attributes_inode)
+
+ def test_ext_attrs_name(self):
+
+ id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1, 128))
+ self._link('test-entry', id_)
+
+ self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
+ (34, id_, b'some value'))
+
+ self.assert_fsck(self.fsck.check_ext_attributes_name)
+
@staticmethod
def random_data(len_):
with open("/dev/urandom", "rb") as fd:
@@ -260,16 +350,11 @@ class fsck_tests(TestCase):
(self._add_name(bytes(inode)), inode, last))
last = inode
- self.fsck.found_errors = False
- self.fsck.check_inode_refcount()
- self.assertFalse(self.fsck.found_errors)
- self.fsck.check_loops()
- self.assertTrue(self.fsck.found_errors)
- # We can't fix loops yet
+ self.assert_fsck(self.fsck.check_loops)
def test_obj_refcounts(self):
- obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)')
block_id_1 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
(1, obj_id, 0))
block_id_2 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
@@ -287,16 +372,16 @@ class fsck_tests(TestCase):
self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
(inode, 2, block_id_2))
- self.assert_fsck(self.fsck.check_obj_refcounts)
+ self.assert_fsck(self.fsck.check_objects_refcount)
def test_orphaned_obj(self):
- self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
- self.assert_fsck(self.fsck.check_obj_refcounts)
+ self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 33)')
+ self.assert_fsck(self.fsck.check_objects_refcount)
def test_wrong_block_refcount(self):
- obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 23)')
self.bucket['s3ql_data_%d' % obj_id] = 'foo'
block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
(1, obj_id, 0))
@@ -312,15 +397,15 @@ class fsck_tests(TestCase):
self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
(inode, 1, block_id))
- self.assert_fsck(self.fsck.check_block_refcount)
+ self.assert_fsck(self.fsck.check_blocks_refcount)
def test_orphaned_block(self):
- obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 24)')
self.bucket['s3ql_data_%d' % obj_id] = 'foo'
self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
(1, obj_id, 3))
- self.assert_fsck(self.fsck.check_block_refcount)
+ self.assert_fsck(self.fsck.check_blocks_refcount)
def test_unix_size(self):
@@ -332,11 +417,11 @@ class fsck_tests(TestCase):
self._link('test-entry', inode)
self.fsck.found_errors = False
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertFalse(self.fsck.found_errors)
self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,))
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertTrue(self.fsck.found_errors)
@@ -353,11 +438,11 @@ class fsck_tests(TestCase):
self._link('test-entry', inode)
self.fsck.found_errors = False
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertFalse(self.fsck.found_errors)
self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertTrue(self.fsck.found_errors)
def test_unix_target(self):
@@ -370,11 +455,11 @@ class fsck_tests(TestCase):
self._link('test-entry', inode)
self.fsck.found_errors = False
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertFalse(self.fsck.found_errors)
self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, 'foo'))
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertTrue(self.fsck.found_errors)
def test_symlink_no_target(self):
@@ -384,7 +469,7 @@ class fsck_tests(TestCase):
(stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
self._link('test-entry', inode)
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertTrue(self.fsck.found_errors)
def test_unix_rdev(self):
@@ -397,11 +482,11 @@ class fsck_tests(TestCase):
self._link('test-entry', inode)
self.fsck.found_errors = False
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertFalse(self.fsck.found_errors)
self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertTrue(self.fsck.found_errors)
def test_unix_child(self):
@@ -413,11 +498,11 @@ class fsck_tests(TestCase):
self._link('test-entry', inode)
self.fsck.found_errors = False
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertFalse(self.fsck.found_errors)
self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
(self._add_name('foo'), ROOT_INODE, inode))
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertTrue(self.fsck.found_errors)
def test_unix_blocks(self):
@@ -429,17 +514,17 @@ class fsck_tests(TestCase):
self._link('test-entry', inode)
self.fsck.found_errors = False
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertFalse(self.fsck.found_errors)
- obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
(1, obj_id, 0))
self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
(inode, 1, block_id))
- self.fsck.check_inode_unix()
+ self.fsck.check_unix()
self.assertTrue(self.fsck.found_errors)
diff --git a/tests/t3_inode_cache.py b/tests/t3_inode_cache.py
index e5678da..631da32 100644
--- a/tests/t3_inode_cache.py
+++ b/tests/t3_inode_cache.py
@@ -10,7 +10,8 @@ from __future__ import division, print_function
from s3ql import inode_cache
-from s3ql.common import create_tables, init_tables
+from s3ql.mkfs import init_tables
+from s3ql.metadata import create_tables
from s3ql.database import Connection
from _common import TestCase
import unittest2 as unittest
@@ -24,7 +25,7 @@ class cache_tests(TestCase):
self.db = Connection(self.dbfile.name)
create_tables(self.db)
init_tables(self.db)
- self.cache = inode_cache.InodeCache(self.db)
+ self.cache = inode_cache.InodeCache(self.db, 0)
def tearDown(self):
self.cache.destroy()
diff --git a/tests/t4_fuse.py b/tests/t4_fuse.py
index dfede15..afd8391 100644
--- a/tests/t4_fuse.py
+++ b/tests/t4_fuse.py
@@ -121,6 +121,31 @@ class TimeoutError(Exception):
pass
+def skip_if_no_fusermount():
+ '''Raise SkipTest if fusermount is not available'''
+
+ which = subprocess.Popen(['which', 'fusermount'], stdout=subprocess.PIPE)
+ fusermount_path = which.communicate()[0].strip()
+
+ if not fusermount_path or which.wait() != 0:
+ raise unittest.SkipTest("Can't find fusermount executable")
+
+ if not os.path.exists('/dev/fuse'):
+ raise unittest.SkipTest("FUSE kernel module does not seem to be loaded")
+
+ if os.getuid() == 0:
+ return
+
+ mode = os.stat(fusermount_path).st_mode
+ if mode & stat.S_ISUID == 0:
+ raise unittest.SkipTest('fusermount executable not setuid, and we are not root.')
+
+ try:
+ subprocess.check_call([fusermount_path, '-V'],
+ stdout=open('/dev/null', 'wb'))
+ except subprocess.CalledProcessError:
+ raise unittest.SkipTest('Unable to execute fusermount')
+
if __name__ == '__main__':
mypath = sys.argv[0]
else:
@@ -130,6 +155,8 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(mypath), '..'))
class fuse_tests(TestCase):
def setUp(self):
+ skip_if_no_fusermount()
+
# We need this to test multi block operations
self.src = __file__
if os.path.getsize(self.src) < 1048:
@@ -165,7 +192,11 @@ class fuse_tests(TestCase):
stdin=subprocess.PIPE)
print(self.passphrase, file=self.mount_process.stdin)
self.mount_process.stdin.close()
- retry(30, os.path.ismount, self.mnt_dir)
+ def poll():
+ if os.path.ismount(self.mnt_dir):
+ return True
+ self.assertIsNone(self.mount_process.poll())
+ retry(30, poll)
def umount(self):
devnull = open('/dev/null', 'wb')
@@ -174,7 +205,7 @@ class fuse_tests(TestCase):
proc = subprocess.Popen([os.path.join(BASEDIR, 'bin', 'umount.s3ql'),
'--quiet', self.mnt_dir])
- retry(30, lambda : proc.poll() is not None)
+ retry(60, lambda : proc.poll() is not None)
self.assertEquals(proc.wait(), 0)
self.assertEqual(self.mount_process.wait(), 0)
@@ -195,7 +226,8 @@ class fuse_tests(TestCase):
os.rmdir(self.mnt_dir)
# Give mount process a little while to terminate
- retry(10, lambda : self.mount_process.poll() is not None)
+ if self.mount_process is not None:
+ retry(10, lambda : self.mount_process.poll() is not None)
shutil.rmtree(self.cache_dir)
shutil.rmtree(self.bucket_dir)
diff --git a/tests/t5_ctrl.py b/tests/t5_ctrl.py
new file mode 100644
index 0000000..ac8ca26
--- /dev/null
+++ b/tests/t5_ctrl.py
@@ -0,0 +1,40 @@
+'''
+t5_ctrl.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+from __future__ import division, print_function, absolute_import
+import s3ql.ctrl
+import sys
+import t4_fuse
+import unittest2 as unittest
+
+class CtrlTests(t4_fuse.fuse_tests):
+
+ def runTest(self):
+ self.mkfs()
+ self.mount()
+ self.tst_ctrl_flush()
+ self.umount()
+ self.fsck()
+
+ def tst_ctrl_flush(self):
+
+ try:
+ s3ql.ctrl.main(['flushcache', self.mnt_dir])
+ except:
+ sys.excepthook(*sys.exc_info())
+ self.fail("s3qlctrl raised exception")
+
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(CtrlTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t5_fsck.py b/tests/t5_fsck.py
new file mode 100644
index 0000000..1191c28
--- /dev/null
+++ b/tests/t5_fsck.py
@@ -0,0 +1,89 @@
+'''
+t5_fsck.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU GPLv3.
+'''
+
+from __future__ import division, print_function, absolute_import
+from s3ql.common import get_bucket_cachedir
+from s3ql.database import Connection
+import errno
+import os.path
+import shutil
+import subprocess
+import t4_fuse
+import tarfile
+import tempfile
+import unittest2 as unittest
+
+class FsckTests(t4_fuse.fuse_tests):
+
+ def runTest(self):
+ try:
+ subprocess.call(['rsync', '--version'],
+ stderr=subprocess.STDOUT,
+ stdout=open('/dev/null', 'wb'))
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ raise unittest.SkipTest('rsync not installed')
+ raise
+
+ data_file = os.path.join(os.path.dirname(__file__), 'data.tar.bz2')
+ ref_dir = tempfile.mkdtemp()
+ try:
+ tarfile.open(data_file).extractall(ref_dir)
+
+ # Make file system and fake high inode number
+ self.mkfs()
+ db = Connection(get_bucket_cachedir(self.bucketname, self.cache_dir) + '.db')
+ db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
+ (2**31+10, u'inodes'))
+ db.close()
+
+ # Copy source data
+ self.mount()
+ subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
+ self.mnt_dir + '/'])
+ self.umount()
+
+ # Check that inode watermark is high
+ db = Connection(get_bucket_cachedir(self.bucketname, self.cache_dir) + '.db')
+ self.assertGreater(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2**31+10)
+ self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'), 2**31+10)
+ db.close()
+
+ # Renumber inodes
+ self.fsck()
+
+ # Check if renumbering was done
+ db = Connection(get_bucket_cachedir(self.bucketname, self.cache_dir) + '.db')
+ self.assertLess(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2**31)
+ self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2**31)
+ db.close()
+
+ # Compare
+ self.mount()
+ rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
+ ref_dir + '/', self.mnt_dir + '/'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out = rsync.communicate()[0]
+ if out:
+ self.fail('Copy not equal to original, rsync says:\n' + out)
+ elif rsync.returncode != 0:
+ self.fail('rsync failed with ' + out)
+
+ self.umount()
+ finally:
+ shutil.rmtree(ref_dir)
+
+
+# Somehow important according to pyunit documentation
+def suite():
+ return unittest.makeSuite(FsckTests)
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main() \ No newline at end of file
diff --git a/tests/t5_cli.py b/tests/t5_lock_rm.py
index c2bae22..7f976d5 100644
--- a/tests/t5_cli.py
+++ b/tests/t5_lock_rm.py
@@ -1,40 +1,31 @@
'''
-t5_cli.py - this file is part of S3QL (http://s3ql.googlecode.com)
+t5_lock_rm.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
-from __future__ import division, print_function
+from __future__ import division, print_function, absolute_import
+
import errno
import llfuse
import os.path
-import s3ql.cli.ctrl
-import s3ql.cli.lock
-import s3ql.cli.remove
+import s3ql.lock
+import s3ql.remove
import sys
import t4_fuse
import unittest2 as unittest
-class cliTests(t4_fuse.fuse_tests):
+class LockRemoveTests(t4_fuse.fuse_tests):
def runTest(self):
self.mkfs()
self.mount()
self.tst_lock_rm()
- self.tst_ctrl_flush()
self.umount()
self.fsck()
- def tst_ctrl_flush(self):
-
- try:
- s3ql.cli.ctrl.main(['flushcache', self.mnt_dir])
- except:
- sys.excepthook(*sys.exc_info())
- self.fail("s3qlctrl raised exception")
-
def tst_lock_rm(self):
# Extract tar
@@ -46,7 +37,7 @@ class cliTests(t4_fuse.fuse_tests):
# copy
try:
- s3ql.cli.lock.main([tempdir])
+ s3ql.lock.main([tempdir])
except:
sys.excepthook(*sys.exc_info())
self.fail("s3qllock raised exception")
@@ -63,7 +54,7 @@ class cliTests(t4_fuse.fuse_tests):
# delete properly
try:
- s3ql.cli.remove.main([tempdir])
+ s3ql.remove.main([tempdir])
except:
sys.excepthook(*sys.exc_info())
self.fail("s3qlrm raised exception")
@@ -72,7 +63,7 @@ class cliTests(t4_fuse.fuse_tests):
# Somehow important according to pyunit documentation
def suite():
- return unittest.makeSuite(cliTests)
+ return unittest.makeSuite(LockRemoveTests)
# Allow calling from command line