Diffstat (limited to 'src')

-rw-r--r--  src/s3ql.egg-info/PKG-INFO  |  6
-rw-r--r--  src/s3ql/__init__.py        |  2
-rw-r--r--  src/s3ql/adm.py             |  8
-rw-r--r--  src/s3ql/block_cache.py     | 55
-rw-r--r--  src/s3ql/fs.py              |  5
-rw-r--r--  src/s3ql/fsck.py            | 13
-rw-r--r--  src/s3ql/metadata.py        |  2
-rw-r--r--  src/s3ql/mkfs.py            |  3
-rw-r--r--  src/s3ql/mount.py           |  1
-rw-r--r--  src/s3ql/multi_lock.py      |  2
-rw-r--r--  src/s3ql/parse_args.py      | 17
-rw-r--r--  src/s3ql/verify.py          |  1

12 files changed, 61 insertions, 54 deletions
diff --git a/src/s3ql.egg-info/PKG-INFO b/src/s3ql.egg-info/PKG-INFO
index 70d2335..ae42fb4 100644
--- a/src/s3ql.egg-info/PKG-INFO
+++ b/src/s3ql.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: s3ql
-Version: 2.28
+Version: 2.29
Summary: a full-featured file system for online data storage
Home-page: https://bitbucket.org/nikratio/s3ql/
Author: Nikolaus Rath
@@ -144,7 +144,7 @@ Description: ..
`s3ql+subscribe@googlegroups.com
<mailto:s3ql+subscribe@googlegroups.com>`_.
- Please report any bugs you may encounter in the `Bitbucket Issue Tracker`_.
+ Please report any bugs you may encounter in the `GitHub Issue Tracker`_.
Contributing
============
@@ -161,7 +161,7 @@ Description: ..
.. _`Installation Instructions`: https://bitbucket.org/nikratio/s3ql/wiki/Installation
.. _`S3QL FAQ`: https://bitbucket.org/nikratio/s3ql/wiki/FAQ
.. _`S3QL Mailing List`: http://groups.google.com/group/s3ql
- .. _`Bitbucket Issue Tracker`: https://bitbucket.org/nikratio/s3ql/issues
+ .. _`GitHub Issue Tracker`: https://github.com/s3ql/s3ql/issues
.. _BitBucket: https://bitbucket.org/nikratio/s3ql/
.. _GitHub: https://github.com/s3ql/main
.. _`Rath Consulting`: http://www.rath-consulting.biz/
diff --git a/src/s3ql/__init__.py b/src/s3ql/__init__.py
index 2e19b9a..185766f 100644
--- a/src/s3ql/__init__.py
+++ b/src/s3ql/__init__.py
@@ -22,7 +22,7 @@ __all__ = [ 'adm', 'backends', 'block_cache', 'common', 'calc_mro',
'REV_VER_MAP', 'RELEASE', 'BUFSIZE',
'CTRL_NAME', 'CTRL_INODE' ]
-VERSION = '2.28'
+VERSION = '2.29'
RELEASE = '%s' % VERSION
# TODO: On next revision bump, remove upgrade code from backend/comprenc.py and
diff --git a/src/s3ql/adm.py b/src/s3ql/adm.py
index 99fac6f..5544cb6 100644
--- a/src/s3ql/adm.py
+++ b/src/s3ql/adm.py
@@ -44,7 +44,6 @@ def parse_args(args):
pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\
Hint: run `%(prog)s --help` to get help on other available actions and
optional arguments that can be used with all actions.'''))
- pparser.add_storage_url()
subparsers = parser.add_subparsers(metavar='<action>', dest='action',
help='may be either of')
@@ -61,10 +60,10 @@ def parse_args(args):
sparser.add_argument("--threads", type=int, default=20,
help='Number of threads to use')
+ parser.add_storage_url()
parser.add_debug()
parser.add_quiet()
parser.add_log()
- parser.add_authfile()
parser.add_backend_options()
parser.add_cachedir()
parser.add_version()
@@ -155,6 +154,11 @@ def change_passphrase(backend):
data_pw = backend.passphrase
+ print(textwrap.dedent('''\
+ NOTE: If your password has been compromised already, then changing
+ it WILL NOT PROTECT YOUR DATA, because an attacker may have already
+ retrieved the master key.
+ '''))
if sys.stdin.isatty():
wrap_pw = getpass("Enter new encryption password: ")
if not wrap_pw == getpass("Confirm new encryption password: "):
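The warning added above follows from the key handling visible in this function: `data_pw = backend.passphrase` stays untouched while only the wrapping password changes, i.e. data is encrypted with a fixed master key and the passphrase merely wraps that key. A minimal sketch of such an envelope scheme, assuming nothing about S3QL's real cipher or KDF choices; all names below are illustrative:

# Illustrative sketch only (not S3QL's actual API): data is encrypted
# with a random master key; the passphrase merely wraps that key, so
# changing the passphrase re-wraps the key but leaves the data (and a
# previously leaked master key) untouched.
import base64
import hashlib
import os

from cryptography.fernet import Fernet  # stand-in for the real cipher

def _wrapping_key(passphrase):
    # Toy KDF for illustration; a real system would use PBKDF2/scrypt.
    return base64.urlsafe_b64encode(hashlib.sha256(passphrase.encode()).digest())

master_key = os.urandom(32)  # encrypts the actual data; never rotates

def wrap(passphrase, key=master_key):
    return Fernet(_wrapping_key(passphrase)).encrypt(key)

def change_passphrase(old_pw, new_pw, wrapped):
    # Only the wrapper changes -- the master key stays the same, which is
    # exactly why a compromised master key is not healed by this step.
    key = Fernet(_wrapping_key(old_pw)).decrypt(wrapped)
    return Fernet(_wrapping_key(new_pw)).encrypt(key)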
diff --git a/src/s3ql/block_cache.py b/src/s3ql/block_cache.py
index 4678260..a8ebb64 100644
--- a/src/s3ql/block_cache.py
+++ b/src/s3ql/block_cache.py
@@ -321,14 +321,14 @@ class BlockCache(object):
else:
self.mlock.release(obj_id, noerror=noerror)
- def _lock_entry(self, inode, blockno, release_global=False):
+ def _lock_entry(self, inode, blockno, release_global=False, timeout=None):
'''Acquire lock on cache entry'''
if release_global:
with lock_released:
- self.mlock.acquire((inode, blockno))
+ return self.mlock.acquire((inode, blockno), timeout=timeout)
else:
- self.mlock.acquire((inode, blockno))
+ return self.mlock.acquire((inode, blockno), timeout=timeout)
def _unlock_entry(self, inode, blockno, release_global=False,
noerror=False):
@@ -887,30 +887,39 @@ class BlockCache(object):
if end_no is None:
end_no = start_no + 1
-
- for blockno in range(start_no, end_no):
- self._lock_entry(inode, blockno, release_global=True)
- try:
- if (inode, blockno) in self.cache:
- log.debug('removing from cache')
- self.cache.remove((inode, blockno))
-
- try:
- block_id = self.db.get_val('SELECT block_id FROM inode_blocks '
- 'WHERE inode=? AND blockno=?', (inode, blockno))
- except NoSuchRowError:
- log.debug('block not in db')
+ blocknos = set(range(start_no, end_no))
+
+ # First do an opportunistic pass and remove everything where we can
+ # immediately get a lock. This is important when removing a file right
+ # after it has been created. If the upload of the first block has
+ # already started, removal would be stuck behind the upload procedure,
+ # waiting for every block to be uploaded only to remove it afterwards.
+ for timeout in (0, None):
+ for blockno in list(blocknos):
+ if not self._lock_entry(inode, blockno, release_global=True, timeout=timeout):
continue
+ blocknos.remove(blockno)
+ try:
+ if (inode, blockno) in self.cache:
+ log.debug('removing from cache')
+ self.cache.remove((inode, blockno))
+
+ try:
+ block_id = self.db.get_val('SELECT block_id FROM inode_blocks '
+ 'WHERE inode=? AND blockno=?', (inode, blockno))
+ except NoSuchRowError:
+ log.debug('block not in db')
+ continue
- # Detach inode from block
- self.db.execute('DELETE FROM inode_blocks WHERE inode=? AND blockno=?',
- (inode, blockno))
+ # Detach inode from block
+ self.db.execute('DELETE FROM inode_blocks WHERE inode=? AND blockno=?',
+ (inode, blockno))
- finally:
- self._unlock_entry(inode, blockno, release_global=True)
+ finally:
+ self._unlock_entry(inode, blockno, release_global=True)
- # Decrease block refcount
- self._deref_block(block_id)
+ # Decrease block refcount
+ self._deref_block(block_id)
log.debug('finished')
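The rewritten removal loop above is a two-pass strategy: the first pass with timeout=0 skims off every block whose lock is immediately free, the second with timeout=None blocks on the stragglers. Reduced to its essentials (discard() is a hypothetical stand-in for the cache/db cleanup in the hunk):

# Two-pass opportunistic locking, as introduced above. Assumes a lock
# whose acquire() accepts a timeout and returns True/False -- the shape
# MultiLock gains later in this diff.
def remove_all(lock, entries, discard):
    pending = set(entries)
    for timeout in (0, None):           # pass 1: non-blocking; pass 2: wait
        for entry in list(pending):
            if not lock.acquire(entry, timeout=timeout):
                continue                # busy right now; retry in pass 2
            pending.remove(entry)
            try:
                discard(entry)          # hypothetical per-entry cleanup
            finally:
                lock.release(entry)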
diff --git a/src/s3ql/fs.py b/src/s3ql/fs.py
index 2b46e7f..5bd3518 100644
--- a/src/s3ql/fs.py
+++ b/src/s3ql/fs.py
@@ -226,9 +226,8 @@ class Operations(llfuse.Operations):
def listxattr(self, id_, ctx):
log.debug('started with %d', id_)
names = list()
- with self.db.query('SELECT name FROM ext_attributes_v WHERE inode=?', (id_,)) as res:
- for (name,) in res:
- names.append(name)
+ for (name,) in self.db.query('SELECT name FROM ext_attributes_v WHERE inode=?', (id_,)):
+ names.append(name)
return names
def setxattr(self, id_, name, value, ctx):
diff --git a/src/s3ql/fsck.py b/src/s3ql/fsck.py
index 1017613..b3cd834 100644
--- a/src/s3ql/fsck.py
+++ b/src/s3ql/fsck.py
@@ -544,8 +544,8 @@ class Fsck(object):
continue
self.moved_inodes.add(inode)
- affected_entries = list(self.conn.query('SELECT name, name_id, parent_inode '
- 'FROM contents_v WHERE inode=?', (inode,)))
+ affected_entries = self.conn.get_list('SELECT name, name_id, parent_inode '
+ 'FROM contents_v WHERE inode=?', (inode,))
for (name, name_id, id_p) in affected_entries:
path = get_path(id_p, self.conn, name)
self.log_error("File may lack data, moved to /lost+found: %s", to_str(path))
@@ -691,8 +691,8 @@ class Fsck(object):
log.info('Checking blocks (checksums)...')
- for (block_id, obj_id) in list(self.conn.query('SELECT id, obj_id FROM blocks '
- 'WHERE hash IS NULL')):
+ for (block_id, obj_id) in self.conn.get_list('SELECT id, obj_id FROM blocks '
+ 'WHERE hash IS NULL'):
self.found_errors = True
# This should only happen when there was an error during upload,
@@ -989,8 +989,8 @@ class Fsck(object):
# Copy the list, or we may pick up the same entry again and again
# (first from the original location, then from lost+found)
- affected_entries = list(self.conn.query('SELECT name, name_id, parent_inode '
- 'FROM contents_v WHERE inode=?', (id_,)))
+ affected_entries = self.conn.get_list('SELECT name, name_id, parent_inode '
+ 'FROM contents_v WHERE inode=?', (id_,))
for (name, name_id, id_p) in affected_entries:
path = get_path(id_p, self.conn, name)
self.log_error("File may lack data, moved to /lost+found: %s", to_str(path))
@@ -1169,7 +1169,6 @@ def parse_args(args):
parser.add_log('~/.s3ql/fsck.log')
parser.add_cachedir()
- parser.add_authfile()
parser.add_debug()
parser.add_quiet()
parser.add_backend_options()
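The three fsck.py hunks above replace list(self.conn.query(...)) with a conn.get_list() helper. The retained comment ("Copy the list, or we may pick up the same entry again and again") explains why the copy matters: entries are moved to /lost+found while iterating, so a live cursor could re-yield its own changes. A plausible shape for the helper; this is an assumption, since the real method lives in S3QL's database wrapper and is not part of this diff:

# Plausible sketch of conn.get_list(); not shown in this diff.
def get_list(self, sql, params=()):
    '''Run *sql* and return all result rows as a list.

    Materializing the cursor up front lets callers modify the tables
    they are iterating over without re-reading their own changes.
    '''
    with self.query(sql, params) as cursor:
        return list(cursor)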
diff --git a/src/s3ql/metadata.py b/src/s3ql/metadata.py
index f465c6b..5a8bffb 100644
--- a/src/s3ql/metadata.py
+++ b/src/s3ql/metadata.py
@@ -177,7 +177,7 @@ def create_tables(conn):
conn.execute("""
CREATE TABLE blocks (
id INTEGER PRIMARY KEY,
- hash BLOB(16) UNIQUE,
+ hash BLOB(32) UNIQUE,
refcount INT,
size INT NOT NULL,
obj_id INTEGER NOT NULL REFERENCES objects(id)
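The hash column widens from 16 to 32 bytes: 16 bytes is the size of an MD5 digest, 32 bytes that of SHA-256, which suggests the block checksum moved to a stronger hash (an inference from digest sizes, not stated in this diff). SQLite does not enforce declared BLOB lengths, so the change documents intent rather than adding a constraint:

# Digest sizes behind the column change (illustrative):
import hashlib

data = b'example block'
assert len(hashlib.md5(data).digest()) == 16     # fits the old BLOB(16)
assert len(hashlib.sha256(data).digest()) == 32  # fits the new BLOB(32)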
diff --git a/src/s3ql/mkfs.py b/src/s3ql/mkfs.py
index ecf81d0..f5cdf04 100644
--- a/src/s3ql/mkfs.py
+++ b/src/s3ql/mkfs.py
@@ -31,7 +31,6 @@ def parse_args(args):
description="Initializes an S3QL file system")
parser.add_cachedir()
- parser.add_authfile()
parser.add_debug()
parser.add_quiet()
parser.add_backend_options()
@@ -95,7 +94,7 @@ def main(args=None):
atexit.register(plain_backend.close)
log.info("Before using S3QL, make sure to read the user's guide, especially\n"
- "the 'Important Rules to Avoid Loosing Data' section.")
+ "the 'Important Rules to Avoid Losing Data' section.")
if isinstance(plain_backend, s3.Backend) and '.' in plain_backend.bucket_name:
log.warning('S3 Buckets with names containing dots cannot be '
diff --git a/src/s3ql/mount.py b/src/s3ql/mount.py
index 8c61345..2773fc4 100644
--- a/src/s3ql/mount.py
+++ b/src/s3ql/mount.py
@@ -514,7 +514,6 @@ def parse_args(args):
parser.add_log('~/.s3ql/mount.log')
parser.add_cachedir()
- parser.add_authfile()
parser.add_debug()
parser.add_quiet()
parser.add_backend_options()
diff --git a/src/s3ql/multi_lock.py b/src/s3ql/multi_lock.py
index 59b23a8..1177d1d 100644
--- a/src/s3ql/multi_lock.py
+++ b/src/s3ql/multi_lock.py
@@ -52,6 +52,8 @@ class MultiLock:
self.locked_keys.add(key)
+ return True
+
def release(self, *key, noerror=False):
"""Release lock on given key
diff --git a/src/s3ql/parse_args.py b/src/s3ql/parse_args.py
index f557c6d..b2f5be0 100644
--- a/src/s3ql/parse_args.py
+++ b/src/s3ql/parse_args.py
@@ -162,12 +162,6 @@ class ArgumentParser(argparse.ArgumentParser):
help="Activate debugging output from all S3QL modules. "
+ destnote)
- def add_authfile(self):
- self.add_argument("--authfile", type=str, metavar='<path>',
- default=os.path.expanduser("~/.s3ql/authinfo2"),
- help='Read authentication credentials from this file '
- '(default: `~/.s3ql/authinfo2)`')
-
def add_cachedir(self):
self.add_argument("--cachedir", type=str, metavar='<path>',
default=os.path.expanduser("~/.s3ql"),
@@ -186,6 +180,10 @@ class ArgumentParser(argparse.ArgumentParser):
self.add_argument("storage_url", metavar='<storage-url>',
type=storage_url_type,
help='Storage URL of the backend that contains the file system')
+ self.add_argument("--authfile", type=str, metavar='<path>',
+ default=os.path.expanduser("~/.s3ql/authinfo2"),
+ help='Read authentication credentials from this file '
+ '(default: `~/.s3ql/authinfo2`)')
def add_subparsers(self, **kw):
'''Pass parent and set prog to default usage message'''
@@ -211,9 +209,8 @@ class ArgumentParser(argparse.ArgumentParser):
except ArgumentError as exc:
self.error(str(exc))
- if hasattr(options, 'authfile'):
- assert options.storage_url
- self._read_authfile(options)
+ if hasattr(options, 'storage_url'):
+ self._init_backend_factory(options)
if hasattr(options, 'cachedir'):
assert options.storage_url
@@ -232,7 +229,7 @@ class ArgumentParser(argparse.ArgumentParser):
return options
- def _read_authfile(self, options):
+ def _init_backend_factory(self, options):
storage_url = options.storage_url
hit = re.match(r'^([a-zA-Z0-9]+)://', storage_url)
if not hit:
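Taken together, these hunks fold --authfile into add_storage_url() and rename _read_authfile() to _init_backend_factory(), keyed on the presence of storage_url rather than authfile. The effect: no command can accept credentials without also taking a storage URL, which is why the per-command parse_args() functions in adm.py, fsck.py, mkfs.py, mount.py and verify.py all drop their explicit add_authfile() calls. A condensed sketch of the coupling, simplified from the hunks above:

# Simplified from parse_args.py above: credential options ride along
# with the storage URL, so they cannot be registered independently.
import argparse
import os

class ArgumentParser(argparse.ArgumentParser):
    def add_storage_url(self):
        self.add_argument('storage_url', metavar='<storage-url>')
        self.add_argument('--authfile', metavar='<path>',
                          default=os.path.expanduser('~/.s3ql/authinfo2'))

    def parse_args(self, *args, **kw):
        options = super().parse_args(*args, **kw)
        if hasattr(options, 'storage_url'):
            self._init_backend_factory(options)  # reads authfile, picks backend
        return options

    def _init_backend_factory(self, options):
        pass  # credential lookup elided; see the real method above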
diff --git a/src/s3ql/verify.py b/src/s3ql/verify.py
index ca3075d..b8913a2 100644
--- a/src/s3ql/verify.py
+++ b/src/s3ql/verify.py
@@ -52,7 +52,6 @@ def parse_args(args):
parser.add_quiet()
parser.add_version()
parser.add_cachedir()
- parser.add_authfile()
parser.add_backend_options()
parser.add_storage_url()