Diffstat (limited to 'src')
-rw-r--r--  src/s3ql.egg-info/PKG-INFO     |   2
-rw-r--r--  src/s3ql.egg-info/SOURCES.txt  |  15
-rw-r--r--  src/s3ql/__init__.py           |   4
-rw-r--r--  src/s3ql/adm.py                |  40
-rw-r--r--  src/s3ql/backends/__init__.py  |   2
-rw-r--r--  src/s3ql/backends/common.py    |  52
-rw-r--r--  src/s3ql/backends/gs.py        |  38
-rw-r--r--  src/s3ql/backends/gss.py       |  45
-rw-r--r--  src/s3ql/backends/local.py     |  24
-rw-r--r--  src/s3ql/backends/s3.py        |  59
-rw-r--r--  src/s3ql/backends/s3c.py       |  36
-rw-r--r--  src/s3ql/backends/s3cs.py      |  56
-rw-r--r--  src/s3ql/backends/s3s.py       |  45
-rw-r--r--  src/s3ql/backends/swift.py     |  26
-rw-r--r--  src/s3ql/block_cache.py        |  12
-rw-r--r--  src/s3ql/fsck.py               |  36
-rw-r--r--  src/s3ql/mkfs.py               |  15
-rw-r--r--  src/s3ql/mount.py              |  35
-rw-r--r--  src/s3ql/parse_args.py         |   6
-rw-r--r--  src/s3ql/umount.py             | 127
20 files changed, 203 insertions(+), 472 deletions(-)
diff --git a/src/s3ql.egg-info/PKG-INFO b/src/s3ql.egg-info/PKG-INFO
index 5f378f7..ee0e64c 100644
--- a/src/s3ql.egg-info/PKG-INFO
+++ b/src/s3ql.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: s3ql
-Version: 1.9
+Version: 1.10
Summary: a full-featured file system for online data storage
Home-page: http://code.google.com/p/s3ql/
Author: Nikolaus Rath
diff --git a/src/s3ql.egg-info/SOURCES.txt b/src/s3ql.egg-info/SOURCES.txt
index 6082f9a..b8e165e 100644
--- a/src/s3ql.egg-info/SOURCES.txt
+++ b/src/s3ql.egg-info/SOURCES.txt
@@ -1,3 +1,4 @@
+CREDITS.txt
Changes.txt
INSTALL.txt
LICENSE
@@ -24,10 +25,13 @@ doc/manual.pdf
doc/html/.buildinfo
doc/html/about.html
doc/html/adm.html
+doc/html/authinfo.html
doc/html/backends.html
doc/html/contrib.html
+doc/html/durability.html
doc/html/fsck.html
doc/html/general.html
+doc/html/impl_details.html
doc/html/index.html
doc/html/installation.html
doc/html/issues.html
@@ -42,10 +46,13 @@ doc/html/tips.html
doc/html/umount.html
doc/html/_sources/about.txt
doc/html/_sources/adm.txt
+doc/html/_sources/authinfo.txt
doc/html/_sources/backends.txt
doc/html/_sources/contrib.txt
+doc/html/_sources/durability.txt
doc/html/_sources/fsck.txt
doc/html/_sources/general.txt
+doc/html/_sources/impl_details.txt
doc/html/_sources/index.txt
doc/html/_sources/installation.txt
doc/html/_sources/issues.txt
@@ -127,11 +134,13 @@ doc/man/s3qlstat.1
doc/man/umount.s3ql.1
rst/about.rst
rst/adm.rst
+rst/authinfo.rst
rst/backends.rst
rst/conf.py
rst/contrib.rst
+rst/durability.rst
rst/fsck.rst
-rst/general.rst
+rst/impl_details.rst
rst/index.rst
rst/installation.rst
rst/issues.rst
@@ -192,12 +201,9 @@ src/s3ql.egg-info/zip-safe
src/s3ql/backends/__init__.py
src/s3ql/backends/common.py
src/s3ql/backends/gs.py
-src/s3ql/backends/gss.py
src/s3ql/backends/local.py
src/s3ql/backends/s3.py
src/s3ql/backends/s3c.py
-src/s3ql/backends/s3cs.py
-src/s3ql/backends/s3s.py
src/s3ql/backends/swift.py
tests/__init__.py
tests/_common.py
@@ -214,6 +220,7 @@ tests/t4_fuse.py
tests/t5_cp.py
tests/t5_ctrl.py
tests/t5_fsck.py
+tests/t5_fsck.py.orig
tests/t5_full.py
tests/t5_lock_rm.py
util/cmdline_lexer.py
diff --git a/src/s3ql/__init__.py b/src/s3ql/__init__.py
index 7331937..558b31c 100644
--- a/src/s3ql/__init__.py
+++ b/src/s3ql/__init__.py
@@ -14,12 +14,12 @@ __all__ = [ 'adm', 'backends', 'block_cache', 'cleanup_manager', 'common',
'parse_args', 'remove', 'statfs', 'umount', 'VERSION',
'CURRENT_FS_REV', 'REV_VER_MAP' ]
-VERSION = '1.9'
+VERSION = '1.10'
CURRENT_FS_REV = 15
# Maps file system revisions to the last S3QL version that
# supported this revision.
-REV_VER_MAP = { 15: '1.9',
+REV_VER_MAP = { 15: '1.10',
14: '1.8.1',
13: '1.6',
12: '1.3',
diff --git a/src/s3ql/adm.py b/src/s3ql/adm.py
index 1644632..deb7e7f 100644
--- a/src/s3ql/adm.py
+++ b/src/s3ql/adm.py
@@ -8,19 +8,15 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function, absolute_import
from . import CURRENT_FS_REV, REV_VER_MAP
-from .backends.common import BetterBucket, get_bucket
-from .common import (QuietError, BUFSIZE, setup_logging, get_bucket_cachedir,
- get_seq_no, stream_write_bz2, CTRL_INODE)
+from .backends.common import BetterBucket, get_bucket, NoSuchBucket
+from .common import (QuietError, BUFSIZE, setup_logging, get_bucket_cachedir, get_seq_no,
+ stream_write_bz2, stream_read_bz2, CTRL_INODE)
from .database import Connection, NoSuchRowError
-from .fsck import Fsck
-from .metadata import (restore_metadata, cycle_metadata, dump_metadata,
- create_tables)
+from .metadata import restore_metadata, cycle_metadata, dump_metadata, create_tables
from .parse_args import ArgumentParser
from datetime import datetime as Datetime
from getpass import getpass
from llfuse import ROOT_INODE
-from s3ql.backends.common import NoSuchBucket
-from s3ql.common import stream_read_bz2
import cPickle as pickle
import logging
import lzma
@@ -65,6 +61,7 @@ def parse_args(args):
parser.add_quiet()
parser.add_log()
parser.add_authfile()
+ parser.add_ssl()
parser.add_cachedir()
parser.add_version()
@@ -213,10 +210,8 @@ def clear(bucket, cachepath):
bucket.clear()
- print('File system deleted.')
-
- if not bucket.is_get_consistent():
- log.info('Note: it may take a while for the removals to propagate through the backend.')
+ log.info('File system deleted.')
+ log.info('Note: it may take a while for the removals to propagate through the backend.')
def get_old_rev_msg(rev, prog):
return textwrap.dedent('''\
@@ -263,20 +258,13 @@ def upgrade(bucket, cachepath):
# Check for unclean shutdown
if param['seq_no'] < seq_no:
- if bucket.is_get_consistent():
- print(textwrap.fill(textwrap.dedent('''\
- It appears that the file system is still mounted somewhere else. If this is not
- the case, the file system may have not been unmounted cleanly and you should try
- to run fsck on the computer where the file system has been mounted most recently.
- ''')))
- else:
- print(textwrap.fill(textwrap.dedent('''\
- It appears that the file system is still mounted somewhere else. If this is not the
- case, the file system may have not been unmounted cleanly or the data from the
- most-recent mount may have not yet propagated through the backend. In the later case,
- waiting for a while should fix the problem, in the former case you should try to run
- fsck on the computer where the file system has been mounted most recently.
- ''')))
+ print(textwrap.fill(textwrap.dedent('''\
+ Backend reports that fs is still mounted. If this is not the case, the file system may
+ have not been unmounted cleanly or the data from the most-recent mount may have not yet
+ propagated through the backend. In the latter case, waiting for a while should fix the
+ problem, in the former case you should try to run fsck on the computer where the file
+ system has been mounted most recently.
+ ''')))
print(get_old_rev_msg(param['revision'], 'fsck.s3ql'))
raise QuietError()
diff --git a/src/s3ql/backends/__init__.py b/src/s3ql/backends/__init__.py
index 0eebcd8..6b9fcf0 100644
--- a/src/s3ql/backends/__init__.py
+++ b/src/s3ql/backends/__init__.py
@@ -8,4 +8,4 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function, absolute_import
-__all__ = [ 'common', 'local', 's3', 's3s', 'gs', 'gss', 's3c', 's3cs', 'swift' ]
+__all__ = [ 'common', 'local', 's3', 'gs', 's3c', 'swift' ]
diff --git a/src/s3ql/backends/common.py b/src/s3ql/backends/common.py
index e506719..8d12d44 100644
--- a/src/s3ql/backends/common.py
+++ b/src/s3ql/backends/common.py
@@ -322,25 +322,6 @@ class AbstractBucket(object):
pass
@abstractmethod
- def is_get_consistent(self):
- '''If True, objects retrievals are guaranteed to be up-to-date
-
- If this method returns True, then creating, deleting, or overwriting an
- object is guaranteed to be immediately reflected in subsequent object
- retrieval attempts.
- '''
- pass
-
- @abstractmethod
- def is_list_create_consistent(self):
- '''If True, new objects are guaranteed to show up in object listings
-
- If this method returns True, creation of objects will immediately be
- reflected when retrieving the list of available objects.
- '''
- pass
-
- @abstractmethod
def clear(self):
"""Delete all objects in bucket"""
pass
@@ -572,23 +553,6 @@ class BetterBucket(AbstractBucket):
return fh
- def is_get_consistent(self):
- '''If True, objects retrievals are guaranteed to be up-to-date
-
- If this method returns True, then creating, deleting, or overwriting an
- object is guaranteed to be immediately reflected in subsequent object
- retrieval attempts.
- '''
- return self.bucket.is_get_consistent()
-
- def is_list_create_consistent(self):
- '''If True, new objects are guaranteed to show up in object listings
-
- If this method returns True, creation of objects will immediately be
- reflected when retrieving the list of available objects.
- '''
- return self.bucket.is_get_consistent()
-
def clear(self):
"""Delete all objects in bucket"""
return self.bucket.clear()
@@ -1136,7 +1100,7 @@ def get_bucket(options, plain=False):
return get_bucket_factory(options, plain)()
-def get_bucket_factory(options, plain=False):
+def get_bucket_factory(options, plain=False, ssl=False):
'''Return factory producing bucket objects for given storage-url
If *plain* is true, don't attempt to unlock and don't wrap into
@@ -1194,10 +1158,9 @@ def get_bucket_factory(options, plain=False):
else:
backend_pw = sys.stdin.readline().rstrip()
-
-
try:
- bucket = bucket_class(options.storage_url, backend_login, backend_pw)
+ bucket = bucket_class(options.storage_url, backend_login, backend_pw,
+ options.ssl)
# Do not use bucket.lookup(), this would use a HEAD request and
# not provide any useful error messages if something goes wrong
@@ -1220,7 +1183,8 @@ def get_bucket_factory(options, plain=False):
encrypted = True
if plain:
- return lambda: bucket_class(options.storage_url, backend_login, backend_pw)
+ return lambda: bucket_class(options.storage_url, backend_login, backend_pw,
+ options.ssl)
if encrypted and not bucket_passphrase:
if sys.stdin.isatty():
@@ -1237,7 +1201,8 @@ def get_bucket_factory(options, plain=False):
if not encrypted:
return lambda: BetterBucket(None, compress,
- bucket_class(options.storage_url, backend_login, backend_pw))
+ bucket_class(options.storage_url, backend_login, backend_pw,
+ options.ssl))
tmp_bucket = BetterBucket(bucket_passphrase, compress, bucket)
@@ -1247,4 +1212,5 @@ def get_bucket_factory(options, plain=False):
raise QuietError('Wrong bucket passphrase')
return lambda: BetterBucket(data_pw, compress,
- bucket_class(options.storage_url, backend_login, backend_pw))
+ bucket_class(options.storage_url, backend_login, backend_pw,
+ options.ssl))
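For illustration, the new four-argument constructor that get_bucket_factory() now calls can also be exercised directly. This is a minimal sketch only; the bucket name and credentials below are made-up placeholders, not values from this change:

    from s3ql.backends import s3

    # Hypothetical credentials; the fourth argument is the new use_ssl flag
    # that get_bucket_factory() threads through to every backend class.
    bucket = s3.Bucket('s3://mybucket/prefix', 'AKIAEXAMPLE', 'secretkey', True)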
diff --git a/src/s3ql/backends/gs.py b/src/s3ql/backends/gs.py
index 2206dda..f746a21 100644
--- a/src/s3ql/backends/gs.py
+++ b/src/s3ql/backends/gs.py
@@ -28,13 +28,13 @@ class Bucket(s3c.Bucket):
consistency.
"""
- def __init__(self, storage_url, gs_key, gs_secret):
- super(Bucket, self).__init__(storage_url, gs_key, gs_secret)
+ def __init__(self, storage_url, gs_key, gs_secret, use_ssl):
+ super(Bucket, self).__init__(storage_url, gs_key, gs_secret, use_ssl)
self.namespace = 'http://doc.s3.amazonaws.com/2006-03-01'
@staticmethod
- def _parse_storage_url(storage_url):
+ def _parse_storage_url(storage_url, use_ssl):
hit = re.match(r'^gs://([^/]+)(?:/(.*))?$', storage_url)
if not hit:
raise QuietError('Invalid storage URL')
@@ -42,36 +42,8 @@ class Bucket(s3c.Bucket):
bucket_name = hit.group(1)
hostname = '%s.commondatastorage.googleapis.com' % bucket_name
prefix = hit.group(2) or ''
- return (hostname, 80, bucket_name, prefix)
-
- @retry
- def _get_region(self):
- ''''Return bucket region'''
-
- log.debug('_get_region()')
- resp = self._do_request('GET', '/', subres='location')
- region = ElementTree.parse(resp).getroot().text
-
- return region
-
- def is_get_consistent(self):
- '''If True, objects retrievals are guaranteed to be up-to-date
-
- If this method returns True, then creating, deleting, or overwriting an
- object is guaranteed to be immediately reflected in subsequent object
- retrieval attempts.
- '''
-
- return True
-
- def is_list_create_consistent(self):
- '''If True, new objects are guaranteed to show up in object listings
-
- If this method returns True, creation of objects will immediately be
- reflected when retrieving the list of available objects.
- '''
-
- return False
+ port = 443 if use_ssl else 80
+ return (hostname, port, bucket_name, prefix)
def __str__(self):
return 'gs://%s/%s' % (self.bucket_name, self.prefix)
diff --git a/src/s3ql/backends/gss.py b/src/s3ql/backends/gss.py
deleted file mode 100644
index 95e4191..0000000
--- a/src/s3ql/backends/gss.py
+++ /dev/null
@@ -1,45 +0,0 @@
-'''
-backends/gss.py - this file is part of S3QL (http://s3ql.googlecode.com)
-
-Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
-
-This program can be distributed under the terms of the GNU GPLv3.
-'''
-
-from __future__ import division, print_function, absolute_import
-from . import gs
-from ..common import QuietError
-from .common import http_connection
-import re
-
-
-# Pylint goes berserk with false positives
-#pylint: disable=E1002,E1101,W0232
-
-class Bucket(gs.Bucket):
- """A bucket stored in Google Storage
-
- This class uses secure (SSL) connections to connect to GS.
-
- The bucket guarantees immediate get consistency and eventual list
- consistency.
- """
-
- @staticmethod
- def _parse_storage_url(storage_url):
- hit = re.match(r'^gss://([^/]+)(?:/(.*))?$', storage_url)
- if not hit:
- raise QuietError('Invalid storage URL: %s' % storage_url)
-
- bucket_name = hit.group(1)
- hostname = '%s.commondatastorage.googleapis.com' % bucket_name
- prefix = hit.group(2) or ''
- return (hostname, 443, bucket_name, prefix)
-
- def _get_conn(self):
- '''Return connection to server'''
-
- return http_connection(self.hostname, self.port, ssl=True)
-
- def __str__(self):
- return 'gss://%s/%s' % (self.bucket_name, self.prefix)
diff --git a/src/s3ql/backends/local.py b/src/s3ql/backends/local.py
index a1e4e77..423cfec 100644
--- a/src/s3ql/backends/local.py
+++ b/src/s3ql/backends/local.py
@@ -26,11 +26,14 @@ class Bucket(AbstractBucket):
needs_login = False
- def __init__(self, storage_url, backend_login, backend_pw): #IGNORE:W0613
+ def __init__(self, storage_url, backend_login, backend_pw, use_ssl=False):
'''Initialize local bucket
Login and password are ignored.
'''
+ # Unused argument
+ #pylint: disable=W0613
+
super(Bucket, self).__init__()
name = storage_url[len('local://'):]
self.name = name
@@ -141,25 +144,6 @@ class Bucket(AbstractBucket):
pickle.dump(metadata, dest, 2)
return dest
- def is_get_consistent(self):
- '''If True, objects retrievals are guaranteed to be up-to-date
-
- If this method returns True, then creating, deleting, or overwriting an
- object is guaranteed to be immediately reflected in subsequent object
- retrieval attempts.
- '''
-
- return True
-
- def is_list_create_consistent(self):
- '''If True, new objects are guaranteed to show up in object listings
-
- If this method returns True, creation of objects will immediately be
- reflected when retrieving the list of available objects.
- '''
-
- return True
-
def clear(self):
"""Delete all objects in bucket"""
diff --git a/src/s3ql/backends/s3.py b/src/s3ql/backends/s3.py
index 9793a96..f485d6e 100644
--- a/src/s3ql/backends/s3.py
+++ b/src/s3ql/backends/s3.py
@@ -8,9 +8,7 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function, absolute_import
from . import s3c
-from s3ql.backends.common import retry
from s3ql.common import QuietError
-import xml.etree.cElementTree as ElementTree
import logging
import re
@@ -18,14 +16,6 @@ log = logging.getLogger("backend.s3")
# Pylint goes berserk with false positives
#pylint: disable=E1002,E1101
-
-
-# These regions provide read after write consistency for new objects
-# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/LocationSelection.html
-GOOD_REGIONS=('EU', 'us-west-1', 'us-west-2', 'ap-southeast-1', 'ap-northeast-1', 'sa-east-1')
-
-# These don't
-BAD_REGIONS=('us-standard',)
class Bucket(s3c.Bucket):
"""A bucket stored in Amazon S3
@@ -36,18 +26,13 @@ class Bucket(s3c.Bucket):
object will be immediately retrievable. Additional consistency guarantees
may or may not be available and can be queried for with instance methods.
"""
+
+ def __init__(self, storage_url, login, password, use_ssl):
+ super(Bucket, self).__init__(storage_url, login, password, use_ssl)
- def __init__(self, storage_url, login, password):
- super(Bucket, self).__init__(storage_url, login, password)
-
- self.region = self._get_region()
- if self.region in BAD_REGIONS:
- log.warn('Warning: bucket provides insufficient consistency guarantees!')
- elif self.region not in GOOD_REGIONS:
- log.warn('Unknown region: %s - please file a bug report. ')
@staticmethod
- def _parse_storage_url(storage_url):
+ def _parse_storage_url(storage_url, use_ssl):
hit = re.match(r'^s3s?://([^/]+)(?:/(.*))?$', storage_url)
if not hit:
raise QuietError('Invalid storage URL')
@@ -55,40 +40,8 @@ class Bucket(s3c.Bucket):
bucket_name = hit.group(1)
hostname = '%s.s3.amazonaws.com' % bucket_name
prefix = hit.group(2) or ''
- return (hostname, 80, bucket_name, prefix)
-
- @retry
- def _get_region(self):
- ''''Return bucket region'''
-
- log.debug('_get_region()')
- resp = self._do_request('GET', '/', subres='location')
-
- region = ElementTree.parse(resp).getroot().text
-
- if not region:
- region = 'us-standard'
-
- return region
+ port = 443 if use_ssl else 80
+ return (hostname, port, bucket_name, prefix)
def __str__(self):
return 's3://%s/%s' % (self.bucket_name, self.prefix)
-
- def is_get_consistent(self):
- '''If True, objects retrievals are guaranteed to be up-to-date
-
- If this method returns True, then creating, deleting, or overwriting an
- object is guaranteed to be immediately reflected in subsequent object
- retrieval attempts.
- '''
-
- return False
-
- def is_list_create_consistent(self):
- '''If True, new objects are guaranteed to show up in object listings
-
- If this method returns True, creation of objects will immediately be
- reflected when retrieving the list of available objects.
- '''
-
- return self.region in GOOD_REGIONS
diff --git a/src/s3ql/backends/s3c.py b/src/s3ql/backends/s3c.py
index ea0d209..53077ae 100644
--- a/src/s3ql/backends/s3c.py
+++ b/src/s3ql/backends/s3c.py
@@ -41,15 +41,16 @@ class Bucket(AbstractBucket):
The bucket guarantees only immediate get after create consistency.
"""
- def __init__(self, storage_url, login, password):
+ def __init__(self, storage_url, login, password, use_ssl):
super(Bucket, self).__init__()
- (host, port, bucket_name, prefix) = self._parse_storage_url(storage_url)
+ (host, port, bucket_name, prefix) = self._parse_storage_url(storage_url, use_ssl)
self.bucket_name = bucket_name
self.prefix = prefix
self.hostname = host
self.port = port
+ self.use_ssl = use_ssl
self.conn = self._get_conn()
self.password = password
@@ -57,7 +58,7 @@ class Bucket(AbstractBucket):
self.namespace = 'http://s3.amazonaws.com/doc/2006-03-01/'
@staticmethod
- def _parse_storage_url(storage_url):
+ def _parse_storage_url(storage_url, use_ssl):
'''Extract information from storage URL
Return a tuple * (host, port, bucket_name, prefix) * .
@@ -73,7 +74,12 @@ class Bucket(AbstractBucket):
raise QuietError('Invalid storage URL')
hostname = hit.group(1)
- port = int(hit.group(2) or '80')
+ if hit.group(2):
+ port = int(hit.group(2))
+ elif use_ssl:
+ port = 443
+ else:
+ port = 80
bucketname = hit.group(3)
prefix = hit.group(4) or ''
@@ -82,7 +88,7 @@ class Bucket(AbstractBucket):
def _get_conn(self):
'''Return connection to server'''
- return http_connection(self.hostname, self.port, ssl=False)
+ return http_connection(self.hostname, self.port, self.use_ssl)
def is_temp_failure(self, exc): #IGNORE:W0613
'''Return true if exc indicates a temporary error
@@ -408,8 +414,7 @@ class Bucket(AbstractBucket):
"""Delete all objects in bucket
Note that this method may not be able to see (and therefore also not
- delete) recently uploaded objects if `is_list_create_consistent` is
- False.
+ delete) recently uploaded objects.
"""
# We have to cache keys, because otherwise we can't use the
@@ -423,23 +428,6 @@ class Bucket(AbstractBucket):
# Ignore missing objects when clearing bucket
self.delete(s3key, True)
- def is_get_consistent(self):
- '''If True, objects retrievals are guaranteed to be up-to-date
-
- If this method returns True, then creating, deleting, or overwriting an
- object is guaranteed to be immediately reflected in subsequent object
- retrieval attempts.
- '''
- return False
-
- def is_list_create_consistent(self):
- '''If True, new objects are guaranteed to show up in object listings
-
- If this method returns True, creation of objects will immediately be
- reflected when retrieving the list of available objects.
- '''
- return False
-
def __str__(self):
return 's3c://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix)
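The port selection introduced above can be summarized in a standalone sketch. The regex follows _parse_storage_url() in this file; the URLs are hypothetical examples:

    import re

    def parse(storage_url, use_ssl):
        # Same pattern as s3c.Bucket._parse_storage_url: backend scheme,
        # hostname, optional port, bucket name, optional prefix.
        hit = re.match(r'^[a-zA-Z0-9]+://([^/:]+)(?::([0-9]+))?/([^/]+)(?:/(.*))?$',
                       storage_url)
        if not hit:
            raise ValueError('Invalid storage URL')
        # An explicit port always wins; otherwise SSL implies 443, plain HTTP 80.
        port = int(hit.group(2)) if hit.group(2) else (443 if use_ssl else 80)
        return (hit.group(1), port, hit.group(3), hit.group(4) or '')

    assert parse('s3c://example.com/bucket', True)[1] == 443
    assert parse('s3c://example.com:8080/bucket', True)[1] == 8080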
diff --git a/src/s3ql/backends/s3cs.py b/src/s3ql/backends/s3cs.py
deleted file mode 100644
index 7dc0eb5..0000000
--- a/src/s3ql/backends/s3cs.py
+++ /dev/null
@@ -1,56 +0,0 @@
-'''
-backends/s3cs.py - this file is part of S3QL (http://s3ql.googlecode.com)
-
-Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
-
-This program can be distributed under the terms of the GNU GPLv3.
-'''
-
-from __future__ import division, print_function, absolute_import
-from . import s3c
-from ..common import QuietError
-from .common import http_connection
-import re
-
-# Pylint goes berserk with false positives
-#pylint: disable=E1002,E1101,W0232
-
-
-class Bucket(s3c.Bucket):
- """A bucket stored in some S3 compatible storage service.
-
- This class uses secure (SSL) connections.
-
- The bucket guarantees only immediate get after create consistency.
- """
-
- def _get_conn(self):
- '''Return connection to server'''
-
- return http_connection(self.hostname, self.port, ssl=True)
-
- def __str__(self):
- return 's3cs://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix)
-
- @staticmethod
- def _parse_storage_url(storage_url):
- '''Extract information from storage URL
-
- Return a tuple * (host, port, bucket_name, prefix) * .
- '''
-
- hit = re.match(r'^[a-zA-Z0-9]+://' # Backend
- r'([^/:]+)' # Hostname
- r'(?::([0-9]+))?' # Port
- r'/([^/]+)' # Bucketname
- r'(?:/(.*))?$', # Prefix
- storage_url)
- if not hit:
- raise QuietError('Invalid storage URL')
-
- hostname = hit.group(1)
- port = int(hit.group(2) or '443')
- bucketname = hit.group(3)
- prefix = hit.group(4) or ''
-
- return (hostname, port, bucketname, prefix) \ No newline at end of file
diff --git a/src/s3ql/backends/s3s.py b/src/s3ql/backends/s3s.py
deleted file mode 100644
index 7d50a21..0000000
--- a/src/s3ql/backends/s3s.py
+++ /dev/null
@@ -1,45 +0,0 @@
-'''
-s3s.py - this file is part of S3QL (http://s3ql.googlecode.com)
-
-Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
-
-This program can be distributed under the terms of the GNU GPLv3.
-'''
-
-from __future__ import division, print_function, absolute_import
-from . import s3
-from ..common import QuietError
-from .common import http_connection
-import re
-
-# Pylint goes berserk with false positives
-#pylint: disable=E1002,E1101,W0232
-
-class Bucket(s3.Bucket):
- """A bucket stored in Amazon S3
-
- This class uses secure (SSL) connections to connect to S3.
-
- The bucket guarantees get after create consistency, i.e. a newly created
- object will be immediately retrievable. Additional consistency guarantees
- may or may not be available and can be queried for with instance methods.
- """
-
- def _get_conn(self):
- '''Return connection to server'''
-
- return http_connection(self.hostname, self.port, ssl=True)
-
- def __str__(self):
- return 's3s://%s/%s' % (self.bucket_name, self.prefix)
-
- @staticmethod
- def _parse_storage_url(storage_url):
- hit = re.match(r'^s3s://([^/]+)(?:/(.*))?$', storage_url)
- if not hit:
- raise QuietError('Invalid storage URL')
-
- bucket_name = hit.group(1)
- hostname = '%s.s3.amazonaws.com' % bucket_name
- prefix = hit.group(2) or ''
- return (hostname, 443, bucket_name, prefix)
diff --git a/src/s3ql/backends/swift.py b/src/s3ql/backends/swift.py
index 95ddb47..c065734 100644
--- a/src/s3ql/backends/swift.py
+++ b/src/s3ql/backends/swift.py
@@ -31,11 +31,14 @@ class Bucket(AbstractBucket):
object will be immediately retrievable.
"""
- def __init__(self, storage_url, login, password):
+ def __init__(self, storage_url, login, password, use_ssl=True):
+ # Unused argument
+ #pylint: disable=W0613
+
super(Bucket, self).__init__()
(host, port, bucket_name, prefix) = self._parse_storage_url(storage_url)
-
+
self.hostname = host
self.port = port
self.bucket_name = bucket_name
@@ -298,25 +301,6 @@ class Bucket(AbstractBucket):
return ObjectW(key, self, headers)
- def is_get_consistent(self):
- '''If True, objects retrievals are guaranteed to be up-to-date
-
- If this method returns True, then creating, deleting, or overwriting an
- object is guaranteed to be immediately reflected in subsequent object
- retrieval attempts.
- '''
-
- return True
-
- def is_list_create_consistent(self):
- '''If True, new objects are guaranteed to show up in object listings
-
- If this method returns True, creation of objects will immediately be
- reflected when retrieving the list of available objects.
- '''
-
- return True
-
def clear(self):
"""Delete all objects in bucket"""
diff --git a/src/s3ql/block_cache.py b/src/s3ql/block_cache.py
index 70121e6..19c6b8d 100644
--- a/src/s3ql/block_cache.py
+++ b/src/s3ql/block_cache.py
@@ -7,7 +7,7 @@ This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
-from .common import sha256_fh, BUFSIZE
+from .common import sha256_fh, BUFSIZE, QuietError
from .database import NoSuchRowError
from .ordered_dict import OrderedDict
from Queue import Queue
@@ -18,6 +18,7 @@ import os
import shutil
import threading
import time
+from .backends.common import NoSuchObject
# standard logger for this module
log = logging.getLogger("BlockCache")
@@ -532,7 +533,6 @@ class BlockCache(object):
shutil.copyfileobj(fh, el, BUFSIZE)
return el
try:
-
with lock_released:
with self.bucket_pool() as bucket:
el = bucket.perform_read(do_read, 's3ql_data_%d' % obj_id)
@@ -545,8 +545,14 @@ class BlockCache(object):
el.dirty = False
self.size += el.size
+ except NoSuchObject:
+ raise QuietError('Backend claims that object %d does not exist, data '
+ 'may be corrupted or inconsistent. fsck required.'
+ % obj_id)
+
except:
- el.unlink()
+ if el is not None:
+ el.unlink()
raise
finally:
self.in_transit.remove(obj_id)
diff --git a/src/s3ql/fsck.py b/src/s3ql/fsck.py
index 1f0f154..0f890e2 100644
--- a/src/s3ql/fsck.py
+++ b/src/s3ql/fsck.py
@@ -8,16 +8,13 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function, absolute_import
from . import CURRENT_FS_REV
-from .backends.common import NoSuchObject, get_bucket
-from .common import (ROOT_INODE, inode_for_path, sha256_fh, get_path, BUFSIZE,
- get_bucket_cachedir, setup_logging, QuietError, get_seq_no, CTRL_INODE,
- stream_write_bz2, stream_read_bz2)
+from .backends.common import NoSuchObject, get_bucket, NoSuchBucket
+from .common import (ROOT_INODE, inode_for_path, sha256_fh, get_path, BUFSIZE, get_bucket_cachedir,
+ setup_logging, QuietError, get_seq_no, stream_write_bz2, stream_read_bz2, CTRL_INODE)
from .database import NoSuchRowError, Connection
-from .metadata import (restore_metadata, cycle_metadata, dump_metadata,
- create_tables)
+from .metadata import restore_metadata, cycle_metadata, dump_metadata, create_tables
from .parse_args import ArgumentParser
from os.path import basename
-from s3ql.backends.common import NoSuchBucket
import apsw
import cPickle as pickle
import logging
@@ -847,8 +844,7 @@ class Fsck(object):
self.conn.execute('CREATE TEMPORARY TABLE missing AS '
'SELECT id FROM objects EXCEPT SELECT id FROM obj_ids')
for (obj_id,) in self.conn.query('SELECT * FROM missing'):
- if (not self.bucket.is_list_create_consistent()
- and ('s3ql_data_%d' % obj_id) in self.bucket):
+ if ('s3ql_data_%d' % obj_id) in self.bucket:
# Object was just not in list yet
continue
@@ -1034,6 +1030,7 @@ def parse_args(args):
parser.add_authfile()
parser.add_debug_modules()
parser.add_quiet()
+ parser.add_ssl()
parser.add_version()
parser.add_storage_url()
@@ -1100,20 +1097,13 @@ def main(args=None):
'S3QL installation.')
if param['seq_no'] < seq_no:
- if bucket.is_get_consistent():
- print(textwrap.fill(textwrap.dedent('''\
- Up to date metadata is not available. Probably the file system has not
- been properly unmounted and you should try to run fsck on the computer
- where the file system has been mounted most recently.
- ''')))
- else:
- print(textwrap.fill(textwrap.dedent('''\
- Up to date metadata is not available. Either the file system has not
- been unmounted cleanly or the data has not yet propagated through the backend.
- In the later case, waiting for a while should fix the problem, in
- the former case you should try to run fsck on the computer where
- the file system has been mounted most recently
- ''')))
+ print(textwrap.fill(textwrap.dedent('''\
+ Backend reports that file system is still mounted elsewhere. Either the file system
+ has not been unmounted cleanly or the data has not yet propagated through the backend.
+ In the latter case, waiting for a while should fix the problem, in the former case you
+ should try to run fsck on the computer where the file system has been mounted most
+ recently.
+ ''')))
print('Enter "continue" to use the outdated data anyway:',
'> ', sep='\n', end='')
diff --git a/src/s3ql/mkfs.py b/src/s3ql/mkfs.py
index 9d33558..15f329f 100644
--- a/src/s3ql/mkfs.py
+++ b/src/s3ql/mkfs.py
@@ -8,15 +8,13 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function, absolute_import
from . import CURRENT_FS_REV
-from .backends.common import get_bucket, BetterBucket
-from .common import (get_bucket_cachedir, setup_logging, QuietError,
- stream_write_bz2, CTRL_INODE)
+from .backends.common import get_bucket, BetterBucket, NoSuchBucket
+from .common import get_bucket_cachedir, setup_logging, QuietError, CTRL_INODE, stream_write_bz2
from .database import Connection
from .metadata import dump_metadata, create_tables
from .parse_args import ArgumentParser
from getpass import getpass
from llfuse import ROOT_INODE
-from s3ql.backends.common import NoSuchBucket
import cPickle as pickle
import logging
import os
@@ -37,6 +35,7 @@ def parse_args(args):
parser.add_authfile()
parser.add_debug_modules()
parser.add_quiet()
+ parser.add_ssl()
parser.add_version()
parser.add_storage_url()
@@ -97,15 +96,17 @@ def main(args=None):
except NoSuchBucket as exc:
raise QuietError(str(exc))
+ log.info("Before using S3QL, make sure to read the user's guide, especially\n"
+ "the 'Important Rules to Avoid Loosing Data' section.")
+
if 's3ql_metadata' in plain_bucket:
if not options.force:
raise QuietError("Found existing file system! Use --force to overwrite")
log.info('Purging existing file system data..')
plain_bucket.clear()
- if not plain_bucket.is_get_consistent():
- log.info('Please note that the new file system may appear inconsistent\n'
- 'for a while until the removals have propagated through the backend.')
+ log.info('Please note that the new file system may appear inconsistent\n'
+ 'for a while until the removals have propagated through the backend.')
if not options.plain:
if sys.stdin.isatty():
diff --git a/src/s3ql/mount.py b/src/s3ql/mount.py
index f261411..4744487 100644
--- a/src/s3ql/mount.py
+++ b/src/s3ql/mount.py
@@ -8,16 +8,15 @@ This program can be distributed under the terms of the GNU GPLv3.
from __future__ import division, print_function, absolute_import
from . import fs, CURRENT_FS_REV
-from .backends.common import get_bucket_factory, BucketPool
+from .backends.common import get_bucket_factory, BucketPool, NoSuchBucket
from .block_cache import BlockCache
-from .common import (setup_logging, get_bucket_cachedir, get_seq_no, QuietError,
- stream_write_bz2, stream_read_bz2)
+from .common import (setup_logging, get_bucket_cachedir, get_seq_no, QuietError, stream_write_bz2,
+ stream_read_bz2)
from .daemonize import daemonize
from .database import Connection
from .inode_cache import InodeCache
from .metadata import cycle_metadata, dump_metadata, restore_metadata
from .parse_args import ArgumentParser
-from s3ql.backends.common import NoSuchBucket
from threading import Thread
import cPickle as pickle
import llfuse
@@ -27,10 +26,10 @@ import signal
import stat
import sys
import tempfile
-import textwrap
import thread
import threading
import time
+
log = logging.getLogger("mount")
@@ -150,10 +149,10 @@ def main(args=None):
log.info("FUSE main loop terminated.")
except:
- # Tell finally handle not to raise any exceptions
+ # Tell finally block not to raise any additional exceptions
exc_info[:] = sys.exc_info()
- log.exception('Encountered exception, trying to clean up...')
+ log.warn('Encountered exception, trying to clean up...')
# We do *not* free the mountpoint on exception. Why? E.g. if someone is
# mirroring the mountpoint, and it suddenly becomes empty, all the
@@ -166,7 +165,7 @@ def main(args=None):
except:
log.exception("Exception during cleanup:")
- raise QuietError('Aborted with exception.')
+ raise
else:
# llfuse.close() still needs block_cache.
@@ -311,20 +310,7 @@ def get_metadata(bucket, cachepath):
# Check for unclean shutdown
if param['seq_no'] < seq_no:
- if bucket.is_get_consistent():
- raise QuietError(textwrap.fill(textwrap.dedent('''\
- It appears that the file system is still mounted somewhere else. If this is not
- the case, the file system may have not been unmounted cleanly and you should try
- to run fsck on the computer where the file system has been mounted most recently.
- ''')))
- else:
- raise QuietError(textwrap.fill(textwrap.dedent('''\
- It appears that the file system is still mounted somewhere else. If this is not the
- case, the file system may have not been unmounted cleanly or the data from the
- most-recent mount may have not yet propagated through the backend. In the later case,
- waiting for a while should fix the problem, in the former case you should try to run
- fsck on the computer where the file system has been mounted most recently.
- ''')))
+ raise QuietError('Backend reports that fs is still mounted elsewhere, aborting.')
# Check revision
if param['revision'] < CURRENT_FS_REV:
@@ -424,6 +410,7 @@ def parse_args(args):
parser.add_authfile()
parser.add_debug_modules()
parser.add_quiet()
+ parser.add_ssl()
parser.add_version()
parser.add_storage_url()
@@ -475,8 +462,8 @@ def parse_args(args):
default=None, metavar='<no>',
help='Number of parallel upload threads to use (default: auto).')
parser.add_argument("--nfs", action="store_true", default=False,
- help='Support export of S3QL file systems over NFS '
- '(default: %(default)s)')
+ help='Enable some optimizations for exporting the file system '
+ 'over NFS. (default: %(default)s)')
options = parser.parse_args(args)
diff --git a/src/s3ql/parse_args.py b/src/s3ql/parse_args.py
index 8e8c47f..bc108dc 100644
--- a/src/s3ql/parse_args.py
+++ b/src/s3ql/parse_args.py
@@ -141,6 +141,12 @@ class ArgumentParser(argparse.ArgumentParser):
self.add_argument("--quiet", action="store_true", default=False,
help="be really quiet")
+ def add_ssl(self):
+ self.add_argument("--ssl", action="store_true", default=False,
+ help="Always use SSL connections when connecting to remote servers. "
+ "For backends that allow only encrypted connections, "
+ "S3QL uses SSL automatically, even if this option is not set.")
+
def add_debug_modules(self):
self.add_argument("--debug", action="append", metavar='<module>',
help="activate debugging output from <module>. Use `all` "
diff --git a/src/s3ql/umount.py b/src/s3ql/umount.py
index 18017c8..cf67d0a 100644
--- a/src/s3ql/umount.py
+++ b/src/s3ql/umount.py
@@ -46,66 +46,73 @@ def parse_args(args):
return parser.parse_args(args)
+class MountError(Exception):
+ """
+ Base class for mountpoint errors.
+ """
-def main(args=None):
- '''Umount S3QL file system
+ message = ''
- This function writes to stdout/stderr and calls `system.exit()` instead
- of returning.
- '''
+ def __init__(self, mountpoint):
+ super(MountError, self).__init__()
+ self.mountpoint = mountpoint
- if args is None:
- args = sys.argv[1:]
+ def __str__(self):
+ return self.message.format(self.mountpoint)
- options = parse_args(args)
- setup_logging(options)
- mountpoint = options.mountpoint
- # Check if it's a mount point
- if not posixpath.ismount(mountpoint):
- print('Not a mount point.', file=sys.stderr)
- sys.exit(1)
+class NotMountPointError(MountError):
- # Check if it's an S3QL mountpoint
- ctrlfile = os.path.join(mountpoint, CTRL_NAME)
- if not (CTRL_NAME not in llfuse.listdir(mountpoint)
- and os.path.exists(ctrlfile)):
- print('Not an S3QL file system.', file=sys.stderr)
- sys.exit(1)
+ message = '"{}" is not a mountpoint.'
- if options.lazy:
- lazy_umount(mountpoint)
- else:
- blocking_umount(mountpoint)
+class NotS3qlFsError(MountError):
-def lazy_umount(mountpoint):
- '''Invoke fusermount -u -z for mountpoint
+ message = '"{}" is not an S3QL file system.'
+
+
+class UmountError(MountError):
+
+ message = 'Error while unmounting "{}".'
+
+
+class MountInUseError(MountError):
+
+ message = '"{}" is being used.'
+
+
+def check_mount(mountpoint):
+ '''Check that "mountpoint" is a mountpoint and a valid s3ql fs'''
- This function writes to stdout/stderr and calls `system.exit()`.
- '''
+ if not posixpath.ismount(mountpoint):
+ raise NotMountPointError(mountpoint)
+
+ ctrlfile = os.path.join(mountpoint, CTRL_NAME)
+ if not (
+ CTRL_NAME not in llfuse.listdir(mountpoint) and
+ os.path.exists(ctrlfile)
+ ):
+ raise NotS3qlFsError(mountpoint)
+
+def lazy_umount(mountpoint):
+ '''Invoke fusermount -u -z for mountpoint'''
if os.getuid() == 0:
umount_cmd = ('umount', '-l', mountpoint)
else:
umount_cmd = ('fusermount', '-u', '-z', mountpoint)
- if not subprocess.call(umount_cmd) == 0:
- sys.exit(1)
+ if subprocess.call(umount_cmd) != 0:
+ raise UmountError(mountpoint)
def blocking_umount(mountpoint):
- '''Invoke fusermount and wait for daemon to terminate.
-
- This function writes to stdout/stderr and calls `system.exit()`.
- '''
+ '''Invoke fusermount and wait for daemon to terminate.'''
devnull = open('/dev/null', 'wb')
- if subprocess.call(['fuser', '-m', mountpoint], stdout=devnull,
- stderr=devnull) == 0:
- print('Cannot unmount, the following processes still access the mountpoint:')
- subprocess.call(['fuser', '-v', '-m', mountpoint], stdout=sys.stdout,
- stderr=sys.stdout)
- raise QuietError()
+ if subprocess.call(
+ ['fuser', '-m', mountpoint], stdout=devnull, stderr=devnull
+ ) == 0:
+ raise MountInUseError(mountpoint)
ctrlfile = os.path.join(mountpoint, CTRL_NAME)
@@ -133,7 +140,7 @@ def blocking_umount(mountpoint):
umount_cmd = ['fusermount', '-u', mountpoint]
if subprocess.call(umount_cmd) != 0:
- sys.exit(1)
+ raise UmountError(mountpoint)
# Wait for daemon
log.debug('Uploading metadata...')
@@ -166,5 +173,43 @@ def blocking_umount(mountpoint):
if step < 10:
step *= 2
+def umount(mountpoint, lazy=False):
+ '''Umount "mountpoint", blocks if not "lazy".'''
+
+ check_mount(mountpoint)
+ if lazy:
+ lazy_umount(mountpoint)
+ else:
+ blocking_umount(mountpoint)
+
+def main(args=None):
+ '''Umount S3QL file system'''
+
+ if args is None:
+ args = sys.argv[1:]
+
+ options = parse_args(args)
+ setup_logging(options)
+
+ try:
+ umount(options.mountpoint, options.lazy)
+ except NotMountPointError as err:
+ print(err, file=sys.stderr)
+ sys.exit(1)
+ except NotS3qlFsError as err:
+ print(err, file=sys.stderr)
+ sys.exit(2)
+ except UmountError as err:
+ print(err, file=sys.stderr)
+ sys.exit(3)
+ except MountInUseError:
+ print('Cannot unmount, the following processes still access the mountpoint:',
+ file=sys.stderr)
+ subprocess.call(['fuser', '-v', '-m', options.mountpoint],
+ stdout=sys.stderr, stderr=sys.stderr)
+ sys.exit(4)
+ else:
+ sys.exit(0)
+
if __name__ == '__main__':
main(sys.argv[1:])
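The refactoring above also makes unmounting usable as a library call rather than only via main(). A minimal sketch, assuming the package layout from this diff; the mountpoint path is a placeholder:

    from __future__ import print_function
    import sys
    from s3ql import umount

    try:
        # Blocks until the file system daemon has flushed and exited;
        # pass lazy=True for the fusermount -u -z behaviour instead.
        umount.umount('/mnt/s3ql', lazy=False)
    except umount.MountError as err:
        # Covers NotMountPointError, NotS3qlFsError, UmountError, MountInUseError.
        print(err, file=sys.stderr)
        sys.exit(1)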