author    Nikolaus Rath <Nikolaus@rath.org>    2016-03-09 10:08:21 -0800
committer Nikolaus Rath <Nikolaus@rath.org>    2016-03-09 10:08:21 -0800
commit    5b62499deb280253671a3c468644335be9e55f04 (patch)
tree      96a12cd0c8833ca23e5993614b7d0405f2422a9f /tests
Import s3ql_1.0.1.orig.tar.bz2
Diffstat (limited to 'tests')
-rw-r--r--  tests/__init__.py         16
-rw-r--r--  tests/_common.py          86
-rw-r--r--  tests/data.tar.bz2        bin 0 -> 826340 bytes
-rw-r--r--  tests/t1_backends.py      192
-rw-r--r--  tests/t1_multi_lock.py    93
-rw-r--r--  tests/t1_ordered_dict.py  142
-rw-r--r--  tests/t2_block_cache.py   389
-rw-r--r--  tests/t3_fs_api.py        777
-rw-r--r--  tests/t3_fsck.py          329
-rw-r--r--  tests/t3_inode_cache.py   99
-rw-r--r--  tests/t4_adm.py           67
-rw-r--r--  tests/t4_fuse.py          301
-rw-r--r--  tests/t5_cli.py           74
-rw-r--r--  tests/t5_cp.py            75
14 files changed, 2640 insertions, 0 deletions
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..06e210b
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,16 @@
+'''
+__init__.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+
+# Export all modules
+import os
+testdir = os.path.dirname(__file__)
+__all__ = [ name[:-3] for name in os.listdir(testdir) if name.endswith(".py") and
+ name != '__init__.py' ]
diff --git a/tests/_common.py b/tests/_common.py
new file mode 100644
index 0000000..66ff40f
--- /dev/null
+++ b/tests/_common.py
@@ -0,0 +1,86 @@
+'''
+_common.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+
+This module defines a new TestCase that aborts the test run as
+soon as a test fails. The module also serves as a storage container
+for authentication data that may be required for some test cases.
+
+
+Test case policy
+----------------
+
+Each test should correspond to exactly one function in the tested module. The
+test should assume that any other functions that are called by the tested
+function work perfectly. However, the test must not rely on the result of any
+other functions when checking the correctness of the tested function.
+
+Example: if a module has methods `write_file_to_disk`, `write_some_bytes` and
+`read_file_from_disk`, then the test for `write_file_to_disk` may assume that
+the `write_some_bytes` method that is called by `write_file_to_disk` works
+correctly, but it must not use the `read_file_from_disk` method to check if the
+file has been written correctly.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+import os
+import logging
+from s3ql.common import add_stdout_logging, setup_excepthook
+#from s3ql.common import LoggerFilter
+
+__all__ = [ 'TestCase' ]
+
+log = logging.getLogger()
+
+class TestCase(unittest.TestCase):
+
+ def __init__(self, *a, **kw):
+ super(TestCase, self).__init__(*a, **kw)
+
+ # Initialize logging if not yet initialized
+ root_logger = logging.getLogger()
+ if not root_logger.handlers:
+ handler = add_stdout_logging()
+ setup_excepthook()
+ handler.setLevel(logging.DEBUG)
+ root_logger.setLevel(logging.WARN)
+
+ # For debugging:
+ #root_logger.setLevel(logging.DEBUG)
+ #handler.addFilter(LoggerFilter(['UploadManager'],
+ # logging.INFO))
+
+ def run(self, result=None):
+ if result is None:
+ result = self.defaultTestResult()
+
+ super(TestCase, self).run(result)
+
+ # Abort if any test failed
+ if result.errors or result.failures:
+ result.stop()
+
+# Try to read credentials from file. Meant for developer use only,
+# so that we can run individual tests without the setup.py
+# initialization.
+def init_credentials():
+ keyfile = os.path.expanduser("~/.awssecret")
+
+ if not os.path.isfile(keyfile):
+ return None
+
+ with open(keyfile, "r") as fh:
+ key = fh.readline().rstrip()
+ pw = fh.readline().rstrip()
+
+ return (key, pw)
+
+aws_credentials = init_credentials()
+
+
+
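To make the test case policy quoted in _common.py concrete, here is a minimal sketch of a conforming test. The functions write_file_to_disk and write_some_bytes are hypothetical (they only echo the docstring's example and are not part of this commit); the point is that the test may rely on write_some_bytes() working, but verifies the result through an independent primitive (os.stat) instead of read_file_from_disk():

import os
import tempfile
import unittest

def write_some_bytes(fd, data):
    # hypothetical helper, named after the docstring's example
    os.write(fd, data)

def write_file_to_disk(path, data):
    # hypothetical function under test
    fd = os.open(path, os.O_WRONLY | os.O_CREAT, 0o644)
    try:
        write_some_bytes(fd, data)
    finally:
        os.close(fd)

class WriteTests(unittest.TestCase):

    def test_write_file_to_disk(self):
        # Allowed: assume write_some_bytes() works.
        # Not allowed: verify via read_file_from_disk().
        # os.stat() checks the outcome independently.
        path = os.path.join(tempfile.mkdtemp(), 'testfile')
        write_file_to_disk(path, b'foobar')
        self.assertEqual(os.stat(path).st_size, 6)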
diff --git a/tests/data.tar.bz2 b/tests/data.tar.bz2
new file mode 100644
index 0000000..0c8711f
--- /dev/null
+++ b/tests/data.tar.bz2
Binary files differ
diff --git a/tests/t1_backends.py b/tests/t1_backends.py
new file mode 100644
index 0000000..d629466
--- /dev/null
+++ b/tests/t1_backends.py
@@ -0,0 +1,192 @@
+'''
+t1_backends.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.backends import local, s3
+from s3ql.backends.common import ChecksumError, ObjectNotEncrypted, NoSuchObject
+import tempfile
+import os
+import time
+from _common import TestCase
+import _common
+from random import randrange
+
+class BackendTests(object):
+
+ def newname(self):
+ self.name_cnt += 1
+ # Include special characters
+ return "s3ql_=/_%d" % self.name_cnt
+
+ def test_store(self):
+ key = self.newname()
+ value = self.newname()
+ metadata = { 'jimmy': 'jups@42' }
+
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key)
+ self.bucket.store(key, value, metadata)
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket.fetch(key), (value, metadata))
+ self.assertEquals(self.bucket[key], value)
+
+ def test_fetch(self):
+ key = self.newname()
+ value = self.newname()
+ metadata = { 'jimmy': 'jups@42' }
+
+ self.assertRaises(NoSuchObject, self.bucket.fetch, key)
+ self.bucket.store(key, value, metadata)
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket.fetch(key), (value, metadata))
+
+ def test_lookup(self):
+ key = self.newname()
+ value = self.newname()
+ metadata = { 'jimmy': 'jups@42' }
+
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key)
+ self.bucket.store(key, value, metadata)
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket.lookup(key), metadata)
+
+ def test_contains(self):
+ key = self.newname()
+ value = self.newname()
+
+ self.assertFalse(key in self.bucket)
+ self.bucket[key] = value
+ time.sleep(self.delay)
+ self.assertTrue(key in self.bucket)
+
+ def test_delete(self):
+ key = self.newname()
+ value = self.newname()
+ self.bucket[key] = value
+ time.sleep(self.delay)
+
+ self.assertTrue(key in self.bucket)
+ del self.bucket[key]
+ time.sleep(self.delay)
+ self.assertFalse(key in self.bucket)
+
+ def test_clear(self):
+ self.bucket[self.newname()] = self.newname()
+ self.bucket[self.newname()] = self.newname()
+
+ time.sleep(self.delay)
+ self.assertEquals(len(list(self.bucket)), 2)
+ self.bucket.clear()
+ time.sleep(self.delay)
+ self.assertEquals(len(list(self.bucket)), 0)
+
+ def test_list(self):
+
+ keys = [ self.newname() for dummy in range(12) ]
+ values = [ self.newname() for dummy in range(12) ]
+ for i in range(12):
+ self.bucket[keys[i]] = values[i]
+
+ time.sleep(self.delay)
+ self.assertEquals(sorted(self.bucket.list()), sorted(keys))
+
+ def test_encryption(self):
+ bucket = self.bucket
+ bucket.passphrase = None
+ bucket['plain'] = b'foobar452'
+
+ bucket.passphrase = 'schlurp'
+ bucket.store('encrypted', 'testdata', { 'tag': True })
+ time.sleep(self.delay)
+ self.assertEquals(bucket['encrypted'], b'testdata')
+ self.assertRaises(ObjectNotEncrypted, bucket.fetch, 'plain')
+ self.assertRaises(ObjectNotEncrypted, bucket.lookup, 'plain')
+
+ bucket.passphrase = None
+ self.assertRaises(ChecksumError, bucket.fetch, 'encrypted')
+ self.assertRaises(ChecksumError, bucket.lookup, 'encrypted')
+
+ bucket.passphrase = self.passphrase
+ self.assertRaises(ChecksumError, bucket.fetch, 'encrypted')
+ self.assertRaises(ChecksumError, bucket.lookup, 'encrypted')
+ self.assertRaises(ObjectNotEncrypted, bucket.fetch, 'plain')
+ self.assertRaises(ObjectNotEncrypted, bucket.lookup, 'plain')
+
+ def test_copy(self):
+
+ key1 = self.newname()
+ key2 = self.newname()
+ value = self.newname()
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key1)
+ self.assertRaises(NoSuchObject, self.bucket.lookup, key2)
+
+ self.bucket.store(key1, value)
+ time.sleep(self.delay)
+ self.bucket.copy(key1, key2)
+
+ time.sleep(self.delay)
+ self.assertEquals(self.bucket[key2], value)
+
+
+# This test takes too long for routine runs (we have to wait out S3's
+# propagation delays to avoid spurious errors)
+@unittest.skip('takes too long')
+@unittest.skipUnless(_common.aws_credentials, 'no AWS credentials available')
+class S3Tests(BackendTests, TestCase):
+ @staticmethod
+ def random_name(prefix=""):
+ return "s3ql-" + prefix + str(randrange(1000, 9999, 1))
+
+ def setUp(self):
+ self.name_cnt = 0
+ self.conn = s3.Connection(*_common.aws_credentials)
+
+ self.bucketname = self.random_name()
+ tries = 10
+        while self.conn.bucket_exists(self.bucketname) and tries > 0:
+ self.bucketname = self.random_name()
+ tries -= 1
+
+ if tries == 0:
+ raise RuntimeError("Failed to find an unused bucket name.")
+
+ self.passphrase = 'flurp'
+ self.bucket = self.conn.create_bucket(self.bucketname, self.passphrase)
+
+ # This is the time in which we expect S3 changes to propagate. It may
+ # be much longer for larger objects, but for tests this is usually enough.
+ self.delay = 8
+ time.sleep(self.delay)
+
+ def tearDown(self):
+ self.conn.delete_bucket(self.bucketname, recursive=True)
+
+class LocalTests(BackendTests, TestCase):
+
+ def setUp(self):
+ self.name_cnt = 0
+ self.conn = local.Connection()
+ self.bucket_dir = tempfile.mkdtemp()
+ self.bucketname = os.path.join(self.bucket_dir, 'mybucket')
+ self.passphrase = 'flurp'
+ self.bucket = self.conn.create_bucket(self.bucketname, self.passphrase)
+ self.delay = 0
+
+ def tearDown(self):
+ self.conn.delete_bucket(self.bucketname, recursive=True)
+ os.rmdir(self.bucket_dir)
+
+# Provide a suite() function, as recommended by the pyunit documentation
+def suite():
+ return unittest.makeSuite(LocalTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
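Note the design of the module above: BackendTests deliberately does not inherit from TestCase, so the shared checks never run on their own; they are only collected through concrete classes such as LocalTests and S3Tests, which mix in TestCase and supply setUp()/tearDown(). Under that reading, wiring up a further backend would be a sketch along these lines (the ftp module and FTPTests are hypothetical names, purely for illustration):

class FTPTests(BackendTests, TestCase):

    def setUp(self):
        from s3ql.backends import ftp   # hypothetical backend module
        self.name_cnt = 0
        self.conn = ftp.Connection()
        self.bucketname = 'test-bucket'
        self.passphrase = 'flurp'
        self.bucket = self.conn.create_bucket(self.bucketname, self.passphrase)
        self.delay = 0                  # assumed: no propagation delay

    def tearDown(self):
        self.conn.delete_bucket(self.bucketname, recursive=True)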
diff --git a/tests/t1_multi_lock.py b/tests/t1_multi_lock.py
new file mode 100644
index 0000000..eeaf0ee
--- /dev/null
+++ b/tests/t1_multi_lock.py
@@ -0,0 +1,93 @@
+'''
+t1_multi_lock.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.multi_lock import MultiLock
+import time
+from s3ql.common import AsyncFn
+from _common import TestCase
+
+BASE_DELAY = 1
+
+@unittest.skip('takes too long')
+class MultiLockTests(TestCase):
+
+ def test_lock(self):
+ mlock = MultiLock()
+ key = (22, 'bar')
+
+ def hold():
+ mlock.acquire(key)
+ time.sleep(2 * BASE_DELAY)
+ mlock.release(key)
+
+ t = AsyncFn(hold)
+ t.start()
+ time.sleep(BASE_DELAY)
+
+ stamp = time.time()
+ with mlock(key):
+ pass
+ self.assertTrue(time.time() - stamp > BASE_DELAY)
+
+ t.join_and_raise()
+
+ def test_nolock(self):
+ mlock = MultiLock()
+ key1 = (22, 'bar')
+ key2 = (23, 'bar')
+
+ def hold():
+ mlock.acquire(key1)
+ time.sleep(2 * BASE_DELAY)
+ mlock.release(key1)
+
+ t = AsyncFn(hold)
+ t.start()
+ time.sleep(BASE_DELAY)
+
+ stamp = time.time()
+ with mlock(key2):
+ pass
+ self.assertTrue(time.time() - stamp < BASE_DELAY)
+
+ t.join_and_raise()
+
+ def test_multi(self):
+ mlock = MultiLock()
+ key = (22, 'bar')
+
+ def lock():
+ mlock.acquire(key)
+
+ def unlock():
+ time.sleep(2 * BASE_DELAY)
+ mlock.release(key)
+
+ t1 = AsyncFn(lock)
+ t1.start()
+ t1.join_and_raise()
+
+ t2 = AsyncFn(unlock)
+ t2.start()
+
+ stamp = time.time()
+ with mlock(key):
+ pass
+ self.assertTrue(time.time() - stamp > BASE_DELAY)
+
+ t2.join_and_raise()
+
+def suite():
+ return unittest.makeSuite(MultiLockTests)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t1_ordered_dict.py b/tests/t1_ordered_dict.py
new file mode 100644
index 0000000..b25bda8
--- /dev/null
+++ b/tests/t1_ordered_dict.py
@@ -0,0 +1,142 @@
+'''
+t1_ordered_dict.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.ordered_dict import OrderedDict
+from _common import TestCase
+
+class OrderedDictTests(TestCase):
+
+ def test_1_add_del(self):
+ od = OrderedDict()
+
+ key1 = 'key1'
+ val1 = 'val1'
+
+ # Add elements
+ def add_one():
+ od[key1] = val1
+ self.assertEqual(od.get_first(), val1)
+ self.assertEquals(od.get_last(), val1)
+ self.assertEqual(od.get(key1), val1)
+ self.assertTrue(od)
+ self.assertTrue(key1 in od)
+ self.assertEqual(len(od), 1)
+
+
+ add_one()
+ del od[key1]
+ self.assertFalse(od)
+ self.assertFalse(key1 in od)
+ self.assertEqual(len(od), 0)
+ self.assertRaises(IndexError, od.get_first)
+ self.assertRaises(IndexError, od.get_last)
+
+ add_one()
+ self.assertEqual(od.pop_first(), val1)
+ self.assertFalse(od)
+ self.assertFalse(key1 in od)
+ self.assertEqual(len(od), 0)
+ self.assertRaises(IndexError, od.get_first)
+ self.assertRaises(IndexError, od.get_last)
+
+ add_one()
+ self.assertEqual(od.pop_last(), val1)
+ self.assertFalse(od)
+ self.assertFalse(key1 in od)
+ self.assertEqual(len(od), 0)
+ self.assertRaises(IndexError, od.get_first)
+ self.assertRaises(IndexError, od.get_last)
+
+
+ def test_2_order_simple(self):
+ od = OrderedDict()
+
+ key1 = 'key1'
+ val1 = 'val1'
+ key2 = 'key2'
+ val2 = 'val2'
+
+ od[key1] = val1
+ od[key2] = val2
+
+ self.assertEqual(od.get_first(), val2)
+ self.assertEquals(od.get_last(), val1)
+
+ od[key1] = val1
+ self.assertEqual(od.get_first(), val2)
+ self.assertEquals(od.get_last(), val1)
+
+ od.to_tail(key1)
+ self.assertEqual(od.get_first(), val2)
+ self.assertEquals(od.get_last(), val1)
+
+ od.to_head(key1)
+ self.assertEqual(od.get_first(), val1)
+ self.assertEquals(od.get_last(), val2)
+
+ def test_3_order_cmplx(self):
+ od = OrderedDict()
+ no = 10
+
+ keys = [ 'key number %d' % i for i in range(no) ]
+ vals = [ 'value number %d' % i for i in range(no) ]
+
+ for i in range(no):
+ od[keys[i]] = vals[i]
+ keys.reverse()
+ self._compareOrder(od, keys)
+
+ # Move around different elements
+ for i in [ 0, int((no - 1) / 2), no - 1]:
+ od.to_head(keys[i])
+ keys = [ keys[i] ] + keys[:i] + keys[i + 1:]
+ self._compareOrder(od, keys)
+
+ od.to_tail(keys[i])
+ keys = keys[:i] + keys[i + 1:] + [ keys[i] ]
+ self._compareOrder(od, keys)
+
+ remove = keys[i]
+ del od[remove]
+ keys = keys[:i] + keys[i + 1:]
+ self._compareOrder(od, keys)
+
+ od[remove] = 'something new'
+ keys.insert(0, remove)
+ self._compareOrder(od, keys)
+
+ def _compareOrder(self, od, keys):
+ od_i = iter(od)
+ keys_i = iter(keys)
+ while True:
+ try:
+ key = keys_i.next()
+ except StopIteration:
+ break
+ self.assertEquals(od_i.next(), key)
+ self.assertRaises(StopIteration, od_i.next)
+
+ od_i = reversed(od)
+ keys_i = reversed(keys)
+ while True:
+ try:
+ key = keys_i.next()
+ except StopIteration:
+ break
+ self.assertEquals(od_i.next(), key)
+ self.assertRaises(StopIteration, od_i.next)
+
+
+def suite():
+ return unittest.makeSuite(OrderedDictTests)
+
+if __name__ == "__main__":
+ unittest.main()
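The tests above pin down the interface of s3ql's OrderedDict: insertion puts new entries at the head, get_first()/get_last() and pop_first()/pop_last() operate on the two ends, and to_head()/to_tail() reposition existing keys. One plausible use of such a structure is LRU bookkeeping; a minimal sketch under that assumption, using only the methods exercised above:

from s3ql.ordered_dict import OrderedDict

lru = OrderedDict()
lru['a'] = 1            # new entries end up at the head
lru['b'] = 2            # 'b' is now first, 'a' last

lru.to_head('a')        # mark 'a' as most recently used
oldest = lru.pop_last() # evict the least recently used entry
assert oldest == 2      # 'b' was evicted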
diff --git a/tests/t2_block_cache.py b/tests/t2_block_cache.py
new file mode 100644
index 0000000..f80883a
--- /dev/null
+++ b/tests/t2_block_cache.py
@@ -0,0 +1,389 @@
+'''
+t2_block_cache.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+from s3ql.block_cache import BlockCache
+from s3ql.backends import local
+from s3ql.backends.common import NoSuchObject
+from s3ql.common import create_tables, init_tables
+from s3ql.database import Connection
+import os
+import tempfile
+from _common import TestCase
+import unittest2 as unittest
+import stat
+import time
+import llfuse
+import shutil
+
+class cache_tests(TestCase):
+
+ def setUp(self):
+
+ self.bucket_dir = tempfile.mkdtemp()
+ self.bucket = local.Connection().get_bucket(self.bucket_dir)
+
+ self.cachedir = tempfile.mkdtemp() + "/"
+ self.blocksize = 1024
+
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+
+ # Create an inode we can work with
+ self.inode = 42
+ self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?)",
+ (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))
+
+ self.cache = BlockCache(self.bucket, self.db, self.cachedir,
+ 100 * self.blocksize)
+ self.cache.init()
+
+        # Tested methods assume that they are called from the
+        # file system request handler
+ llfuse.lock.acquire()
+
+ # We do not want background threads
+ self.cache.commit_thread.stop()
+
+
+ def tearDown(self):
+ self.cache.upload_manager.bucket = self.bucket
+ self.cache.destroy()
+ if os.path.exists(self.cachedir):
+ shutil.rmtree(self.cachedir)
+ shutil.rmtree(self.bucket_dir)
+
+ llfuse.lock.release()
+
+ @staticmethod
+ def random_data(len_):
+ with open("/dev/urandom", "rb") as fh:
+ return fh.read(len_)
+
+ def test_get(self):
+ inode = self.inode
+ blockno = 11
+ data = self.random_data(int(0.5 * self.blocksize))
+
+ # Case 1: Object does not exist yet
+ with self.cache.get(inode, blockno) as fh:
+ fh.seek(0)
+ fh.write(data)
+
+ # Case 2: Object is in cache
+ with self.cache.get(inode, blockno) as fh:
+ fh.seek(0)
+ self.assertEqual(data, fh.read(len(data)))
+
+ # Case 3: Object needs to be downloaded
+ self.cache.clear()
+ self.cache.upload_manager.join_all()
+ with self.cache.get(inode, blockno) as fh:
+ fh.seek(0)
+ self.assertEqual(data, fh.read(len(data)))
+
+
+ def test_expire(self):
+ inode = self.inode
+
+ # Define the 4 most recently accessed ones
+ most_recent = [7,11,10,8]
+ for i in most_recent:
+ time.sleep(0.2)
+ with self.cache.get(inode, i) as fh:
+ fh.write('%d' % i)
+
+ # And some others
+ for i in range(20):
+ if i in most_recent:
+ continue
+ with self.cache.get(inode, i) as fh:
+ fh.write('%d' % i)
+
+ # Flush the 2 most recently accessed ones
+ commit(self.cache, inode, most_recent[-2])
+ commit(self.cache, inode, most_recent[-3])
+
+ # We want to expire 4 entries, 2 of which are already flushed
+ self.cache.max_entries = 16
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=2)
+ self.cache.expire()
+ self.cache.upload_manager.join_all()
+ self.cache.upload_manager.bucket.verify()
+ self.assertEqual(len(self.cache.cache), 16)
+
+ for i in range(20):
+ if i in most_recent:
+ self.assertTrue((inode, i) not in self.cache.cache)
+ else:
+ self.assertTrue((inode, i) in self.cache.cache)
+
+ def test_upload(self):
+ inode = self.inode
+ datalen = int(0.1 * self.cache.max_size)
+ blockno1 = 21
+ blockno2 = 25
+ blockno3 = 7
+
+ data1 = self.random_data(datalen)
+ data2 = self.random_data(datalen)
+ data3 = self.random_data(datalen)
+
+ mngr = self.cache.upload_manager
+
+ # Case 1: create new object
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ el1 = fh
+ mngr.add(el1)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ # Case 2: Link new object
+ self.cache.upload_manager.bucket = TestBucket(self.bucket)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ el2 = fh
+ mngr.add(el2)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ # Case 3: Upload old object, still has references
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data2)
+ mngr.add(el1)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+
+ # Case 4: Upload old object, no references left
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1, no_store=1)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data3)
+ mngr.add(el2)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ # Case 5: Link old object, no references left
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data2)
+ mngr.add(el2)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+
+ # Case 6: Link old object, still has references
+ # (Need to create another object first)
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno3) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ el3 = fh
+ mngr.add(el3)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ self.cache.upload_manager.bucket = TestBucket(self.bucket)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ mngr.add(el1)
+ mngr.join_all()
+ self.cache.removal_queue.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+
+
+ def test_remove_referenced(self):
+ inode = self.inode
+ datalen = int(0.1 * self.cache.max_size)
+ blockno1 = 21
+ blockno2 = 24
+ data = self.random_data(datalen)
+
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ with self.cache.get(inode, blockno1) as fh:
+ fh.seek(0)
+ fh.write(data)
+ with self.cache.get(inode, blockno2) as fh:
+ fh.seek(0)
+ fh.write(data)
+ self.cache.clear()
+ self.cache.upload_manager.join_all()
+ self.cache.upload_manager.bucket.verify()
+
+ self.cache.upload_manager.bucket = TestBucket(self.bucket)
+ self.cache.remove(inode, blockno1)
+ self.cache.upload_manager.bucket.verify()
+
+ def test_remove_cache(self):
+ inode = self.inode
+ data1 = self.random_data(int(0.4 * self.blocksize))
+
+ # Case 1: Elements only in cache
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ self.cache.remove(inode, 1)
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ self.assertTrue(fh.read(42) == '')
+
+ def test_remove_cache_db(self):
+ inode = self.inode
+ data1 = self.random_data(int(0.4 * self.blocksize))
+
+ # Case 2: Element in cache and db
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ commit(self.cache, inode)
+ self.cache.upload_manager.bucket.verify()
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1)
+ self.cache.remove(inode, 1)
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ self.assertTrue(fh.read(42) == '')
+
+ def test_remove_db(self):
+ inode = self.inode
+ data1 = self.random_data(int(0.4 * self.blocksize))
+
+ # Case 3: Element only in DB
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ fh.write(data1)
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_store=1)
+ self.cache.clear()
+ self.cache.upload_manager.join_all()
+ self.cache.upload_manager.bucket.verify()
+ self.cache.upload_manager.bucket = TestBucket(self.bucket, no_del=1)
+ self.cache.remove(inode, 1)
+ with self.cache.get(inode, 1) as fh:
+ fh.seek(0)
+ self.assertTrue(fh.read(42) == '')
+
+
+class TestBucket(object):
+ def __init__(self, bucket, no_fetch=0, no_store=0, no_del=0):
+ self.no_fetch = no_fetch
+ self.no_store = no_store
+ self.no_del = no_del
+ self.bucket = bucket
+
+ def read_after_create_consistent(self):
+ return self.bucket.read_after_create_consistent()
+
+ def read_after_write_consistent(self):
+ return self.bucket.read_after_write_consistent()
+
+ def verify(self):
+ if self.no_fetch != 0:
+ raise RuntimeError('Got too few fetch calls')
+ if self.no_store != 0:
+ raise RuntimeError('Got too few store calls')
+ if self.no_del != 0:
+ raise RuntimeError('Got too few delete calls')
+
+ def prep_store_fh(self, *a, **kw):
+ (size, fn) = self.bucket.prep_store_fh(*a, **kw)
+ def fn2():
+ self.no_store -= 1
+ if self.no_store < 0:
+ raise RuntimeError('Got too many store calls')
+ return fn()
+
+ return (size, fn2)
+
+ def store_fh(self, *a, **kw):
+ self.no_store -= 1
+
+ if self.no_store < 0:
+ raise RuntimeError('Got too many store calls')
+
+ return self.bucket.store_fh(*a, **kw)
+
+ def fetch_fh(self, *a, **kw):
+ self.no_fetch -= 1
+
+ if self.no_fetch < 0:
+ raise RuntimeError('Got too many fetch calls')
+
+ return self.bucket.fetch_fh(*a, **kw)
+
+ def delete(self, *a, **kw):
+ self.no_del -= 1
+
+ if self.no_del < 0:
+ raise RuntimeError('Got too many delete calls')
+
+ try:
+ return self.bucket.delete(*a, **kw)
+ except NoSuchObject:
+ # Don't count key errors
+ self.no_del += 1
+ raise
+
+
+ def __delitem__(self, key):
+ self.delete(key)
+
+ def __iter__(self):
+ return self.bucket.list()
+
+ def __contains__(self, key):
+ return self.bucket.contains(key)
+
+
+def commit(self, inode, block=None):
+ """Upload data for `inode`
+
+    This helper is intended for testing only: it blocks until all
+    pending uploads have completed.
+ """
+
+ for el in self.cache.itervalues():
+ if el.inode != inode:
+ continue
+ if not el.dirty:
+ continue
+
+ if block is not None and el.blockno != block:
+ continue
+
+ self.upload_manager.add(el)
+
+ self.upload_manager.join_all()
+
+
+def suite():
+ return unittest.makeSuite(cache_tests)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t3_fs_api.py b/tests/t3_fs_api.py
new file mode 100644
index 0000000..04a2249
--- /dev/null
+++ b/tests/t3_fs_api.py
@@ -0,0 +1,777 @@
+'''
+t3_fs_api.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+from random import randint
+from s3ql.fsck import Fsck
+from s3ql import fs
+from s3ql.backends import local
+from s3ql.common import ROOT_INODE, create_tables, init_tables
+from llfuse import FUSEError
+from s3ql.database import Connection
+from _common import TestCase
+import os
+import stat
+import time
+import llfuse
+import unittest2 as unittest
+import errno
+import shutil
+import tempfile
+
+# We need to access protected members
+#pylint: disable=W0212
+
+class Ctx(object):
+ def __init__(self):
+ self.uid = randint(0, 2 ** 32)
+ self.gid = randint(0, 2 ** 32)
+
+# Determine system clock granularity
+stamp1 = time.time()
+stamp2 = stamp1
+while stamp1 == stamp2:
+ stamp2 = time.time()
+CLOCK_GRANULARITY = 2 * (stamp2 - stamp1)
+del stamp1
+del stamp2
+
+class fs_api_tests(TestCase):
+
+ def setUp(self):
+ self.bucket_dir = tempfile.mkdtemp()
+ self.bucket = local.Connection().get_bucket(self.bucket_dir)
+ self.cachedir = tempfile.mkdtemp() + "/"
+ self.blocksize = 1024
+
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+
+ self.server = fs.Operations(self.bucket, self.db, self.cachedir,
+ self.blocksize, cache_size=self.blocksize * 5)
+
+        # Tested methods assume that they are called from the
+        # file system request handler
+ llfuse.lock.acquire()
+
+ self.server.init()
+
+ # We don't want background flushing
+ self.server.cache.commit_thread.stop()
+ self.server.inode_flush_thread.stop()
+
+ # Keep track of unused filenames
+ self.name_cnt = 0
+
+ def tearDown(self):
+ self.server.destroy()
+ if os.path.exists(self.cachedir):
+ shutil.rmtree(self.cachedir)
+ shutil.rmtree(self.bucket_dir)
+ llfuse.lock.release()
+
+ @staticmethod
+ def random_data(len_):
+ with open("/dev/urandom", "rb") as fd:
+ return fd.read(len_)
+
+ def fsck(self):
+ self.server.cache.clear()
+ self.server.cache.upload_manager.join_all()
+ self.server.inodes.flush()
+ fsck = Fsck(self.cachedir, self.bucket,
+ { 'blocksize': self.blocksize }, self.db)
+ fsck.check()
+ self.assertFalse(fsck.found_errors)
+
+ def newname(self):
+ self.name_cnt += 1
+ return "s3ql_%d" % self.name_cnt
+
+ def test_getattr_root(self):
+ self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode))
+ self.fsck()
+
+ def test_create(self):
+ ctx = Ctx()
+ mode = self.dir_mode()
+ name = self.newname()
+
+ inode_p_old = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server._create(ROOT_INODE, name, mode, ctx)
+
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE))
+
+ inode = self.server.getattr(id_)
+
+ self.assertEqual(inode.mode, mode)
+ self.assertEqual(inode.uid, ctx.uid)
+ self.assertEqual(inode.gid, ctx.gid)
+ self.assertEqual(inode.refcount, 1)
+ self.assertEqual(inode.size, 0)
+
+ inode_p_new = self.server.getattr(ROOT_INODE)
+
+ self.assertGreater(inode_p_new.mtime, inode_p_old.mtime)
+ self.assertGreater(inode_p_new.ctime, inode_p_old.ctime)
+
+ self.fsck()
+
+ def test_extstat(self):
+ # Test with zero contents
+ self.server.extstat()
+
+ # Test with empty file
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+ self.server.extstat()
+
+ # Test with data in file
+ fh = self.server.open(inode.id, os.O_RDWR)
+ self.server.write(fh, 0, 'foobar')
+ self.server.release(fh)
+
+ self.server.extstat()
+
+ self.fsck()
+
+ @staticmethod
+ def dir_mode():
+ return (randint(0, 07777) & ~stat.S_IFDIR) | stat.S_IFDIR
+
+ @staticmethod
+ def file_mode():
+ return (randint(0, 07777) & ~stat.S_IFREG) | stat.S_IFREG
+
+ def test_getxattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.assertRaises(FUSEError, self.server.getxattr, inode.id, 'nonexistant-attr')
+
+ self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
+ self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')
+
+ self.fsck()
+
+ def test_link(self):
+ name = self.newname()
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
+ self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+ time.sleep(CLOCK_GRANULARITY)
+
+ inode_before = self.server.getattr(inode.id).copy()
+ self.server.link(inode.id, inode_p_new.id, name)
+
+ inode_after = self.server.lookup(inode_p_new.id, name)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, inode_p_new.id))
+
+ self.assertEqual(inode_before.id, id_)
+ self.assertEqual(inode_after.refcount, 2)
+ self.assertGreater(inode_after.ctime, inode_before.ctime)
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+
+ self.fsck()
+
+ def test_listxattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.assertListEqual([], self.server.listxattr(inode.id))
+
+ self.server.setxattr(inode.id, 'key1', 'blub')
+ self.assertListEqual(['key1'], self.server.listxattr(inode.id))
+
+ self.server.setxattr(inode.id, 'key2', 'blub')
+ self.assertListEqual(sorted(['key1', 'key2']),
+ sorted(self.server.listxattr(inode.id)))
+
+ self.fsck()
+
+ def test_read(self):
+
+ len_ = self.blocksize
+ data = self.random_data(len_)
+ off = self.blocksize // 2
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+
+ self.server.write(fh, off, data)
+ inode_before = self.server.getattr(inode.id).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.assertTrue(self.server.read(fh, off, len_) == data)
+ inode_after = self.server.getattr(inode.id)
+ self.assertGreater(inode_after.atime, inode_before.atime)
+ self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
+ self.assertTrue(self.server.read(fh, self.blocksize, len_) == data[off:])
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_readdir(self):
+
+ # Create a few entries
+ names = [ 'entry_%2d' % i for i in range(20) ]
+ for name in names:
+ (fh, _) = self.server.create(ROOT_INODE, name,
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+        # Delete some to make sure that we don't have continuous rowids
+ remove_no = [0, 2, 3, 5, 9]
+ for i in remove_no:
+ self.server.unlink(ROOT_INODE, names[i])
+ del names[i]
+
+ # Read all
+ fh = self.server.opendir(ROOT_INODE)
+ self.assertListEqual(sorted(names + ['lost+found']) ,
+ sorted(x[0] for x in self.server.readdir(fh, 0)))
+ self.server.releasedir(fh)
+
+ # Read in parts
+ fh = self.server.opendir(ROOT_INODE)
+ entries = list()
+ try:
+ next_ = 0
+ while True:
+ gen = self.server.readdir(fh, next_)
+ for _ in range(3):
+ (name, _, next_) = next(gen)
+ entries.append(name)
+
+ except StopIteration:
+ pass
+
+ self.assertListEqual(sorted(names + ['lost+found']) ,
+ sorted(entries))
+ self.server.releasedir(fh)
+
+ self.fsck()
+
+ def test_release(self):
+ name = self.newname()
+
+ # Test that entries are deleted when they're no longer referenced
+ (fh, inode) = self.server.create(ROOT_INODE, name,
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'foobar')
+ self.server.unlink(ROOT_INODE, name)
+ self.assertFalse(self.db.has_val('SELECT 1 FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertTrue(self.server.getattr(inode.id).id)
+ self.server.release(fh)
+
+ self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
+
+ self.fsck()
+
+ def test_removexattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.assertRaises(FUSEError, self.server.removexattr, inode.id, 'some name')
+ self.server.setxattr(inode.id, 'key1', 'blub')
+ self.server.removexattr(inode.id, 'key1')
+ self.assertListEqual([], self.server.listxattr(inode.id))
+
+ self.fsck()
+
+ def test_rename(self):
+ oldname = self.newname()
+ newname = self.newname()
+
+ inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+ inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+
+ self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
+
+ inode_p_old_after = self.server.getattr(ROOT_INODE)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (oldname, ROOT_INODE)))
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (newname, inode_p_new.id))
+ self.assertEqual(inode.id, id_)
+
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+ self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
+ self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
+
+
+ self.fsck()
+
+ def test_replace_file(self):
+ oldname = self.newname()
+ newname = self.newname()
+
+ (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'some data to deal with')
+ self.server.release(fh)
+ self.server.setxattr(inode.id, 'test_xattr', '42*8')
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+ inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
+
+ (fh, inode2) = self.server.create(inode_p_new.id, newname, self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'even more data to deal with')
+ self.server.release(fh)
+ self.server.setxattr(inode2.id, 'test_xattr', '42*8')
+
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
+
+ inode_p_old_after = self.server.getattr(ROOT_INODE)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (oldname, ROOT_INODE)))
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (newname, inode_p_new.id))
+ self.assertEqual(inode.id, id_)
+
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+ self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
+ self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
+
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
+
+ self.fsck()
+
+ def test_replace_dir(self):
+ oldname = self.newname()
+ newname = self.newname()
+
+ inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())
+
+ inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
+ inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
+ inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
+
+ inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())
+
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)
+
+ inode_p_old_after = self.server.getattr(ROOT_INODE)
+ inode_p_new_after = self.server.getattr(inode_p_new.id)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (oldname, ROOT_INODE)))
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (newname, inode_p_new.id))
+ self.assertEqual(inode.id, id_)
+
+ self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
+ self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
+ self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
+ self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)
+
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
+
+ self.fsck()
+
+ def test_setattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0641, Ctx())
+ self.server.release(fh)
+ inode_old = self.server.getattr(inode.id).copy()
+
+ attr = llfuse.EntryAttributes()
+ attr.st_mode = self.file_mode()
+ attr.st_uid = randint(0, 2 ** 32)
+ attr.st_gid = randint(0, 2 ** 32)
+ attr.st_rdev = randint(0, 2 ** 32)
+ attr.st_atime = time.timezone + randint(0, 2 ** 32) / 10 ** 6
+ attr.st_mtime = time.timezone + randint(0, 2 ** 32) / 10 ** 6
+
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.setattr(inode.id, attr)
+ inode_new = self.server.getattr(inode.id)
+ self.assertGreater(inode_new.ctime, inode_old.ctime)
+
+ for key in attr.__slots__:
+ if getattr(attr, key) is not None:
+ self.assertEquals(getattr(attr, key),
+ getattr(inode_new, key))
+
+
+ def test_truncate(self):
+ len_ = int(2.7 * self.blocksize)
+ data = self.random_data(len_)
+ attr = llfuse.EntryAttributes()
+
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx())
+ self.server.write(fh, 0, data)
+
+ attr.st_size = len_ // 2
+ self.server.setattr(inode.id, attr)
+ self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
+ attr.st_size = len_
+ self.server.setattr(inode.id, attr)
+ self.assertTrue(self.server.read(fh, 0, len_)
+ == data[:len_ // 2] + b'\0' * (len_ // 2))
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_truncate_0(self):
+ len1 = 158
+ len2 = 133
+ attr = llfuse.EntryAttributes()
+
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, self.random_data(len1))
+ self.server.release(fh)
+ self.server.inodes.flush()
+
+ fh = self.server.open(inode.id, os.O_RDWR)
+ attr.st_size = 0
+ self.server.setattr(inode.id, attr)
+ self.server.write(fh, 0, self.random_data(len2))
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_setxattr(self):
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
+ self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')
+
+ self.fsck()
+
+ def test_statfs(self):
+ # Test with zero contents
+ self.server.statfs()
+
+ # Test with empty file
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+ self.server.statfs()
+
+ # Test with data in file
+ fh = self.server.open(inode.id, None)
+ self.server.write(fh, 0, 'foobar')
+ self.server.release(fh)
+
+ self.server.statfs()
+
+ def test_symlink(self):
+ target = self.newname()
+ name = self.newname()
+
+ inode_p_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
+ inode_p_after = self.server.getattr(ROOT_INODE)
+
+ self.assertEqual(target, self.server.readlink(inode.id))
+
+ id_ = self.db.get_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE))
+
+ self.assertEqual(inode.id, id_)
+ self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
+ self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
+
+
+ def test_unlink(self):
+ name = self.newname()
+
+ (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'some data to deal with')
+ self.server.release(fh)
+
+ # Add extended attributes
+ self.server.setxattr(inode.id, 'test_xattr', '42*8')
+
+ inode_p_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.unlink(ROOT_INODE, name)
+ inode_p_after = self.server.getattr(ROOT_INODE)
+
+ self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
+ self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
+
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
+
+ self.fsck()
+
+ def test_rmdir(self):
+ name = self.newname()
+ inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
+ inode_p_before = self.server.getattr(ROOT_INODE).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.rmdir(ROOT_INODE, name)
+ inode_p_after = self.server.getattr(ROOT_INODE)
+
+ self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
+ self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
+
+ self.fsck()
+
+ def test_relink(self):
+ name = self.newname()
+ name2 = self.newname()
+ data = 'some data to deal with'
+
+ (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
+ self.server.write(fh, 0, data)
+ self.server.unlink(ROOT_INODE, name)
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, ROOT_INODE)))
+ self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
+
+ self.server.link(inode.id, ROOT_INODE, name2)
+ self.server.release(fh)
+
+ fh = self.server.open(inode.id, os.O_RDONLY)
+ self.assertTrue(self.server.read(fh, 0, len(data)) == data)
+ self.server.release(fh)
+ self.fsck()
+
+ def test_write(self):
+ len_ = self.blocksize
+ data = self.random_data(len_)
+ off = self.blocksize // 2
+ (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
+ self.file_mode(), Ctx())
+ inode_before = self.server.getattr(inode.id).copy()
+ time.sleep(CLOCK_GRANULARITY)
+ self.server.write(fh, off, data)
+ inode_after = self.server.getattr(inode.id)
+
+ self.assertGreater(inode_after.mtime, inode_before.mtime)
+ self.assertGreater(inode_after.ctime, inode_before.ctime)
+ self.assertEqual(inode_after.size, off + len_)
+
+ self.server.write(fh, 0, data)
+ inode_after = self.server.getattr(inode.id)
+ self.assertEqual(inode_after.size, off + len_)
+
+ self.server.release(fh)
+
+ self.fsck()
+
+ def test_copy_tree(self):
+
+ src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
+ dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())
+
+ # Create file
+ (fh, f1_inode) = self.server.create(src_inode.id, 'file1',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file1 contents')
+ self.server.release(fh)
+
+ # Create hardlink
+ (fh, f2_inode) = self.server.create(src_inode.id, 'file2',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file2 contents')
+ self.server.release(fh)
+ f2_inode = self.server.link(f2_inode.id, src_inode.id, 'file2_hardlink')
+
+ # Create subdirectory
+ d1_inode = self.server.mkdir(src_inode.id, 'dir1', self.dir_mode(), Ctx())
+ d2_inode = self.server.mkdir(d1_inode.id, 'dir2', self.dir_mode(), Ctx())
+
+        # ... with a third hardlink
+ f2_inode = self.server.link(f2_inode.id, d1_inode.id, 'file2_hardlink')
+
+ # Replicate
+ self.server.copy_tree(src_inode.id, dst_inode.id)
+
+ # Change files
+ fh = self.server.open(f1_inode.id, os.O_RDWR)
+ self.server.write(fh, 0, 'new file1 contents')
+ self.server.release(fh)
+
+ fh = self.server.open(f2_inode.id, os.O_RDWR)
+ self.server.write(fh, 0, 'new file2 contents')
+ self.server.release(fh)
+
+ # Get copy properties
+ f1_inode_c = self.server.lookup(dst_inode.id, 'file1')
+ f2_inode_c = self.server.lookup(dst_inode.id, 'file2')
+ f2h_inode_c = self.server.lookup(dst_inode.id, 'file2_hardlink')
+ d1_inode_c = self.server.lookup(dst_inode.id, 'dir1')
+ d2_inode_c = self.server.lookup(d1_inode_c.id, 'dir2')
+ f2_h_inode_c = self.server.lookup(d1_inode_c.id, 'file2_hardlink')
+
+ # Check file1
+ fh = self.server.open(f1_inode_c.id, os.O_RDWR)
+ self.assertEqual(self.server.read(fh, 0, 42), 'file1 contents')
+ self.server.release(fh)
+ self.assertNotEqual(f1_inode.id, f1_inode_c.id)
+
+ # Check file2
+ fh = self.server.open(f2_inode_c.id, os.O_RDWR)
+ self.assertTrue(self.server.read(fh, 0, 42) == 'file2 contents')
+ self.server.release(fh)
+ self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
+ self.assertEqual(f2_inode_c.refcount, 3)
+ self.assertNotEqual(f2_inode.id, f2_inode_c.id)
+ self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)
+
+ # Check subdir1
+ self.assertNotEqual(d1_inode.id, d1_inode_c.id)
+ self.assertNotEqual(d2_inode.id, d2_inode_c.id)
+
+ self.fsck()
+
+ def test_lock_tree(self):
+
+ inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
+
+ # Create file
+ (fh, inode1a) = self.server.create(inode1.id, 'file1',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file1 contents')
+ self.server.release(fh)
+
+ # Create subdirectory
+ inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
+ (fh, inode2a) = self.server.create(inode2.id, 'file2',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file2 contents')
+ self.server.release(fh)
+
+ # Another file
+ (fh, inode3) = self.server.create(ROOT_INODE, 'file1',
+ self.file_mode(), Ctx())
+ self.server.release(fh)
+
+ # Lock
+ self.server.lock_tree(inode1.id)
+
+ for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
+ self.assertTrue(self.server.inodes[i].locked)
+
+ # Remove
+ with self.assertRaises(FUSEError) as cm:
+ self.server._remove(inode1.id, 'file1', inode1a.id)
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # Rename / Replace
+ with self.assertRaises(FUSEError) as cm:
+ self.server.rename(ROOT_INODE, 'file1', inode1.id, 'file2')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.rename(inode1.id, 'file1', ROOT_INODE, 'file2')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # Open
+ with self.assertRaises(FUSEError) as cm:
+ self.server.open(inode2a.id, os.O_RDWR)
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.open(inode2a.id, os.O_WRONLY)
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ self.server.release(self.server.open(inode3.id, os.O_WRONLY))
+
+ # Write
+ fh = self.server.open(inode2a.id, os.O_RDONLY)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.write(fh, 0, 'foo')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ self.server.release(fh)
+
+ # Create
+ with self.assertRaises(FUSEError) as cm:
+ self.server._create(inode2.id, 'dir1', self.dir_mode(), Ctx())
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # Setattr
+ with self.assertRaises(FUSEError) as cm:
+ self.server.setattr(inode2a.id, dict())
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ # xattr
+ with self.assertRaises(FUSEError) as cm:
+ self.server.setxattr(inode2.id, 'name', 'value')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+ with self.assertRaises(FUSEError) as cm:
+ self.server.removexattr(inode2.id, 'name')
+ self.assertEqual(cm.exception.errno, errno.EPERM)
+
+ self.fsck()
+
+ def test_remove_tree(self):
+
+ inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
+
+ # Create file
+ (fh, inode1a) = self.server.create(inode1.id, 'file1',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file1 contents')
+ self.server.release(fh)
+
+ # Create subdirectory
+ inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
+ (fh, inode2a) = self.server.create(inode2.id, 'file2',
+ self.file_mode(), Ctx())
+ self.server.write(fh, 0, 'file2 contents')
+ self.server.release(fh)
+
+ # Remove
+ self.server.remove_tree(ROOT_INODE, 'source')
+
+ for (id_p, name) in ((ROOT_INODE, 'source'),
+ (inode1.id, 'file1'),
+ (inode1.id, 'dir1'),
+ (inode2.id, 'file2')):
+ self.assertFalse(self.db.has_val('SELECT inode FROM contents WHERE name=? AND '
+ 'parent_inode = ?', (name, id_p)))
+
+ for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
+ self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,)))
+
+ self.fsck()
+
+
+def suite():
+ return unittest.makeSuite(fs_api_tests)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t3_fsck.py b/tests/t3_fsck.py
new file mode 100644
index 0000000..e111320
--- /dev/null
+++ b/tests/t3_fsck.py
@@ -0,0 +1,329 @@
+'''
+t3_fsck.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+import unittest2 as unittest
+from s3ql.fsck import Fsck
+from s3ql.backends import local
+from s3ql.database import Connection
+from s3ql.common import ROOT_INODE, create_tables, init_tables
+from _common import TestCase
+import os
+import stat
+import tempfile
+import time
+import shutil
+
+class fsck_tests(TestCase):
+
+ def setUp(self):
+ self.bucket_dir = tempfile.mkdtemp()
+ self.passphrase = 'schnupp'
+ self.bucket = local.Connection().get_bucket(self.bucket_dir, self.passphrase)
+ self.cachedir = tempfile.mkdtemp() + "/"
+ self.blocksize = 1024
+
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+
+ self.fsck = Fsck(self.cachedir, self.bucket,
+ { 'blocksize': self.blocksize }, self.db)
+ self.fsck.expect_errors = True
+
+ def tearDown(self):
+ shutil.rmtree(self.cachedir)
+ shutil.rmtree(self.bucket_dir)
+
+ def assert_fsck(self, fn):
+ '''Check that fn detects and corrects an error'''
+
+
+ self.fsck.found_errors = False
+ fn()
+ self.assertTrue(self.fsck.found_errors)
+ self.fsck.found_errors = False
+ fn()
+ self.assertFalse(self.fsck.found_errors)
+
+ def test_cache(self):
+ inode = 6
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ fh = open(self.cachedir + 'inode_%d_block_1.d' % inode, 'wb')
+ fh.write('somedata')
+ fh.close()
+
+ self.assert_fsck(self.fsck.check_cache)
+ self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')
+
+ fh = open(self.cachedir + 'inode_%d_block_1' % inode, 'wb')
+ fh.write('otherdata')
+ fh.close()
+
+ self.assert_fsck(self.fsck.check_cache)
+ self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')
+
+
+ def test_lof1(self):
+
+ # Make lost+found a file
+ inode = self.db.get_val("SELECT inode FROM contents WHERE name=? AND parent_inode=?",
+ (b"lost+found", ROOT_INODE))
+ self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
+ self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))
+
+ self.assert_fsck(self.fsck.check_lof)
+
+ def test_lof2(self):
+ # Remove lost+found
+ self.db.execute('DELETE FROM contents WHERE name=? and parent_inode=?',
+ (b'lost+found', ROOT_INODE))
+
+ self.assert_fsck(self.fsck.check_lof)
+
+ def test_inode_refcount(self):
+
+ # Create an orphaned inode
+ self.db.execute("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 2, 0))
+
+ self.assert_fsck(self.fsck.check_inode_refcount)
+
+ # Create an inode with wrong refcount
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1, 0))
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
+ (b'name1', inode, ROOT_INODE))
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
+ (b'name2', inode, ROOT_INODE))
+
+ self.assert_fsck(self.fsck.check_inode_refcount)
+
+ def test_inode_sizes(self):
+
+ id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 2, 0))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', id_, ROOT_INODE))
+
+ # Create a block
+ obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(?, ?)',
+ (1, 500))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (id_, 0, obj_id))
+
+
+ self.assert_fsck(self.fsck.check_inode_sizes)
+
+
+
+ def test_keylist(self):
+ # Create an object that only exists in the bucket
+ self.bucket['s3ql_data_4364'] = 'Testdata'
+ self.assert_fsck(self.fsck.check_keylist)
+
+ # Create an object that does not exist in the bucket
+ self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
+ (34, 1, 0))
+ self.assert_fsck(self.fsck.check_keylist)
+
+ @staticmethod
+ def random_data(len_):
+ with open("/dev/urandom", "rb") as fd:
+ return fd.read(len_)
+
+ def test_loops(self):
+
+ # Create some directory inodes
+ inodes = [ self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
+ 0, 0, time.time(), time.time(), time.time(), 1))
+ for dummy in range(3) ]
+
+ inodes.append(inodes[0])
+ last = inodes[0]
+ for inode in inodes[1:]:
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
+ (bytes(inode), inode, last))
+ last = inode
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_refcount()
+ self.assertFalse(self.fsck.found_errors)
+ self.fsck.check_loops()
+ self.assertTrue(self.fsck.found_errors)
+ # We can't fix loops yet
+
+ def test_obj_refcounts(self):
+
+ obj_id = 42
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 0))
+
+ self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
+ (obj_id, 2, 0))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 1, obj_id))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 2, obj_id))
+
+ self.fsck.found_errors = False
+ self.fsck.check_obj_refcounts()
+ self.assertFalse(self.fsck.found_errors)
+
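+        # A third reference makes the stored refcount of 2 too small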
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 3, obj_id))
+ self.assert_fsck(self.fsck.check_obj_refcounts)
+
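+        # With all references gone, the stored refcount is too large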
+ self.db.execute('DELETE FROM blocks WHERE obj_id=?', (obj_id,))
+ self.assert_fsck(self.fsck.check_obj_refcounts)
+
+ def test_unix_size(self):
+
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 0))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_size_symlink(self):
+
+ inode = 42
+ target = 'some funny random string'
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,target,size) "
+ "VALUES (?,?,?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1,
+ target, len(target)))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
+ self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_target(self):
+
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
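+        # Only symlink inodes may have a target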
+ self.db.execute('UPDATE inodes SET target = ? WHERE id=?', ('foo', inode))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_rdev(self):
+
+ inode = 42
+ self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?,?)",
+ (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
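+        # Only device inodes may have an rdev value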
+ self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_child(self):
+
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
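+
+        # A regular file must not have directory entries beneath it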
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('foo', ROOT_INODE, inode))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+ def test_unix_blocks(self):
+
+ obj_id = 87
+ inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
+ "VALUES (?,?,?,?,?,?,?)",
+ (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR,
+ os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
+
+ self.db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?,?,?)',
+ ('test-entry', inode, ROOT_INODE))
+
+ self.fsck.found_errors = False
+ self.fsck.check_inode_unix()
+ self.assertFalse(self.fsck.found_errors)
+
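+        # A socket inode must not have any data blocks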
+ self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
+ (obj_id, 2, 0))
+ self.db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
+ (inode, 1, obj_id))
+ self.fsck.check_inode_unix()
+ self.assertTrue(self.fsck.found_errors)
+
+
+# The pyunit documentation recommends providing a suite() function for test runners
+def suite():
+ return unittest.makeSuite(fsck_tests)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t3_inode_cache.py b/tests/t3_inode_cache.py
new file mode 100644
index 0000000..b1d8814
--- /dev/null
+++ b/tests/t3_inode_cache.py
@@ -0,0 +1,99 @@
+'''
+t3_inode_cache.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2010 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+
+
+from s3ql import inode_cache
+from s3ql.common import create_tables, init_tables
+from s3ql.database import Connection
+from _common import TestCase
+import unittest2 as unittest
+import time
+import tempfile
+
+class cache_tests(TestCase):
+
+ def setUp(self):
+ self.dbfile = tempfile.NamedTemporaryFile()
+ self.db = Connection(self.dbfile.name)
+ create_tables(self.db)
+ init_tables(self.db)
+ self.cache = inode_cache.InodeCache(self.db)
+
+ def tearDown(self):
+ self.cache.destroy()
+
+ def test_create(self):
+ attrs = {'mode': 784,
+ 'refcount': 3,
+ 'uid': 7,
+ 'gid': 2,
+ 'size': 34674,
+ 'target': 'foobar',
+ 'rdev': 11,
+ 'atime': time.time(),
+ 'ctime': time.time(),
+ 'mtime': time.time() }
+
+ inode = self.cache.create_inode(**attrs)
+
+ for key in attrs.keys():
+ self.assertEqual(attrs[key], getattr(inode, key))
+
+ self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?',
+ (inode.id,)))
+
+ def test_del(self):
+ attrs = {'mode': 784,
+ 'refcount': 3,
+ 'uid': 7,
+ 'target': 'foobar',
+ 'gid': 2,
+ 'size': 34674,
+ 'rdev': 11,
+ 'atime': time.time(),
+ 'ctime': time.time(),
+ 'mtime': time.time() }
+ inode = self.cache.create_inode(**attrs)
+ del self.cache[inode.id]
+ self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
+ self.assertRaises(KeyError, self.cache.__delitem__, inode.id)
+
+ def test_get(self):
+ attrs = {'mode': 784,
+ 'refcount': 3,
+ 'uid': 7,
+ 'gid': 2,
+ 'target': 'foobar',
+ 'size': 34674,
+ 'rdev': 11,
+ 'atime': time.time(),
+ 'ctime': time.time(),
+ 'mtime': time.time() }
+ inode = self.cache.create_inode(**attrs)
+ self.assertEqual(inode, self.cache[inode.id])
+
+ self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
+ # Entry should still be in cache
+ self.assertEqual(inode, self.cache[inode.id])
+
+        # Create enough new entries to push it out of the cache
+        for _ in xrange(inode_cache.CACHE_SIZE + 1):
+            dummy = self.cache[self.cache.create_inode(**attrs).id]
+
+        # Now the lookup has to hit the database and must fail
+        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
+
+
+def suite():
+ return unittest.makeSuite(cache_tests)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t4_adm.py b/tests/t4_adm.py
new file mode 100644
index 0000000..c033fd3
--- /dev/null
+++ b/tests/t4_adm.py
@@ -0,0 +1,67 @@
+'''
+t4_adm.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+from _common import TestCase
+import unittest2 as unittest
+import tempfile
+import sys
+import os
+from cStringIO import StringIO
+import shutil
+import s3ql.cli.mkfs
+import s3ql.cli.adm
+from s3ql.backends import local
+
+class AdmTests(TestCase):
+
+ def setUp(self):
+ self.cache_dir = tempfile.mkdtemp()
+ self.bucket_dir = tempfile.mkdtemp()
+
+ self.bucketname = 'local://' + os.path.join(self.bucket_dir, 'mybucket')
+ self.passphrase = 'oeut3d'
+
+ def tearDown(self):
+ shutil.rmtree(self.cache_dir)
+ shutil.rmtree(self.bucket_dir)
+
+ def mkfs(self):
+ sys.stdin = StringIO('%s\n%s\n' % (self.passphrase, self.passphrase))
+ try:
+ s3ql.cli.mkfs.main(['--homedir', self.cache_dir, self.bucketname ])
+ except BaseException as exc:
+ self.fail("mkfs.s3ql failed: %s" % exc)
+
+ def test_passphrase(self):
+ self.mkfs()
+
+ passphrase_new = 'sd982jhd'
+ sys.stdin = StringIO('%s\n%s\n%s\n' % (self.passphrase,
+ passphrase_new, passphrase_new))
+ try:
+ s3ql.cli.adm.main(['passphrase', self.bucketname ])
+ except BaseException as exc:
+ self.fail("s3qladm failed: %s" % exc)
+
+
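+        # If the passphrase change worked, the bucket must now be readable
+        # with the new passphrase ('s3ql_passphrase' holds the encrypted
+        # master key)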
+ bucket = local.Connection().get_bucket(os.path.join(self.bucket_dir, 'mybucket'))
+ bucket.passphrase = passphrase_new
+
+ bucket.passphrase = bucket['s3ql_passphrase']
+ self.assertTrue(isinstance(bucket['s3ql_seq_no_0'], str))
+
+
+# The pyunit documentation recommends providing a suite() function for test runners
+def suite():
+ return unittest.makeSuite(AdmTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t4_fuse.py b/tests/t4_fuse.py
new file mode 100644
index 0000000..8c5daa1
--- /dev/null
+++ b/tests/t4_fuse.py
@@ -0,0 +1,301 @@
+'''
+t4_fuse.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+from _common import TestCase
+from cStringIO import StringIO
+from os.path import basename
+from s3ql.common import retry, AsyncFn
+import filecmp
+import os.path
+import s3ql.cli.fsck
+import s3ql.cli.mkfs
+import s3ql.cli.mount
+import s3ql.cli.umount
+import shutil
+import stat
+import llfuse
+import subprocess
+import sys
+import tempfile
+import time
+import unittest2 as unittest
+
+# For debugging
+USE_VALGRIND = False
+
+class fuse_tests(TestCase):
+
+ def setUp(self):
+        # We need this to test multi-block operations
+        self.src = __file__
+        if os.path.getsize(self.src) < 1024:
+            raise RuntimeError("test file %s should be bigger than 1 KB" % self.src)
+
+ self.mnt_dir = tempfile.mkdtemp()
+ self.cache_dir = tempfile.mkdtemp()
+ self.bucket_dir = tempfile.mkdtemp()
+
+ self.bucketname = 'local://' + os.path.join(self.bucket_dir, 'mybucket')
+ self.passphrase = 'oeut3d'
+
+ self.mount_thread = None
+ self.name_cnt = 0
+
+ def tearDown(self):
+ # Umount if still mounted
+ if os.path.ismount(self.mnt_dir):
+ subprocess.call(['fusermount', '-z', '-u', self.mnt_dir])
+
+ # Try to wait for mount thread to prevent spurious errors
+ # because the db file is being removed
+ if self.mount_thread and USE_VALGRIND:
+ retry(60, lambda: self.mount_thread.poll() is not None)
+ elif self.mount_thread:
+ self.mount_thread.join(60)
+
+ shutil.rmtree(self.mnt_dir)
+ shutil.rmtree(self.cache_dir)
+ shutil.rmtree(self.bucket_dir)
+
+        if not USE_VALGRIND and self.mount_thread and not self.mount_thread.is_alive():
+ self.mount_thread.join_and_raise()
+
+ def mount(self):
+
+ sys.stdin = StringIO('%s\n%s\n' % (self.passphrase, self.passphrase))
+ try:
+ s3ql.cli.mkfs.main(['-L', 'test fs', '--blocksize', '500',
+ '--homedir', self.cache_dir, self.bucketname ])
+ except BaseException as exc:
+ self.fail("mkfs.s3ql failed: %s" % exc)
+
+
+        # Note: When running inside the test suite, we have fewer
+        # available file descriptors
+ if USE_VALGRIND:
+ if __name__ == '__main__':
+ mypath = sys.argv[0]
+ else:
+ mypath = __file__
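+            # Locate bin/mount.s3ql relative to this test file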
+ basedir = os.path.abspath(os.path.join(os.path.dirname(mypath), '..'))
+ self.mount_thread = subprocess.Popen(['valgrind', 'python-dbg',
+ os.path.join(basedir, 'bin', 'mount.s3ql'),
+ "--fg", '--homedir', self.cache_dir,
+ '--max-cache-entries', '500',
+ self.bucketname, self.mnt_dir],
+ stdin=subprocess.PIPE)
+ print(self.passphrase, file=self.mount_thread.stdin)
+ retry(30, os.path.ismount, self.mnt_dir)
+ else:
+ sys.stdin = StringIO('%s\n' % self.passphrase)
+ self.mount_thread = AsyncFn(s3ql.cli.mount.main,
+ ["--fg", '--homedir', self.cache_dir,
+ '--max-cache-entries', '500',
+ self.bucketname, self.mnt_dir])
+ self.mount_thread.start()
+
+ # Wait for mountpoint to come up
+ try:
+ retry(3, os.path.ismount, self.mnt_dir)
+ except:
+ self.mount_thread.join_and_raise()
+
+ def umount(self):
+ time.sleep(0.5)
+ devnull = open('/dev/null', 'wb')
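+        # Wait until no process is accessing the mountpoint anymore
+        # (fuser exits with status 1 when it finds no matching process)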
+ retry(5, lambda: subprocess.call(['fuser', '-m', self.mnt_dir],
+ stdout=devnull, stderr=devnull) == 1)
+ s3ql.cli.umount.DONTWAIT = True
+ try:
+ s3ql.cli.umount.main([self.mnt_dir])
+ except BaseException as exc:
+ self.fail("Umount failed: %s" % exc)
+
+ # Now wait for server process
+ if USE_VALGRIND:
+ self.assertEqual(self.mount_thread.wait(), 0)
+ else:
+ exc = self.mount_thread.join_get_exc()
+ self.assertIsNone(exc)
+ self.assertFalse(os.path.ismount(self.mnt_dir))
+
+ # Now run an fsck
+ sys.stdin = StringIO('%s\n' % self.passphrase)
+ try:
+ s3ql.cli.fsck.main(['--force', '--homedir', self.cache_dir,
+ self.bucketname])
+ except BaseException as exc:
+ self.fail("fsck failed: %s" % exc)
+
+ def runTest(self):
+        # Run all tests in the same environment; mounting and unmounting
+        # for every single test would just take too long
+
+ self.mount()
+ self.tst_chown()
+ self.tst_link()
+ self.tst_mkdir()
+ self.tst_mknod()
+ self.tst_readdir()
+ self.tst_statvfs()
+ self.tst_symlink()
+ self.tst_truncate()
+ self.tst_write()
+ self.umount()
+
+ def newname(self):
+ self.name_cnt += 1
+ return "s3ql_%d" % self.name_cnt
+
+ def tst_mkdir(self):
+ dirname = self.newname()
+ fullname = self.mnt_dir + "/" + dirname
+ os.mkdir(fullname)
+ fstat = os.stat(fullname)
+ self.assertTrue(stat.S_ISDIR(fstat.st_mode))
+ self.assertEquals(llfuse.listdir(fullname), [])
+ self.assertEquals(fstat.st_nlink, 1)
+ self.assertTrue(dirname in llfuse.listdir(self.mnt_dir))
+ os.rmdir(fullname)
+ self.assertRaises(OSError, os.stat, fullname)
+ self.assertTrue(dirname not in llfuse.listdir(self.mnt_dir))
+
+ def tst_symlink(self):
+ linkname = self.newname()
+ fullname = self.mnt_dir + "/" + linkname
+ os.symlink("/imaginary/dest", fullname)
+ fstat = os.lstat(fullname)
+ self.assertTrue(stat.S_ISLNK(fstat.st_mode))
+ self.assertEquals(os.readlink(fullname), "/imaginary/dest")
+ self.assertEquals(fstat.st_nlink, 1)
+ self.assertTrue(linkname in llfuse.listdir(self.mnt_dir))
+ os.unlink(fullname)
+ self.assertRaises(OSError, os.lstat, fullname)
+ self.assertTrue(linkname not in llfuse.listdir(self.mnt_dir))
+
+ def tst_mknod(self):
+ filename = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, filename)
+ fstat = os.lstat(filename)
+ self.assertTrue(stat.S_ISREG(fstat.st_mode))
+ self.assertEquals(fstat.st_nlink, 1)
+ self.assertTrue(basename(filename) in llfuse.listdir(self.mnt_dir))
+ self.assertTrue(filecmp.cmp(src, filename, False))
+ os.unlink(filename)
+ self.assertRaises(OSError, os.stat, filename)
+ self.assertTrue(basename(filename) not in llfuse.listdir(self.mnt_dir))
+
+ def tst_chown(self):
+ filename = os.path.join(self.mnt_dir, self.newname())
+ os.mkdir(filename)
+ fstat = os.lstat(filename)
+ uid = fstat.st_uid
+ gid = fstat.st_gid
+
+ uid_new = uid + 1
+ os.chown(filename, uid_new, -1)
+ fstat = os.lstat(filename)
+ self.assertEquals(fstat.st_uid, uid_new)
+ self.assertEquals(fstat.st_gid, gid)
+
+ gid_new = gid + 1
+ os.chown(filename, -1, gid_new)
+ fstat = os.lstat(filename)
+ self.assertEquals(fstat.st_uid, uid_new)
+ self.assertEquals(fstat.st_gid, gid_new)
+
+ os.rmdir(filename)
+ self.assertRaises(OSError, os.stat, filename)
+ self.assertTrue(basename(filename) not in llfuse.listdir(self.mnt_dir))
+
+ def tst_write(self):
+ name = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, name)
+ self.assertTrue(filecmp.cmp(name, src, False))
+
+        # Don't unlink the file; we want to see if cache
+        # flushing works
+
+ def tst_statvfs(self):
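+        # Just check that the system call succeeds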
+ os.statvfs(self.mnt_dir)
+
+ def tst_link(self):
+ name1 = os.path.join(self.mnt_dir, self.newname())
+ name2 = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, name1)
+ self.assertTrue(filecmp.cmp(name1, src, False))
+ os.link(name1, name2)
+
+ fstat1 = os.lstat(name1)
+ fstat2 = os.lstat(name2)
+
+ self.assertEquals(fstat1, fstat2)
+ self.assertEquals(fstat1.st_nlink, 2)
+
+ self.assertTrue(basename(name2) in llfuse.listdir(self.mnt_dir))
+ self.assertTrue(filecmp.cmp(name1, name2, False))
+ os.unlink(name2)
+ fstat1 = os.lstat(name1)
+ self.assertEquals(fstat1.st_nlink, 1)
+ os.unlink(name1)
+
+ def tst_readdir(self):
+ dir_ = os.path.join(self.mnt_dir, self.newname())
+ file_ = dir_ + "/" + self.newname()
+ subdir = dir_ + "/" + self.newname()
+ subfile = subdir + "/" + self.newname()
+ src = self.src
+
+ os.mkdir(dir_)
+ shutil.copyfile(src, file_)
+ os.mkdir(subdir)
+ shutil.copyfile(src, subfile)
+
+ listdir_is = llfuse.listdir(dir_)
+ listdir_is.sort()
+ listdir_should = [ basename(file_), basename(subdir) ]
+ listdir_should.sort()
+ self.assertEquals(listdir_is, listdir_should)
+
+ os.unlink(file_)
+ os.unlink(subfile)
+ os.rmdir(subdir)
+ os.rmdir(dir_)
+
+ def tst_truncate(self):
+ filename = os.path.join(self.mnt_dir, self.newname())
+ src = self.src
+ shutil.copyfile(src, filename)
+ self.assertTrue(filecmp.cmp(filename, src, False))
+ fstat = os.stat(filename)
+ size = fstat.st_size
+ fd = os.open(filename, os.O_RDWR)
+
+ os.ftruncate(fd, size + 1024) # add > 1 block
+ self.assertEquals(os.stat(filename).st_size, size + 1024)
+
+ os.ftruncate(fd, size - 1024) # Truncate > 1 block
+ self.assertEquals(os.stat(filename).st_size, size - 1024)
+
+ os.close(fd)
+ os.unlink(filename)
+
+
+# The pyunit documentation recommends providing a suite() function for test runners
+def suite():
+ return unittest.makeSuite(fuse_tests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t5_cli.py b/tests/t5_cli.py
new file mode 100644
index 0000000..8e6bf68
--- /dev/null
+++ b/tests/t5_cli.py
@@ -0,0 +1,74 @@
+'''
+t5_cli.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+import os.path
+import errno
+import s3ql.cli.ctrl
+import s3ql.cli.lock
+import s3ql.cli.remove
+import llfuse
+import unittest2 as unittest
+import t4_fuse
+
+class cliTests(t4_fuse.fuse_tests):
+
+ def runTest(self):
+ self.mount()
+ self.tst_lock_rm()
+ self.tst_ctrl_flush()
+ self.umount()
+
+ def tst_ctrl_flush(self):
+
+ try:
+ s3ql.cli.ctrl.main(['flushcache', self.mnt_dir])
+ except BaseException as exc:
+ self.fail("s3qladm failed: %s" % exc)
+
+ def tst_lock_rm(self):
+
+        # Create a directory with one file
+ tempdir = os.path.join(self.mnt_dir, 'lock_dir')
+ filename = os.path.join(tempdir, 'myfile')
+ os.mkdir(tempdir)
+ with open(filename, 'w') as fh:
+ fh.write('Hello, world')
+
+        # Lock the directory
+ try:
+ s3ql.cli.lock.main([tempdir])
+ except BaseException as exc:
+ self.fail("s3qllock failed: %s" % exc)
+
+ # Try to delete
+ with self.assertRaises(OSError) as cm:
+ os.unlink(filename)
+ self.assertEqual(cm.exception[0], errno.EPERM)
+
+ # Try to write
+ with self.assertRaises(IOError) as cm:
+ open(filename, 'w+').write('Hello')
+ self.assertEqual(cm.exception[0], errno.EPERM)
+
+        # Deleting with s3qlrm should still work
+ try:
+ s3ql.cli.remove.main([tempdir])
+ except BaseException as exc:
+ self.fail("s3qlrm failed: %s" % exc)
+
+ self.assertTrue('lock_dir' not in llfuse.listdir(self.mnt_dir))
+
+# The pyunit documentation recommends providing a suite() function for test runners
+def suite():
+ return unittest.makeSuite(cliTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/t5_cp.py b/tests/t5_cp.py
new file mode 100644
index 0000000..f7f758c
--- /dev/null
+++ b/tests/t5_cp.py
@@ -0,0 +1,75 @@
+'''
+t5_cp.py - this file is part of S3QL (http://s3ql.googlecode.com)
+
+Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
+
+This program can be distributed under the terms of the GNU LGPL.
+'''
+
+from __future__ import division, print_function
+import os.path
+from s3ql.cli.cp import main as s3qlcp
+import subprocess
+import tarfile
+import tempfile
+import errno
+import unittest2 as unittest
+import t4_fuse
+
+
+class cpTests(t4_fuse.fuse_tests):
+
+ def runTest(self):
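+        # The copy is created and verified with rsync, so skip this
+        # test if rsync is not available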
+ try:
+ subprocess.call(['rsync', '--version'],
+ stderr=subprocess.STDOUT,
+ stdout=open('/dev/null', 'wb'))
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ raise unittest.SkipTest('rsync not installed')
+ raise
+
+ self.mount()
+ self.tst_cp()
+
+ self.umount()
+
+ def tst_cp(self):
+
+ # Extract tar
+ data_file = os.path.join(os.path.dirname(__file__), 'data.tar.bz2')
+ tempdir = tempfile.mkdtemp()
+ tarfile.open(data_file).extractall(tempdir)
+
+ # Rsync
+ subprocess.check_call(['rsync', '-aHAX', tempdir + '/',
+ os.path.join(self.mnt_dir, 'orig') + '/'])
+
+        # Copy with s3qlcp
+ try:
+ s3qlcp([os.path.join(self.mnt_dir, 'orig'),
+ os.path.join(self.mnt_dir, 'copy')])
+ except BaseException as exc:
+ self.fail("s3qlcp failed: %s" % exc)
+
+        # Compare with a dry-run rsync
+ rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
+ tempdir + '/',
+ os.path.join(self.mnt_dir, 'copy') + '/'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out = rsync.communicate()[0]
+ if out:
+ self.fail('Copy not equal to original, rsync says:\n' + out)
+        elif rsync.returncode != 0:
+            self.fail('rsync failed with exit code %d' % rsync.returncode)
+
+
+# The pyunit documentation recommends providing a suite() function for test runners
+def suite():
+ return unittest.makeSuite(cpTests)
+
+
+# Allow calling from command line
+if __name__ == "__main__":
+ unittest.main()