author    Nikolaus Rath <Nikolaus@rath.org>  2016-03-09 10:08:26 -0800
committer Nikolaus Rath <Nikolaus@rath.org>  2016-03-09 10:08:26 -0800
commit    8637d44c9ca3652c30895bf010acbd57ce6d451b
tree      bf0ff8738db144c79d08fd9a0abcf5afa8b51dac
parent    538edbe3d8a600a9ee4ff4a7822abbc79cbf00e1
Import s3ql_1.4.orig.tar.bz2
Diffstat (limited to 'tests')
 -rw-r--r--  tests/t2_block_cache.py | 14
 -rw-r--r--  tests/t3_fs_api.py      | 10
 -rw-r--r--  tests/t3_fsck.py        | 42
 3 files changed, 35 insertions(+), 31 deletions(-)
diff --git a/tests/t2_block_cache.py b/tests/t2_block_cache.py
index 0631956..79bfb85 100644
--- a/tests/t2_block_cache.py
+++ b/tests/t2_block_cache.py
@@ -31,7 +31,7 @@ class cache_tests(TestCase):
         self.bucket_dir = tempfile.mkdtemp()
         self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
-        self.cachedir = tempfile.mkdtemp() + "/"
+        self.cachedir = tempfile.mkdtemp()
         self.blocksize = 1024
         self.dbfile = tempfile.NamedTemporaryFile()
@@ -47,7 +47,7 @@ class cache_tests(TestCase):
                          | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                          os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))
-        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
+        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir + "/cache",
                                 self.blocksize * 100)
         # Tested methods assume that they are called from
@@ -57,8 +57,7 @@ class cache_tests(TestCase):
     def tearDown(self):
         self.cache.bucket_pool = self.bucket_pool
         self.cache.destroy()
-        if os.path.exists(self.cachedir):
-            shutil.rmtree(self.cachedir)
+        shutil.rmtree(self.cachedir)
         shutil.rmtree(self.bucket_dir)
         llfuse.lock.release()
@@ -305,13 +304,16 @@ class TestBucketPool(AbstractBucket):
         return self.bucket.open_read(key)
-    def open_write(self, key, metadata=None):
+    def open_write(self, key, metadata=None, is_compressed=False):
         self.no_write -= 1
         if self.no_write < 0:
             raise RuntimeError('Got too many open_write calls')
-        return self.bucket.open_write(key, metadata)
+        return self.bucket.open_write(key, metadata, is_compressed)
+    def is_temp_failure(self, exc):
+        return self.bucket.is_temp_failure(exc)
+
     def is_get_consistent(self):
         return self.bucket.is_get_consistent()
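
Aside on the hunk above: the bucket interface now takes an is_compressed flag in
open_write() and exposes an is_temp_failure() probe, so any delegating wrapper
like TestBucketPool has to forward both. A minimal sketch of such a wrapper,
assuming only that AbstractBucket is the abstract base used above; the
ForwardingBucket name is illustrative, not part of this commit:

    class ForwardingBucket(AbstractBucket):
        '''Hypothetical delegating wrapper around a real bucket.'''

        def __init__(self, bucket):
            # Base-class initialization elided; we only hold the delegate.
            self.bucket = bucket

        def open_write(self, key, metadata=None, is_compressed=False):
            # Forward the new flag, or compressed writes would silently
            # lose their compression hint.
            return self.bucket.open_write(key, metadata, is_compressed)

        def is_temp_failure(self, exc):
            # New probe: retry logic asks whether an exception is transient.
            return self.bucket.is_temp_failure(exc)
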
diff --git a/tests/t3_fs_api.py b/tests/t3_fs_api.py
index 2a45af3..dd9ee46 100644
--- a/tests/t3_fs_api.py
+++ b/tests/t3_fs_api.py
@@ -50,7 +50,7 @@ class fs_api_tests(TestCase):
         self.bucket_dir = tempfile.mkdtemp()
         self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
         self.bucket = self.bucket_pool.pop_conn()
-        self.cachedir = tempfile.mkdtemp() + "/"
+        self.cachedir = tempfile.mkdtemp()
         self.blocksize = 1024
         self.dbfile = tempfile.NamedTemporaryFile()
@@ -62,7 +62,7 @@ class fs_api_tests(TestCase):
         # file system request handler
         llfuse.lock.acquire()
-        self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
+        self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir + "/cache",
                                       self.blocksize * 5)
         self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
@@ -74,9 +74,7 @@ class fs_api_tests(TestCase):
     def tearDown(self):
         self.server.destroy()
         self.block_cache.destroy()
-
-        if os.path.exists(self.cachedir):
-            shutil.rmtree(self.cachedir)
+        shutil.rmtree(self.cachedir)
         shutil.rmtree(self.bucket_dir)
         llfuse.lock.release()
@@ -88,7 +86,7 @@ class fs_api_tests(TestCase):
     def fsck(self):
         self.block_cache.clear()
         self.server.inodes.flush()
-        fsck = Fsck(self.cachedir, self.bucket,
+        fsck = Fsck(self.cachedir + '/cache', self.bucket,
                     { 'blocksize': self.blocksize }, self.db)
         fsck.check()
         self.assertFalse(fsck.found_errors)
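
Taken together, these hunks converge on one fixture pattern: self.cachedir keeps
the bare mkdtemp() result (no trailing slash), BlockCache and Fsck receive the
'cache' subdirectory inside it and create it themselves, and tearDown removes
the parent without an existence check, since mkdtemp() guarantees it exists.
A stdlib-only sketch of the pattern; TempDirTest and workdir are illustrative
names, not from this commit:

    import shutil
    import tempfile
    import unittest

    class TempDirTest(unittest.TestCase):

        def setUp(self):
            # Keep the bare mkdtemp() directory; hand consumers a named
            # subdirectory they are expected to create themselves, the way
            # BlockCache now creates self.cachedir + '/cache'.
            self.cachedir = tempfile.mkdtemp()
            self.workdir = self.cachedir + '/cache'

        def tearDown(self):
            # The parent was created by mkdtemp() and therefore always
            # exists, so no os.path.exists() guard is needed.
            shutil.rmtree(self.cachedir)
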
diff --git a/tests/t3_fsck.py b/tests/t3_fsck.py
index 4a31019..2750f63 100644
--- a/tests/t3_fsck.py
+++ b/tests/t3_fsck.py
@@ -25,7 +25,7 @@ class fsck_tests(TestCase):
     def setUp(self):
         self.bucket_dir = tempfile.mkdtemp()
         self.bucket = local.Bucket(self.bucket_dir, None, None)
-        self.cachedir = tempfile.mkdtemp() + "/"
+        self.cachedir = tempfile.mkdtemp()
         self.blocksize = 1024
         self.dbfile = tempfile.NamedTemporaryFile()
@@ -61,7 +61,7 @@ class fsck_tests(TestCase):
         self._link('test-entry', inode)
         # Create new block
-        fh = open(self.cachedir + '%d-0' % inode, 'wb')
+        fh = open(self.cachedir + '/%d-0' % inode, 'wb')
         fh.write('somedata')
         fh.close()
         self.assert_fsck(self.fsck.check_cache)
@@ -70,17 +70,17 @@ class fsck_tests(TestCase):
         # Existing block
         self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                         (self.blocksize + 8, inode))
-        with open(self.cachedir + '%d-1' % inode, 'wb') as fh:
+        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
             fh.write('somedata')
         self.assert_fsck(self.fsck.check_cache)
         # Old block preserved
-        with open(self.cachedir + '%d-0' % inode, 'wb') as fh:
+        with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
             fh.write('somedat2')
         self.assert_fsck(self.fsck.check_cache)
         # Old block removed
-        with open(self.cachedir + '%d-1' % inode, 'wb') as fh:
+        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
             fh.write('somedat3')
         self.assert_fsck(self.fsck.check_cache)
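
Because self.cachedir no longer ends in a slash, these tests splice an explicit
'/' in front of the '<inode>-<blockno>' cache file name. As an aside, a helper
built on os.path.join would make the construction immune to trailing-slash
differences; cache_entry_path is hypothetical, not something this commit adds:

    import os

    def cache_entry_path(cachedir, inode, blockno):
        # Cache files are named '<inode>-<blockno>' (naming as in the
        # hunks above); os.path.join inserts the separator safely.
        return os.path.join(cachedir, '%d-%d' % (inode, blockno))

    # e.g. cache_entry_path('/tmp/tmpXYZ', 42, 0) -> '/tmp/tmpXYZ/42-0'
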
@@ -192,23 +192,23 @@ class fsck_tests(TestCase):
         self.bucket['s3ql_data_%d' % obj_id] = 'foo'
         # Case 1
-        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
-                        (None, self.blocksize + 120, id_))
+        self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.blocksize + 120, id_))
         self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                         (id_, 1, block_id))
         self.assert_fsck(self.fsck.check_inode_sizes)
         # Case 2
         self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
-        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
-                        (block_id, 129, id_))
+        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
+                        (id_, 0, block_id))
+        self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_))
         self.assert_fsck(self.fsck.check_inode_sizes)
         # Case 3
         self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                         (id_, 1, block_id))
-        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
-                        (block_id, self.blocksize + 120, id_))
+        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
+                        (self.blocksize + 120, id_))
         self.db.execute('UPDATE blocks SET refcount = refcount + 1 WHERE id = ?',
                         (block_id,))
         self.assert_fsck(self.fsck.check_inode_sizes)
@@ -229,10 +229,12 @@ class fsck_tests(TestCase):
         block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                  (1, obj_id, 128))
-        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size,block_id) "
-                            "VALUES (?,?,?,?,?,?,?,?,?)",
+        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+                            "VALUES (?,?,?,?,?,?,?,?)",
                             (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
-                             0, 0, time.time(), time.time(), time.time(), 1, 128, block_id))
+                             0, 0, time.time(), time.time(), time.time(), 1, 128))
+        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
+                        (id_, 0, block_id))
         self._link('test-entry', id_)
         self.assert_fsck(self.fsck.check_keylist)
@@ -299,14 +301,16 @@ class fsck_tests(TestCase):
         block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                  (1, obj_id, 0))
-        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size,block_id) "
-                              "VALUES (?,?,?,?,?,?,?,?,?)",
+        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
+                              "VALUES (?,?,?,?,?,?,?,?)",
                               (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(),
-                               time.time(), time.time(), time.time(), 1, self.blocksize, block_id))
+                               time.time(), time.time(), time.time(), 1, self.blocksize))
         self._link('test-entry', inode)
         self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
-                        (inode, 1, block_id))
+                        (inode, 0, block_id))
+        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
+                        (inode, 1, block_id))
         self.assert_fsck(self.fsck.check_block_refcount)
@@ -315,7 +319,7 @@ class fsck_tests(TestCase):
         obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
         self.bucket['s3ql_data_%d' % obj_id] = 'foo'
         self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
-                      (1, obj_id, 0))
+                      (1, obj_id, 3))
         self.assert_fsck(self.fsck.check_block_refcount)
     def test_unix_size(self):
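
The schema change running through all of the t3_fsck.py hunks: the inodes table
loses its block_id column, and the inode-to-block mapping moves into the
inode_blocks table, one row per (inode, blockno) pair. A condensed sketch of
attaching a block to a fresh inode under the new schema, with the SQL lifted
from the hunks above (db stands in for the tests' self.db wrapper, block_id for
an existing row in the blocks table):

    import os
    import stat
    import time

    # Assumes: db offers the execute()/rowid() interface used in the tests,
    # and block_id references an existing row in the blocks table.
    id_ = db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                   "VALUES (?,?,?,?,?,?,?,?)",
                   (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                    os.getuid(), os.getgid(),
                    time.time(), time.time(), time.time(), 1, 128))
    # Block number 0 of the new inode maps to block_id; additional blocks
    # get additional rows rather than a column on the inode.
    db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
               (id_, 0, block_id))
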