diff options
33 files changed, 229 insertions, 81 deletions
diff --git a/Changes.txt b/Changes.txt index 765bede..3303a9c 100644 --- a/Changes.txt +++ b/Changes.txt @@ -1,4 +1,44 @@ -2021-01-03, S3Ql 3.7.0 +2021-06-03, S3QL 3.7.3 + + * Fixed a DATA CORRUPTION bug in fsck.s3ql that caused the recorded size of uploaded + dirty blocks to be rounded up to the next multiple of 512 bytes, effectively appending + up to 512 zero-bytes to the end of affected files. + + This problem was introduced in version 3.4.1 (released 2020-05-08) as part of a + seemingly very minor improvement to cache usage calculation. + + You can tell that a file has (likely) been affected from fsck.s3ql messages of the + form: + + WARNING: Writing dirty block <X> of inode <Y> + + followed later by: + + WARNING: Size of inode <Y> (<path_to_file>) does not agree with number of blocks, \ + setting from <reasonable_size> to <size rounded to next multiple of 512> + + +2021-05-04, S3QL 3.7.2 + + * Fixed a crash with `dugong.StateError` in the Google Storage backend when the + authentication token expires in the middle of an upload. + + * S3QL is now compatible with setuptools >= 47. + + +2021-03-07, S3QL 3.7.1 + + * The tcp-timeout backend option of the B2 Backend works now. + + * mount.s3ql no longer crashes with "No Upload Threads available" when not running in + foreground. + + +2021-01-03, S3QL 3.7.0 + + * S3QL now requires Python 3.7 or newer. + + * S3QL now requires pyfuse3 of any version between 3.2.0 (inclusive) and 4.0 (exclusive). * S3QL now supports newer AWS S3 regions like eu-south-1. @@ -11,6 +51,8 @@ `umount.s3ql`, `umount`, or `fusermount -u` and wait for the mount.s3ql process to terminate. + * More robust expired auth token detection for Google Storage. + 2020-11-09, S3QL 3.6.0 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: s3ql -Version: 3.7.0 +Version: 3.7.3 Summary: a full-featured file system for online data storage Home-page: https://bitbucket.org/nikratio/s3ql/ Author: Nikolaus Rath @@ -32,9 +32,9 @@ Description: .. 
handling have been included from the very first line, and S3QL comes with extensive automated test cases for all its components. - .. _`Google Storage`: http://code.google.com/apis/storage/ - .. _`Amazon S3`: http://aws.amazon.com/s3 - .. _OpenStack: http://openstack.org/projects/storage/ + .. _`Google Storage`: https://cloud.google.com/storage/docs + .. _`Amazon S3`: https://aws.amazon.com/s3/ + .. _OpenStack: https://www.openstack.org/software/ Features @@ -139,8 +139,8 @@ Description: .. The following resources are available: * The `S3QL User's Guide`_. - * The `S3QL Wiki <https://github.com/s3ql/s3ql/wiki>`_ - * The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You + * The `S3QL Wiki`_ + * The `S3QL Mailing List`_. You can subscribe by sending a mail to `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_. @@ -151,8 +151,9 @@ Description: .. The S3QL source code is available on GitHub_. - .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html - .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql + .. _`S3QL User's Guide`: https://www.rath.org/s3ql-docs/ + .. _`S3QL Wiki`: https://github.com/s3ql/s3ql/wiki + .. _`S3QL Mailing List`: https://groups.google.com/g/s3ql .. _`GitHub Issue Tracker`: https://github.com/s3ql/s3ql/issues .. _GitHub: https://github.com/s3ql/main @@ -23,9 +23,9 @@ readable and serviceable as possible. Solid error detection and error handling have been included from the very first line, and S3QL comes with extensive automated test cases for all its components. -.. _`Google Storage`: http://code.google.com/apis/storage/ -.. _`Amazon S3`: http://aws.amazon.com/s3 -.. _OpenStack: http://openstack.org/projects/storage/ +.. _`Google Storage`: https://cloud.google.com/storage/docs +.. _`Amazon S3`: https://aws.amazon.com/s3/ +.. _OpenStack: https://www.openstack.org/software/ Features @@ -130,8 +130,8 @@ Need Help? The following resources are available: * The `S3QL User's Guide`_. 
-* The `S3QL Wiki <https://github.com/s3ql/s3ql/wiki>`_ -* The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You +* The `S3QL Wiki`_ +* The `S3QL Mailing List`_. You can subscribe by sending a mail to `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_. @@ -142,7 +142,8 @@ Contributing The S3QL source code is available on GitHub_. -.. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html -.. _`S3QL Mailing List`: http://groups.google.com/group/s3ql +.. _`S3QL User's Guide`: https://www.rath.org/s3ql-docs/ +.. _`S3QL Wiki`: https://github.com/s3ql/s3ql/wiki +.. _`S3QL Mailing List`: https://groups.google.com/g/s3ql .. _`GitHub Issue Tracker`: https://github.com/s3ql/s3ql/issues .. _GitHub: https://github.com/s3ql/main diff --git a/contrib/expire_backups.1 b/contrib/expire_backups.1 index 893a6ff..70238b4 100644 --- a/contrib/expire_backups.1 +++ b/contrib/expire_backups.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "EXPIRE_BACKUPS" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "EXPIRE_BACKUPS" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME expire_backups \- Intelligently expire old backups . diff --git a/contrib/pcp.1 b/contrib/pcp.1 index 6d1c9a0..1a4abdc 100644 --- a/contrib/pcp.1 +++ b/contrib/pcp.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "PCP" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "PCP" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME pcp \- Recursive, parallel copy of directory trees . 
diff --git a/doc/latex/manual.tex b/doc/latex/manual.tex index 6bf4bf9..14304f7 100644 --- a/doc/latex/manual.tex +++ b/doc/latex/manual.tex @@ -58,8 +58,8 @@ \title{S3QL Documentation} -\date{Jan 03, 2021} -\release{3.7.0} +\date{Jun 03, 2021} +\release{3.7.3} \author{Nikolaus Rath} \newcommand{\sphinxlogo}{\vbox{}} \renewcommand{\releasename}{Release} @@ -78,7 +78,7 @@ \chapter{S3QL} \label{\detokenize{about:s3ql}}\label{\detokenize{about::doc}} S3QL is a file system that stores all its data online using storage -services like \sphinxhref{http://code.google.com/apis/storage/}{Google Storage}, \sphinxhref{http://aws.amazon.com/s3}{Amazon S3}, or \sphinxhref{http://openstack.org/projects/storage/}{OpenStack}. S3QL +services like \sphinxhref{https://cloud.google.com/storage/docs}{Google Storage}, \sphinxhref{https://aws.amazon.com/s3/}{Amazon S3}, or \sphinxhref{https://www.openstack.org/software/}{OpenStack}. S3QL effectively provides a virtual drive of dynamic, infinite capacity that can be accessed from any computer with internet access. diff --git a/doc/man/fsck.s3ql.1 b/doc/man/fsck.s3ql.1 index 30dbeab..552d734 100644 --- a/doc/man/fsck.s3ql.1 +++ b/doc/man/fsck.s3ql.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "FSCK.S3QL" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "FSCK.S3QL" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME fsck.s3ql \- Check an S3QL file system for errors . diff --git a/doc/man/mkfs.s3ql.1 b/doc/man/mkfs.s3ql.1 index 223a950..809e9e9 100644 --- a/doc/man/mkfs.s3ql.1 +++ b/doc/man/mkfs.s3ql.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "MKFS.S3QL" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "MKFS.S3QL" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME mkfs.s3ql \- Create an S3QL file system . diff --git a/doc/man/mount.s3ql.1 b/doc/man/mount.s3ql.1 index 7afd7ab..161a354 100644 --- a/doc/man/mount.s3ql.1 +++ b/doc/man/mount.s3ql.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . 
-.TH "MOUNT.S3QL" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "MOUNT.S3QL" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME mount.s3ql \- Mount an S3QL file system . diff --git a/doc/man/s3ql_oauth_client.1 b/doc/man/s3ql_oauth_client.1 index 749fbb1..a5fed7b 100644 --- a/doc/man/s3ql_oauth_client.1 +++ b/doc/man/s3ql_oauth_client.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QL_OAUTH_CLIENT" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QL_OAUTH_CLIENT" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3ql_oauth_client \- Obtain Google Storage OAuth2 tokens . diff --git a/doc/man/s3ql_verify.1 b/doc/man/s3ql_verify.1 index 9bf41c0..8e0701c 100644 --- a/doc/man/s3ql_verify.1 +++ b/doc/man/s3ql_verify.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QL_VERIFY" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QL_VERIFY" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3ql_verify \- Verify data in an S3QL file system . diff --git a/doc/man/s3qladm.1 b/doc/man/s3qladm.1 index f8bf5ac..dfabc71 100644 --- a/doc/man/s3qladm.1 +++ b/doc/man/s3qladm.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QLADM" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QLADM" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3qladm \- Manage S3QL file systems . diff --git a/doc/man/s3qlcp.1 b/doc/man/s3qlcp.1 index f0f89a2..6fa98ae 100644 --- a/doc/man/s3qlcp.1 +++ b/doc/man/s3qlcp.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QLCP" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QLCP" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3qlcp \- Copy-on-write replication on S3QL file systems . diff --git a/doc/man/s3qlctrl.1 b/doc/man/s3qlctrl.1 index 3bf8d4e..141e693 100644 --- a/doc/man/s3qlctrl.1 +++ b/doc/man/s3qlctrl.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QLCTRL" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QLCTRL" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3qlctrl \- Control a mounted S3QL file system . 
diff --git a/doc/man/s3qllock.1 b/doc/man/s3qllock.1 index 5cb1e2b..af4646a 100644 --- a/doc/man/s3qllock.1 +++ b/doc/man/s3qllock.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QLLOCK" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QLLOCK" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3qllock \- Make trees on an S3QL file system immutable . diff --git a/doc/man/s3qlrm.1 b/doc/man/s3qlrm.1 index df2d634..9bf1b7b 100644 --- a/doc/man/s3qlrm.1 +++ b/doc/man/s3qlrm.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QLRM" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QLRM" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3qlrm \- Fast tree removal on S3QL file systems . diff --git a/doc/man/s3qlstat.1 b/doc/man/s3qlstat.1 index 834e163..593b1b0 100644 --- a/doc/man/s3qlstat.1 +++ b/doc/man/s3qlstat.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "S3QLSTAT" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "S3QLSTAT" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME s3qlstat \- Gather S3QL file system statistics . diff --git a/doc/man/umount.s3ql.1 b/doc/man/umount.s3ql.1 index 6bd2726..64763ab 100644 --- a/doc/man/umount.s3ql.1 +++ b/doc/man/umount.s3ql.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "UMOUNT.S3QL" "1" "Jan 03, 2021" "3.7.0" "S3QL" +.TH "UMOUNT.S3QL" "1" "Jun 03, 2021" "3.7.3" "S3QL" .SH NAME umount.s3ql \- Unmount an S3QL file system . 
diff --git a/doc/manual.pdf b/doc/manual.pdf Binary files differindex 4a1f5be..cad2d6b 100644 --- a/doc/manual.pdf +++ b/doc/manual.pdf @@ -151,7 +151,7 @@ def main(): setuptools.setup( name='s3ql', - zip_safe=True, + zip_safe=False, version=s3ql.VERSION, description='a full-featured file system for online data storage', long_description=long_desc, diff --git a/src/s3ql.egg-info/PKG-INFO b/src/s3ql.egg-info/PKG-INFO index ecadfe4..6dc7353 100644 --- a/src/s3ql.egg-info/PKG-INFO +++ b/src/s3ql.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: s3ql -Version: 3.7.0 +Version: 3.7.3 Summary: a full-featured file system for online data storage Home-page: https://bitbucket.org/nikratio/s3ql/ Author: Nikolaus Rath @@ -32,9 +32,9 @@ Description: .. handling have been included from the very first line, and S3QL comes with extensive automated test cases for all its components. - .. _`Google Storage`: http://code.google.com/apis/storage/ - .. _`Amazon S3`: http://aws.amazon.com/s3 - .. _OpenStack: http://openstack.org/projects/storage/ + .. _`Google Storage`: https://cloud.google.com/storage/docs + .. _`Amazon S3`: https://aws.amazon.com/s3/ + .. _OpenStack: https://www.openstack.org/software/ Features @@ -139,8 +139,8 @@ Description: .. The following resources are available: * The `S3QL User's Guide`_. - * The `S3QL Wiki <https://github.com/s3ql/s3ql/wiki>`_ - * The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You + * The `S3QL Wiki`_ + * The `S3QL Mailing List`_. You can subscribe by sending a mail to `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_. @@ -151,8 +151,9 @@ Description: .. The S3QL source code is available on GitHub_. - .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html - .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql + .. _`S3QL User's Guide`: https://www.rath.org/s3ql-docs/ + .. _`S3QL Wiki`: https://github.com/s3ql/s3ql/wiki + .. 
_`S3QL Mailing List`: https://groups.google.com/g/s3ql .. _`GitHub Issue Tracker`: https://github.com/s3ql/s3ql/issues .. _GitHub: https://github.com/s3ql/main diff --git a/src/s3ql.egg-info/SOURCES.txt b/src/s3ql.egg-info/SOURCES.txt index d9580fd..1ea2517 100644 --- a/src/s3ql.egg-info/SOURCES.txt +++ b/src/s3ql.egg-info/SOURCES.txt @@ -241,9 +241,9 @@ src/s3ql.egg-info/PKG-INFO src/s3ql.egg-info/SOURCES.txt src/s3ql.egg-info/dependency_links.txt src/s3ql.egg-info/entry_points.txt +src/s3ql.egg-info/not-zip-safe src/s3ql.egg-info/requires.txt src/s3ql.egg-info/top_level.txt -src/s3ql.egg-info/zip-safe src/s3ql/backends/__init__.py src/s3ql/backends/common.py src/s3ql/backends/comprenc.py @@ -264,6 +264,7 @@ tests/Dockerfile tests/common.py tests/conftest.py tests/mock_server.py +tests/mount_helper.py tests/pytest.ini tests/pytest_checklogs.py tests/run-tests-via-docker.sh diff --git a/src/s3ql.egg-info/zip-safe b/src/s3ql.egg-info/not-zip-safe index 8b13789..8b13789 100644 --- a/src/s3ql.egg-info/zip-safe +++ b/src/s3ql.egg-info/not-zip-safe diff --git a/src/s3ql/__init__.py b/src/s3ql/__init__.py index e5d6deb..5e1c497 100644 --- a/src/s3ql/__init__.py +++ b/src/s3ql/__init__.py @@ -38,7 +38,7 @@ assert logging.LOG_ONCE # prevent warnings about unused module from pyfuse3 import ROOT_INODE -VERSION = '3.7.0' +VERSION = '3.7.3' RELEASE = '%s' % VERSION # TODO: On next revision bump, remove upgrade code from backend/comprenc.py and diff --git a/src/s3ql/backends/b2/b2_backend.py b/src/s3ql/backends/b2/b2_backend.py index e4fc29a..552a7ed 100644 --- a/src/s3ql/backends/b2/b2_backend.py +++ b/src/s3ql/backends/b2/b2_backend.py @@ -58,17 +58,17 @@ class B2Backend(AbstractBackend, metaclass=ABCDocstMeta): self.b2_application_key_id = options.backend_login self.b2_application_key = options.backend_password - self.tcp_timeout = self.options.get('tcp-timeout', 20) + self.tcp_timeout = int(self.options.get('tcp-timeout', 20)) self.account_id = None - self.disable_versions 
= self.options.get('disable-versions', False) - self.retry_on_cap_exceeded = self.options.get('retry-on-cap-exceeded', False) + self.disable_versions = 'disable-versions' in self.options + self.retry_on_cap_exceeded = 'retry-on-cap-exceeded' in self.options # Test modes - self.test_mode_fail_some_uploads = self.options.get('test-mode-fail-some-uploads', False) - self.test_mode_expire_some_tokens = self.options.get('test-mode-expire-some-tokens', False) - self.test_mode_force_cap_exceeded = self.options.get('test-mode-force-cap-exceeded', False) + self.test_mode_fail_some_uploads = 'test-mode-fail-some-uploads' in self.options + self.test_mode_expire_some_tokens = 'test-mode-expire-some-tokens' in self.options + self.test_mode_force_cap_exceeded = 'test-mode-force-cap-exceeded' in self.options (bucket_name, prefix) = self._parse_storage_url(options.storage_url, self.ssl_context) self.bucket_name = bucket_name diff --git a/src/s3ql/backends/gs.py b/src/s3ql/backends/gs.py index 730c16a..11b306a 100644 --- a/src/s3ql/backends/gs.py +++ b/src/s3ql/backends/gs.py @@ -436,15 +436,16 @@ class Backend(AbstractBackend, metaclass=ABCDocstMeta): raise ValueError('md5 passed to write_fd does not match fd data') resp = self.conn.read_response() - if resp.status != 200: + # If we're really unlucky, then the token has expired while we were uploading data. + if resp.status == 401: + self.conn.discard() + raise AccessTokenExpired() + elif resp.status != 200: exc = self._parse_error_response(resp) - # If we're really unlucky, then the token has expired while we - # were uploading data. 
- if exc.message == 'Invalid Credentials': - raise AccessTokenExpired() raise _map_request_error(exc, key) or exc self._parse_json_response(resp) + @retry @copy_ancestor_docstring def update_meta(self, key, metadata): diff --git a/src/s3ql/block_cache.py b/src/s3ql/block_cache.py index f1d8641..8082022 100644 --- a/src/s3ql/block_cache.py +++ b/src/s3ql/block_cache.py @@ -71,9 +71,7 @@ class CacheEntry(object): self.blockno = blockno self.last_write = 0 self.pos = self.fh.tell() - # use allocation size instead of st_size - # to properly account for small files - self.size = os.fstat(self.fh.fileno()).st_blocks * 512 + self.size = os.fstat(self.fh.fileno()).st_size def read(self, size=None): buf = self.fh.read(size) diff --git a/src/s3ql/cp.py b/src/s3ql/cp.py index 2f8b3c4..9775ac8 100644 --- a/src/s3ql/cp.py +++ b/src/s3ql/cp.py @@ -88,9 +88,13 @@ def main(args=None): # Make sure that write cache is flushed pyfuse3.syncfs(options.target) - fstat_t = os.stat(options.target) - pyfuse3.setxattr(ctrlfile, 'copy', - ('(%d, %d)' % (fstat_s.st_ino, fstat_t.st_ino)).encode()) + # Ensure the inode of the target folder stays in the kernel dentry cache + # (We invalidate it during the copy) + with os.scandir(options.target) as it: + + fstat_t = os.stat(options.target) + pyfuse3.setxattr(ctrlfile, 'copy', + ('(%d, %d)' % (fstat_s.st_ino, fstat_t.st_ino)).encode()) if __name__ == '__main__': main(sys.argv[1:]) diff --git a/src/s3ql/mount.py b/src/s3ql/mount.py index 6ae73a5..00a14a0 100644 --- a/src/s3ql/mount.py +++ b/src/s3ql/mount.py @@ -204,7 +204,6 @@ async def main_async(options, stdout_log_handler): async with AsyncExitStack() as cm: block_cache = BlockCache(backend_pool, db, cachepath + '-cache', options.cachesize * 1024, options.max_cache_entries) - block_cache.init(options.threads) cm.push_async_callback(block_cache.destroy, options.keep_cache) operations = fs.Operations(block_cache, db, max_obj_size=param['max_obj_size'], @@ -240,6 +239,8 @@ async def 
main_async(options, stdout_log_handler): mark_metadata_dirty(backend, cachepath, param) + block_cache.init(options.threads) + nursery.start_soon(metadata_upload_task.run, name='metadata-upload-task') cm.callback(metadata_upload_task.stop) diff --git a/tests/mount_helper.py b/tests/mount_helper.py new file mode 100644 index 0000000..890e6bc --- /dev/null +++ b/tests/mount_helper.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +''' +This file reproduces mount.s3ql, with pyfuse3.invalidate_inode patched +for the t5_cp.py::TestCp::test_cp_inode_invalidate test. +''' + +import sys +import os.path + +basedir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) +sys.path = [os.path.join(basedir, 'src')] + sys.path + +# Override pyfuse3.invalidate_inode : Drop kernel dentries and inodes cache +# just before calling pyfuse3.invalidate_inode +import pyfuse3 +pyfuse3_invalidate_inode = pyfuse3.invalidate_inode +def patched_pyfuse3_invalidate_inode(inode): + # echo 2 > /proc/sys/vm/drop_caches : Drop kernel dentries and inodes cache + with open("/proc/sys/vm/drop_caches", "w") as drop_caches: + drop_caches.write("2\n") + pyfuse3_invalidate_inode(inode) +pyfuse3.invalidate_inode = patched_pyfuse3_invalidate_inode + +import s3ql.mount +s3ql.mount.main(sys.argv[1:])
\ No newline at end of file diff --git a/tests/t3_fsck.py b/tests/t3_fsck.py index efed085..83238b8 100755 --- a/tests/t3_fsck.py +++ b/tests/t3_fsck.py @@ -75,11 +75,12 @@ class fsck_tests(unittest.TestCase): self._link(b'test-entry', inode) # Create new block - fh = open(self.cachedir + '/%d-0' % inode, 'wb') - fh.write(b'somedata') - fh.close() + with open(self.cachedir + '/%d-0' % inode, 'wb') as fh: + fh.write(b'somedata') + size = os.stat(fh.name).st_size self.assert_fsck(self.fsck.check_cache) self.assertEqual(self.backend['s3ql_data_1'], b'somedata') + assert self.db.get_val('SELECT size FROM inodes WHERE id=?', (inode,)) == size # Existing block self.db.execute('UPDATE inodes SET size=? WHERE id=?', @@ -220,31 +221,55 @@ class fsck_tests(unittest.TestCase): 0, 0, time_ns(), time_ns(), time_ns(), 1, 128)) self._link(b'test-entry', id_) + block_size = self.max_obj_size // 3 obj_id = self.db.rowid('INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36)) block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size, hash) ' - 'VALUES(?,?,?,?)', (1, obj_id, 512, sha256(b'foo'))) + 'VALUES(?,?,?,?)', (1, obj_id, block_size, sha256(b'foo'))) self.backend['s3ql_data_%d' % obj_id] = b'foo' - # Case 1 - self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.max_obj_size + 120, id_)) + # One block, no holes, size plausible + self.db.execute('UPDATE inodes SET size=? WHERE id=?', (block_size, id_)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', - (id_, 1, block_id)) + (id_, 0, block_id)) + self.fsck.found_errors = False + self.fsck.check() + assert not self.fsck.found_errors + + # One block, size not plausible + self.db.execute('UPDATE inodes SET size=? WHERE id=?', (block_size-1, id_)) self.assert_fsck(self.fsck.check_inodes_size) - # Case 2 + # Two blocks, hole at the beginning, size plausible self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,)) + self.db.execute('UPDATE inodes SET size=? 
WHERE id=?', (self.max_obj_size + block_size, id_)) + self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', + (id_, 1, block_id)) + self.fsck.found_errors = False + self.fsck.check() + assert not self.fsck.found_errors + + # Two blocks, no holes, size plausible + self.db.execute('UPDATE blocks SET refcount = 2 WHERE id = ?', (block_id,)) self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', (id_, 0, block_id)) - self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_)) + self.fsck.found_errors = False + self.fsck.check() + assert not self.fsck.found_errors + + # Two blocks, size not plausible + self.db.execute('UPDATE inodes SET size=? WHERE id=?', + (self.max_obj_size + block_size-1, id_)) self.assert_fsck(self.fsck.check_inodes_size) - # Case 3 - self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)', - (id_, 1, block_id)) + # Two blocks, hole at the end, size plausible self.db.execute('UPDATE inodes SET size=? WHERE id=?', - (self.max_obj_size + 120, id_)) - self.db.execute('UPDATE blocks SET refcount = refcount + 1 WHERE id = ?', - (block_id,)) + (self.max_obj_size + block_size + 1, id_)) + self.fsck.found_errors = False + self.fsck.check() + assert not self.fsck.found_errors + + # Two blocks, size not plausible + self.db.execute('UPDATE inodes SET size=? 
WHERE id=?', (self.max_obj_size, id_)) self.assert_fsck(self.fsck.check_inodes_size) diff --git a/tests/t4_fuse.py b/tests/t4_fuse.py index 8c2c836..040f1b4 100755 --- a/tests/t4_fuse.py +++ b/tests/t4_fuse.py @@ -76,11 +76,14 @@ class TestFuse: self.reg_output(r'^WARNING: Maximum object sizes less than ' '1 MiB will degrade performance\.$', count=1) - def mount(self, expect_fail=None, extra_args=[]): + def mount(self, expect_fail=None, in_foreground = True, extra_args=[]): cmd = (self.s3ql_cmd_argv('mount.s3ql') + - ["--fg", '--cachedir', self.cache_dir, '--log', 'none', + ['--cachedir', self.cache_dir, '--log', 'none', '--compress', 'zlib', '--quiet', self.storage_url, self.mnt_dir, - '--authfile', '/dev/null' ] + extra_args) + '--authfile', '/dev/null' ]) + if in_foreground: + cmd += ["--fg"] + cmd += extra_args self.mount_process = subprocess.Popen(cmd, stdin=subprocess.PIPE, universal_newlines=True) if self.backend_login is not None: diff --git a/tests/t5_cp.py b/tests/t5_cp.py index d2c8681..24a282a 100755 --- a/tests/t5_cp.py +++ b/tests/t5_cp.py @@ -12,8 +12,9 @@ if __name__ == '__main__': import sys sys.exit(pytest.main([__file__] + sys.argv[1:])) -from common import populate_dir, skip_without_rsync +from common import populate_dir, skip_without_rsync, retry import os.path +import sys import shutil import subprocess from subprocess import check_output, CalledProcessError @@ -31,6 +32,16 @@ class TestCp(t4_fuse.TestFuse): self.umount() self.fsck() + def test_bg(self): + skip_without_rsync() + self.mkfs() + self.logfile = tempfile.NamedTemporaryFile() + self.mount(in_foreground=False, extra_args=["--log", self.logfile.name]) + self.tst_cp() + self.umount() + self.fsck() + self.logfile.close() + def tst_cp(self): tempdir = tempfile.mkdtemp(prefix='s3ql-cp-') @@ -59,3 +70,37 @@ class TestCp(t4_fuse.TestFuse): finally: shutil.rmtree(tempdir) + + + def test_cp_inode_invalidate(self): + if os.getuid() != 0: + pytest.skip('test_cp_inode_invalidate requires root, 
skipping.') + + self.passphrase = None + self.mkfs() + + # Run monkeypatched mount.s3ql with overridden pyfuse3.invalidate_inode : + # Drop kernel dentries and inodes cache just before calling pyfuse3.invalidate_inode + cmd = ([sys.executable, os.path.join(os.path.dirname(__file__), 'mount_helper.py'), "--fg", '--cachedir', + self.cache_dir, '--log', 'none', + '--compress', 'zlib', '--quiet', self.storage_url, self.mnt_dir, + '--authfile', '/dev/null' ]) + self.mount_process = subprocess.Popen(cmd, universal_newlines=True) + def poll(): + if os.path.ismount(self.mnt_dir): + return True + assert self.mount_process.poll() is None + retry(10, poll) + + + os.mkdir(os.path.join(self.mnt_dir, 'orig')) + + cmd = (self.s3ql_cmd_argv('s3qlcp') + + [ '--quiet', os.path.join(self.mnt_dir, 'orig'), + os.path.join(self.mnt_dir, 'copy')]) + cp_process = subprocess.Popen(cmd) + retry(5, lambda : cp_process.poll() is not None) + assert cp_process.wait() == 0 + + self.umount() + self.fsck() |