summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Fladischer <FladischerMichael@fladi.at>2017-01-26 17:39:23 +0100
committerMichael Fladischer <FladischerMichael@fladi.at>2017-01-26 17:39:23 +0100
commitbdb10da94ce984ae8dd3926ee24f256b869e0b73 (patch)
tree1b1858b7759c5481b9dc1c6bec7a0f4e0280dfe3
importing python-testfixtures_4.13.3.orig.tar.gz
-rw-r--r--.coveragerc22
-rw-r--r--.gitignore22
-rw-r--r--.travis.yml32
-rw-r--r--LICENSE.txt22
-rw-r--r--README.rst18
-rw-r--r--docs/Makefile78
-rw-r--r--docs/api.txt308
-rw-r--r--docs/changes.txt700
-rw-r--r--docs/comparing.txt898
-rwxr-xr-xdocs/components.txt70
-rw-r--r--docs/conf.py38
-rw-r--r--docs/datetime.txt480
-rw-r--r--docs/description.txt47
-rw-r--r--docs/development.txt66
-rw-r--r--docs/exceptions.txt139
-rw-r--r--docs/files.txt649
-rw-r--r--docs/index.txt49
-rw-r--r--docs/installation.txt30
-rw-r--r--docs/license.txt5
-rw-r--r--docs/logging.txt431
-rw-r--r--docs/make.bat100
-rw-r--r--docs/mocking.txt414
-rw-r--r--docs/popen.txt133
-rw-r--r--docs/streams.txt98
-rw-r--r--docs/utilities.txt170
-rw-r--r--docs/warnings.txt90
-rw-r--r--requirements.txt1
-rw-r--r--setup.cfg2
-rw-r--r--setup.py44
-rw-r--r--testfixtures/__init__.py28
-rw-r--r--testfixtures/comparison.py725
-rw-r--r--testfixtures/compat.py54
-rw-r--r--testfixtures/components.py45
-rw-r--r--testfixtures/logcapture.py201
-rw-r--r--testfixtures/manuel.py94
-rw-r--r--testfixtures/outputcapture.py78
-rw-r--r--testfixtures/popen.py141
-rw-r--r--testfixtures/replace.py140
-rw-r--r--testfixtures/resolve.py44
-rw-r--r--testfixtures/rmtree.py66
-rw-r--r--testfixtures/shouldraise.py97
-rw-r--r--testfixtures/shouldwarn.py55
-rw-r--r--testfixtures/tdatetime.py223
-rw-r--r--testfixtures/tempdirectory.py401
-rw-r--r--testfixtures/tests/__init__.py4
-rw-r--r--testfixtures/tests/compat.py58
-rw-r--r--testfixtures/tests/configparser-read.txt45
-rw-r--r--testfixtures/tests/configparser-write.txt43
-rw-r--r--testfixtures/tests/directory-contents.txt32
-rw-r--r--testfixtures/tests/sample1.py70
-rw-r--r--testfixtures/tests/sample2.py19
-rw-r--r--testfixtures/tests/test_compare.py1372
-rw-r--r--testfixtures/tests/test_comparison.py742
-rw-r--r--testfixtures/tests/test_components.py45
-rw-r--r--testfixtures/tests/test_date.py246
-rw-r--r--testfixtures/tests/test_datetime.py356
-rw-r--r--testfixtures/tests/test_diff.py54
-rw-r--r--testfixtures/tests/test_docs.py26
-rw-r--r--testfixtures/tests/test_generator.py21
-rw-r--r--testfixtures/tests/test_log_capture.py233
-rw-r--r--testfixtures/tests/test_logcapture.py384
-rw-r--r--testfixtures/tests/test_manuel.py235
-rw-r--r--testfixtures/tests/test_manuel_examples.py35
-rw-r--r--testfixtures/tests/test_outputcapture.py76
-rw-r--r--testfixtures/tests/test_popen.py395
-rw-r--r--testfixtures/tests/test_popen_docs.py140
-rw-r--r--testfixtures/tests/test_rangecomparison.py189
-rw-r--r--testfixtures/tests/test_replace.py455
-rw-r--r--testfixtures/tests/test_replacer.py203
-rw-r--r--testfixtures/tests/test_roundcomparison.py155
-rw-r--r--testfixtures/tests/test_should_raise.py292
-rw-r--r--testfixtures/tests/test_shouldwarn.py119
-rw-r--r--testfixtures/tests/test_stringcomparison.py52
-rw-r--r--testfixtures/tests/test_tempdir.py112
-rw-r--r--testfixtures/tests/test_tempdirectory.py411
-rw-r--r--testfixtures/tests/test_time.py185
-rw-r--r--testfixtures/tests/test_wrap.py236
-rw-r--r--testfixtures/utils.py65
-rw-r--r--testfixtures/version.txt1
79 files changed, 14654 insertions, 0 deletions
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..5a84279
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,22 @@
+[run]
+branch = True
+parallel = True
+
+[report]
+exclude_lines =
+ # the original exclude
+ pragma: no cover
+
+ # code executed only when tests fail
+ 'No exception raised!'
+ self\.fail\('Expected
+
+ # example code that we don't want to cover with pragma statements
+ guppy =
+
+[paths]
+src =
+ ./testfixtures/
+ C:\Jenkins\workspace\testfixtures-virtualenv\*\testfixtures
+ /var/lib/jenkins/slave/workspace/testfixtures-virtualenv/*/testfixtures
+ /Users/Shared/Jenkins/Home/workspace/testfixtures-virtualenv/*/testfixtures
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0a1fdb0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,22 @@
+/.installed.cfg
+/bin/
+/develop-eggs
+/dist
+/docs/_build
+/eggs
+/*.egg-info/
+/parts/
+*.pyc
+/.coverage
+/*.xml
+/.tox
+/htmlcov
+/include
+/lib
+/local
+/man
+/.Python
+.noseids
+desc.html
+pip-selfcheck.json
+.coverage.*
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..ad19bfb
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,32 @@
+language: python
+
+# for container-y goodness:
+sudo: false
+
+python:
+ - "3.5"
+ - "3.4"
+ - "3.3"
+ - "2.7"
+
+# command to install dependencies
+install: "pip install -Ue .[test,build]"
+
+# command to run tests, e.g. python setup.py test
+script: nosetests --with-cov --cov=testfixtures
+
+after_success:
+ - coverage combine
+ - coveralls
+
+deploy:
+ provider: pypi
+ user: chrisw
+ password:
+ secure: QrmUWPegJzGIYGI8XGY7ztjHTbpMgVDLNV6f9fjjAoo8efOubJh/BASjWD3ESPlIo1j5tGCcSRP2MN1+30zBxq7a7oiCnthISx42DP6Ih+wLHFCu7uBm12AH96hIOQUAtQotNSwB9dJAJIKKom7xkrV/nStjbcqC7hosTZlTy6o=
+ on:
+ tags: true
+ repo: Simplistix/testfixtures
+ python: "3.4"
+ skip_cleanup: true
+ distributions: "sdist bdist_wheel"
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..63df5fd
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2008-2015 Simplistix Ltd
+Copyright (c) 2015-2016 Chris Withers
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..0a06a7a
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,18 @@
+|Travis|_ |Coveralls|_ |Docs|_
+
+.. |Travis| image:: https://api.travis-ci.org/Simplistix/testfixtures.svg?branch=master
+.. _Travis: https://travis-ci.org/Simplistix/testfixtures
+
+.. |Coveralls| image:: https://coveralls.io/repos/Simplistix/testfixtures/badge.svg?branch=master
+.. _Coveralls: https://coveralls.io/r/Simplistix/testfixtures?branch=master
+
+.. |Docs| image:: https://readthedocs.org/projects/testfixtures/badge/?version=latest
+.. _Docs: http://testfixtures.readthedocs.org/en/latest/
+
+
+TestFixtures
+============
+
+TestFixtures is a collection of helpers and mock objects that are useful when
+writing unit tests or doc tests.
+
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..1de0715
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,78 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD ?= sphinx-build
+PAPER =
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf _build/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html
+ @echo
+ @echo "Build finished. The HTML pages are in _build/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in _build/dirhtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in _build/htmlhelp."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in _build/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes
+ @echo
+ @echo "The overview file is in _build/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in _build/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in _build/doctest/output.txt."
diff --git a/docs/api.txt b/docs/api.txt
new file mode 100644
index 0000000..463ec46
--- /dev/null
+++ b/docs/api.txt
@@ -0,0 +1,308 @@
+API Reference
+=============
+
+.. currentmodule:: testfixtures
+
+.. autoclass:: Comparison
+
+.. autoclass:: LogCapture
+ :members:
+
+.. autoclass:: OutputCapture
+ :members:
+
+.. autoclass:: Replace
+ :members:
+
+.. autoclass:: Replacer
+ :members:
+
+.. autofunction:: replace
+
+.. autoclass:: RoundComparison
+ :members:
+
+.. autoclass:: RangeComparison
+ :members:
+
+.. autoclass:: ShouldRaise
+ :members:
+
+.. autoclass:: ShouldWarn
+ :members:
+
+.. autoclass:: ShouldNotWarn
+ :members:
+
+.. autoclass:: StringComparison
+ :members:
+
+.. autoclass:: TempDirectory
+ :members:
+
+.. autofunction:: compare(x, y, prefix=None, suffix=None, raises=True, recursive=True, strict=False, comparers=None, **kw)
+
+.. autofunction:: testfixtures.comparison.register
+
+.. autofunction:: testfixtures.comparison.compare_simple
+
+.. autofunction:: testfixtures.comparison.compare_with_type
+
+.. autofunction:: testfixtures.comparison.compare_sequence
+
+.. autofunction:: testfixtures.comparison.compare_generator
+
+.. autofunction:: testfixtures.comparison.compare_tuple
+
+.. autofunction:: testfixtures.comparison.compare_dict
+
+.. autofunction:: testfixtures.comparison.compare_set
+
+.. autofunction:: testfixtures.comparison.compare_text
+
+.. autofunction:: diff
+
+.. autofunction:: generator
+
+.. autofunction:: log_capture
+
+.. autoclass:: should_raise
+
+.. autofunction:: tempdir
+
+.. function:: test_date(year=2001, month=1, day=1, delta=None, delta_type='days', strict=False)
+
+ A function that returns a mock object that can be used in place of
+ the :class:`datetime.date` class but where the return value of
+ :meth:`~datetime.date.today` can be controlled.
+
+ If a single positional argument of ``None`` is passed, then the
+ queue of dates to be returned will be empty and you will need to
+ call :meth:`~tdate.set` or :meth:`~tdate.add` before calling
+ :meth:`~tdate.today`.
+
+ :param year: An optional year used to create the first date returned by
+ :meth:`~datetime.date.today`.
+
+ :param month: An optional month used to create the first date returned by
+ :meth:`~datetime.date.today`.
+
+ :param day: An optional day used to create the first date returned by
+ :meth:`~datetime.date.today`.
+
+ :param delta: The size of the delta to use between values returned
+ from :meth:`~datetime.date.today`. If not specified,
+ it will increase by 1 with each call to
+ :meth:`~datetime.date.today`.
+
+ :param delta_type: The type of the delta to use between values returned
+ from :meth:`~datetime.date.today`. This can be
+ any keyword parameter accepted by the
+ :class:`~datetime.timedelta` constructor.
+
+ :param strict: If ``True``, calling the mock class and any of its
+ methods will result in an instance of the mock
+ being returned. If ``False``, the default, an
+ instance of :class:`~datetime.date` will be returned
+ instead.
+
+ The mock returned will behave exactly as the :class:`datetime.date`
+ class with the exception of the following members:
+
+ .. method:: tdate.add(*args, **kw)
+
+ This will add the :class:`datetime.date` created from the
+ supplied parameters to the queue of dates to be returned by
+ :meth:`~datetime.date.today`. An instance
+ of :class:`~datetime.date` may also be passed as a single
+ positional argument.
+
+ .. method:: tdate.set(*args, **kw)
+
+ This will set the :class:`datetime.date` created from the
+ supplied parameters as the next date to be returned by
+ :meth:`~datetime.date.today`, regardless of any dates in the
+ queue. An instance
+ of :class:`~datetime.date` may also be passed as a single
+ positional argument.
+
+ .. classmethod:: tdate.today()
+
+ This will return the next supplied or calculated date from the
+ internal queue, rather than the actual current date.
+
+.. function:: test_datetime(year=2001, month=1, day=1, hour=0, minute=0, second=0, microsecond=0,tzinfo=None, delta=None, delta_type='seconds', date_type=datetime.date, strict=False)
+
+ A function that returns a mock object that can be used in place of
+ the :class:`datetime.datetime` class but where the return value of
+ :meth:`~tdatetime.now` can be controlled.
+
+ If a single positional argument of ``None`` is passed, then the
+ queue of datetimes to be returned will be empty and you will need to
+ call :meth:`~tdatetime.set` or :meth:`~tdatetime.add` before calling
+ :meth:`~tdatetime.now` or :meth:`~tdatetime.utcnow`.
+
+ :param year: An optional year used to create the first datetime returned by
+ :meth:`~tdatetime.now`.
+
+ :param month: An optional month used to create the first datetime returned by
+ :meth:`~tdatetime.now`.
+
+ :param day: An optional day used to create the first datetime returned by
+ :meth:`~tdatetime.now`.
+
+ :param hour: An optional hour used to create the first datetime returned by
+ :meth:`~tdatetime.now`.
+
+ :param minute: An optional minute used to create the first datetime returned by
+ :meth:`~tdatetime.now`.
+
+ :param second: An optional second used to create the first datetime returned by
+ :meth:`~tdatetime.now`.
+
+ :param microsecond: An optional microsecond used to create the first datetime returned by
+ :meth:`~tdatetime.now`.
+
+ :param tzinfo: An optional tzinfo that will be used to indicate the
+ timezone intended for the values returned by
+ :meth:`~tdatetime.now`. It will be used to
+ correctly calculate return values when `tz` is passed
+ to :meth:`~tdatetime.now` and when
+ :meth:`~tdatetime.utcnow` is called.
+
+ :param delta: The size of the delta to use between values returned
+ from :meth:`~tdatetime.now`. If not specified,
+ it will increase by 1 with each call to
+ :meth:`~tdatetime.now`.
+
+ :param delta_type: The type of the delta to use between values returned
+ from :meth:`~tdatetime.now`. This can be
+ any keyword parameter accepted by the
+ :class:`~datetime.timedelta` constructor.
+
+ :param date_type: The type to use for the return value of the
+ :meth:`~datetime.datetime.date` method. This can
+ help with gotchas that occur when type checking
+ is performed on values returned by the mock's
+ :meth:`~datetime.datetime.date` method.
+
+ :param strict: If ``True``, calling the mock class and any of its
+ methods will result in an instance of the mock
+ being returned. If ``False``, the default, an
+ instance of :class:`~datetime.datetime` will be
+ returned instead.
+
+ The mock returned will behave exactly as the :class:`datetime.datetime`
+ class with the exception of the following members:
+
+ .. method:: tdatetime.add(*args, **kw)
+
+ This will add the :class:`datetime.datetime` created from the
+ supplied parameters to the queue of datetimes to be returned by
+ :meth:`~tdatetime.now` or :meth:`~tdatetime.utcnow`. An instance
+ of :class:`~datetime.datetime` may also be passed as a single
+ positional argument.
+
+ .. method:: tdatetime.set(*args, **kw)
+
+ This will set the :class:`datetime.datetime` created from the
+ supplied parameters as the next datetime to be returned by
+ :meth:`~tdatetime.now` or :meth:`~tdatetime.utcnow`, clearing out
+ any datetimes in the queue. An instance
+ of :class:`~datetime.datetime` may also be passed as a single
+ positional argument.
+
+ .. classmethod:: tdatetime.now([tz])
+
+ :param tz: An optional timezone to apply to the returned time.
+ If supplied, it must be an instance of a
+ :class:`~datetime.tzinfo` subclass.
+
+ This will return the next supplied or calculated datetime from the
+ internal queue, rather than the actual current datetime.
+
+ If `tz` is supplied, it will be applied to the datetime that
+ would have been returned from the internal queue, treating
+ that datetime as if it were in the UTC timezone.
+
+ .. classmethod:: tdatetime.utcnow()
+
+ This will return the next supplied or calculated datetime from the
+ internal queue, rather than the actual current UTC datetime.
+
+ No timezone will be applied, even that supplied to the constructor.
+
+ .. classmethod:: tdatetime.date()
+
+ This will return the date component of the current mock instance,
+ but using the date type supplied when the mock class was created.
+
+.. function:: test_time(year=2001, month=1, day=1, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, delta=None, delta_type='seconds')
+
+ A function that returns a mock object that can be used in place of
+ the :class:`time.time` function but where the return value can be
+ controlled.
+
+ If a single positional argument of ``None`` is passed, then the
+ queue of times to be returned will be empty and you will need to
+ call :meth:`~ttime.set` or :meth:`~ttime.add` before calling
+ the mock.
+
+ :param year: An optional year used to create the first time returned.
+
+ :param month: An optional month used to create the first time.
+
+ :param day: An optional day used to create the first time.
+
+ :param hour: An optional hour used to create the first time.
+
+ :param minute: An optional minute used to create the first time.
+
+ :param second: An optional second used to create the first time.
+
+ :param microsecond: An optional microsecond used to create the first time.
+
+ :param delta: The size of the delta to use between values returned.
+ If not specified, it will increase by 1 with each
+ call to the mock.
+
+ :param delta_type: The type of the delta to use between values
+ returned. This can be
+ any keyword parameter accepted by the
+ :class:`~datetime.timedelta` constructor.
+
+ The mock additionally has the following methods available on it:
+
+ .. method:: ttime.add(*args, **kw)
+
+ This will add the time specified by the supplied parameters to the
+ queue of times to be returned by calls to the mock. The
+ parameters are the same as the :class:`datetime.datetime`
+ constructor. An instance of :class:`~datetime.datetime` may also
+ be passed as a single positional argument.
+
+ .. method:: ttime.set(*args, **kw)
+
+ This will set the time specified by the supplied parameters as
+ the next time to be returned by a call to the mock, regardless of
+ any times in the queue. The parameters are the same as the
+ :class:`datetime.datetime` constructor. An instance of
+ :class:`~datetime.datetime` may also be passed as a single
+ positional argument.
+
+.. autofunction:: wrap
+
+.. autoclass:: testfixtures.components.TestComponents
+ :members:
+
+.. autoclass:: testfixtures.manuel.Files
+
+.. data:: not_there
+
+ A singleton used to represent the absence of a particular attribute.
+
+.. currentmodule:: testfixtures.popen
+
+.. autoclass:: testfixtures.popen.MockPopen
+ :members:
+
diff --git a/docs/changes.txt b/docs/changes.txt
new file mode 100644
index 0000000..9e311f0
--- /dev/null
+++ b/docs/changes.txt
@@ -0,0 +1,700 @@
+Changes
+=======
+
+.. currentmodule:: testfixtures
+
+4.13.3 (13 December 2016)
+-------------------------
+
+- :func:`compare` now better handles equality comparison with ``ignore_eq=True``
+ when either of the objects being compared cannot be hashed.
+
+4.13.2 (16 November 2016)
+-------------------------
+
+- Fixed a bug where a :class:`LogCapture` wouldn't be cleared when used via
+ :func:`log_capture` on a base class and sub class execute the same test.
+
+Thanks to "mlabonte" for the bug report.
+
+4.13.1 (2 November 2016)
+------------------------
+
+- When ``ignore_eq`` is used with :func:`compare`, fall back to comparing by
+ hash if no type-specific comparer can be found.
+
+4.13.0 (2 November 2016)
+------------------------
+
+- Add support to :func:`compare` for ignoring broken ``__eq__`` implementations.
+
+4.12.0 (18 October 2016)
+------------------------
+
+- Add support for specifying a callable to extract rows from log records
+ when using :class:`LogCapture`.
+
+- Add support for recursive comparison of log messages with :class:`LogCapture`.
+
+4.11.0 (12 October 2016)
+-------------------------
+
+- Allow the attributes returned in :meth:`LogCapture.actual` rows to be
+ specified.
+
+- Allow a default to be specified for encoding in :meth:`TempDirectory.read` and
+ :meth:`TempDirectory.write`.
+
+4.10.1 (5 September 2016)
+-------------------------
+
+- Better docs for :meth:`TempDirectory.compare`.
+
+- Remove the need for expected paths supplied to :meth:`TempDirectory.compare`
+ to be in sorted order.
+
+- Document a good way of restoring ``stdout`` when in a debugger.
+
+- Fix handling of trailing slashes in :meth:`TempDirectory.compare`.
+
+Thanks to Maximilian Albert for the :meth:`TempDirectory.compare` docs.
+
+4.10.0 (17 May 2016)
+--------------------
+
+- Fixed examples in documentation broken in 4.5.1.
+
+- Add :class:`RangeComparison` for comparing against values that fall in a
+ range.
+
+- Add :meth:`~popen.MockPopen.set_default` to :class:`~popen.MockPopen`.
+
+Thanks to Asaf Peleg for the :class:`RangeComparison` implementation.
+
+4.9.1 (19 February 2016)
+------------------------
+
+- Fix for use with PyPy, broken since 4.8.0.
+
+Thanks to Nicola Iarocci for the pull request to fix.
+
+4.9.0 (18 February 2016)
+------------------------
+
+- Added the `suffix` parameter to :func:`compare` to allow failure messages
+ to include some additional context.
+
+- Update package metadata to indicate Python 3.5 compatibility.
+
+Thanks to Felix Yan for the metadata patch.
+
+Thanks to Wim Glenn for the suffix patch.
+
+4.8.0 (2 February 2016)
+-----------------------
+
+- Introduce a new :class:`Replace` context manager and make :class:`Replacer`
+ callable. This gives more succinct and easy to read mocking code.
+
+- Add :class:`ShouldWarn` and :class:`ShouldNotWarn` context managers.
+
+4.7.0 (10 December 2015)
+------------------------
+
+- Add the ability to pass ``raises=False`` to :func:`compare` to just get
+ the resulting message back rather than having an exception raised.
+
+4.6.0 (3 December 2015)
+------------------------
+
+- Fix a bug that meant symlinked directories would never show up when using
+ :meth:`TempDirectory.compare` and friends.
+
+- Add the ``followlinks`` parameter to :meth:`TempDirectory.compare` to
+ indicate that symlinked or hard linked directories should be recursed into
+ when using ``recursive=True``.
+
+4.5.1 (23 November 2015)
+------------------------
+
+- Switch from :class:`cStringIO` to :class:`StringIO` in :class:`OutputCapture`
+ to better handle unicode being written to `stdout` or `stderr`.
+
+Thanks to "tell-k" for the patch.
+
+4.5.0 (13 November 2015)
+------------------------
+
+- :class:`LogCapture`, :class:`OutputCapture` and :class:`TempDirectory` now
+ explicitly show what is expected versus actual when reporting differences.
+
+Thanks to Daniel Fortunov for the pull request.
+
+4.4.0 (1 November 2015)
+-----------------------
+
+- Add support for labelling the arguments passed to :func:`compare`.
+
+- Allow ``expected`` and ``actual`` keyword parameters to be passed to
+ :func:`compare`.
+
+- Fix ``TypeError: unorderable types`` when :func:`compare` found multiple
+ differences in sets and dictionaries on Python 3.
+
+- Add official support for Python 3.5.
+
+- Drop official support for Python 2.6.
+
+Thanks to Daniel Fortunov for the initial ideas for explicit ``expected`` and
+``actual`` support in :func:`compare`.
+
+4.3.3 (15 September 2015)
+-------------------------
+
+- Add wheel distribution to release.
+
+- Attempt to fix up various niggles from the move to Travis CI for doing
+ releases.
+
+4.3.2 (15 September 2015)
+-------------------------
+
+- Fix broken 4.3.1 tag.
+
+4.3.1 (15 September 2015)
+-------------------------
+
+- Fix build problems introduced by moving the build process to Travis CI.
+
+4.3.0 (15 September 2015)
+-------------------------
+
+- Add :meth:`TempDirectory.compare` with a cleaner, more explicit API that
+ allows comparison of only the files in a temporary directory.
+
+- Deprecate :meth:`TempDirectory.check`, :meth:`TempDirectory.check_dir`
+ and :meth:`TempDirectory.check_all`
+
+- Relax absolute-path rules so that if it's inside the :class:`TempDirectory`,
+ it's allowed.
+
+- Allow :class:`OutputCapture` to separately check output to ``stdout`` and
+ ``stderr``.
+
+4.2.0 (11 August 2015)
+----------------------
+
+- Add :class:`~testfixtures.popen.MockPopen`, a mock helpful when testing
+ code that uses :class:`subprocess.Popen`.
+
+- :class:`ShouldRaise` now subclasses :class:`object`, so that subclasses of it
+ may use :meth:`super()`.
+
+- Drop official support for Python 3.2.
+
+Thanks to BATS Global Markets for donating the code for
+:class:`~testfixtures.popen.MockPopen`.
+
+4.1.2 (30 January 2015)
+-----------------------
+
+- Clarify documentation for ``name`` parameter to :class:`LogCapture`.
+
+- :class:`ShouldRaise` now shows different output when two exceptions have
+ the same representation but still differ.
+
+- Fix bug that could result in a :class:`dict` comparing equal to a
+ :class:`list`.
+
+Thanks to Daniel Fortunov for the documentation clarification.
+
+4.1.1 (30 October 2014)
+-------------------------
+
+- Fix bug that prevented logger propagation to be controlled by the
+ :class:`log_capture` decorator.
+
+Thanks to John Kristensen for the fix.
+
+4.1.0 (14 October 2014)
+-------------------------
+
+- Fix :func:`compare` bug when :class:`dict` instances with
+ :class:`tuple` keys were not equal.
+
+- Allow logger propagation to be controlled by :class:`LogCapture`.
+
+- Enabled disabled loggers if a :class:`LogCapture` is attached to them.
+
+Thanks to Daniel Fortunov for the :func:`compare` fix.
+
+4.0.2 (10 September 2014)
+-------------------------
+
+- Fix "maximum recursion depth exceeded" when comparing a string with
+ bytes that did not contain the same character.
+
+4.0.1 (4 August 2014)
+---------------------
+
+- Fix bugs when string compared equal and options to :func:`compare`
+ were used.
+
+- Fix bug when strictly comparing two nested structures containing
+ identical objects.
+
+4.0.0 (22 July 2014)
+--------------------
+
+- Moved from buildout to virtualenv for development.
+
+- The ``identity`` singleton is no longer needed and has been
+ removed.
+
+- :func:`compare` will now work recursively on data structures for
+ which it has registered comparers, giving more detailed feedback on
+ nested data structures. Strict comparison will also be applied
+ recursively.
+
+- Re-work the interfaces for using custom comparers with
+ :func:`compare`.
+
+- Better feedback when comparing :func:`collections.namedtuple`
+ instances.
+
+- Official support for Python 3.4.
+
+Thanks to Yevgen Kovalienia for the typo fix in :doc:`datetime`.
+
+3.1.0 (25 May 2014)
+-------------------
+
+- Added :class:`RoundComparison` helper for comparing numerics to a
+ specific precision.
+
+- Added ``unless`` parameter to :class:`ShouldRaise` to cover
+ some very specific edge cases.
+
+- Fix missing imports that showed up when :class:`TempDirectory` had to do
+ the "convoluted folder delete" dance on Windows.
+
+Thanks to Jon Thompson for the :class:`RoundComparison` implementation.
+
+Thanks to Matthias Lehmann for the import error reports.
+
+3.0.2 (7 April 2014)
+--------------------
+
+- Document :attr:`ShouldRaise.raised` and make it part of the official
+ API.
+
+- Fix rare failures when cleaning up :class:`TempDirectory` instances
+ on Windows.
+
+3.0.1 (10 June 2013)
+--------------------
+
+- Some documentation tweaks and clarifications.
+
+- Fixed a bug which masked exceptions when using :func:`compare` with
+ a broken generator.
+
+- Fixed a bug when comparing a generator with a non-generator.
+
+- Ensure :class:`LogCapture` cleans up global state it may effect.
+
+- Fixed replacement of static methods using a :class:`Replacer`.
+
+3.0.0 (5 March 2013)
+--------------------
+
+- Added compatibility with Python 3.2 and 3.3.
+
+- Dropped compatibility with Python 2.5.
+
+- Removed support for the following obscure uses of
+ :class:`should_raise`:
+
+ .. code-block:: python
+
+ should_raise(x, IndexError)[1]
+ should_raise(x, KeyError)['x']
+
+- Dropped the `mode` parameter to :meth:`TempDirectory.read`.
+
+- :meth:`TempDirectory.makedir` and :meth:`TempDirectory.write` no
+ longer accept a `path` parameter.
+
+- :meth:`TempDirectory.read` and :meth:`TempDirectory.write` now
+ accept an `encoding` parameter to control how non-byte data is
+ decoded and encoded respectively.
+
+- Added the `prefix` parameter to :func:`compare` to allow failure
+ messages to be made more informative.
+
+- Fixed a problem when using sub-second deltas with :func:`test_time`.
+
+2.3.5 (13 August 2012)
+----------------------
+
+- Fixed a bug in :func:`~testfixtures.comparison.compare_dict` that
+ mean the list of keys that were the same was returned in an unsorted
+ order.
+
+2.3.4 (31 January 2012)
+-----------------------
+
+- Fixed compatibility with Python 2.5
+
+- Fixed compatibility with Python 2.7
+
+- Development model moved to continuous integration using Jenkins.
+
+- Introduced `Tox`__ based testing to ensure packaging and
+ dependencies are as expected.
+
+ __ http://tox.testrun.org/latest/
+
+- 100% line and branch coverage with tests.
+
+- Mark :class:`test_datetime`, :class:`test_date` and
+ :class:`test_time` such that nose doesn't mistake them as tests.
+
+2.3.3 (12 December 2011)
+-------------------------
+
+- Fixed a bug where when a target was replaced more than once using a
+ single :class:`Replacer`, :meth:`~Replacer.restore` would not
+ correctly restore the original.
+
+2.3.2 (10 November 2011)
+-------------------------
+
+- Fixed a bug where attributes and keys could not be
+ removed by a :class:`Replacer` as described in
+ :ref:`removing_attr_and_item` if the attribute or key might not be
+ there, such as where a test wants to ensure an ``os.environ``
+ variable is not set.
+
+2.3.1 (8 November 2011)
+-------------------------
+
+- Move to use `nose <http://readthedocs.org/docs/nose/>`__ for running
+ the TestFixtures unit tests.
+
+- Fixed a bug where :meth:`tdatetime.now` returned an instance of the
+ wrong type when `tzinfo` was passed in
+ :ref:`strict mode <strict-dates-and-times>`.
+
+2.3.0 (11 October 2011)
+-------------------------
+
+- :class:`Replacer`, :class:`TempDirectory`, :class:`LogCapture` and
+ :class:`~components.TestComponents` instances will now warn if the
+ process they are created in exits without them being cleaned
+ up. Instances of these classes should be cleaned up at the end of
+ each test and these warnings serve to point to a cause for possible
+ mysterious failures elsewhere.
+
+2.2.0 (4 October 2011)
+-------------------------
+
+- Add a :ref:`strict mode <strict-dates-and-times>` to
+ :class:`test_datetime` and :class:`test_date`.
+ When used, instances returned from the mocks are instances of those
+ mocks. The default behaviour is now to return instances of the real
+ :class:`~datetime.datetime` and :class:`~datetime.date` classes
+ instead, which is usually much more useful.
+
+2.1.0 (29 September 2011)
+-------------------------
+
+- Add a :ref:`strict mode <strict-comparison>` to
+ :func:`compare`. When used, it ensures that
+ the values compared are not only equal but also of the same
+ type. This mode is not used by default, and the default mode
+ restores the more commonly useful functionality where values of
+ similar types but that aren't equal give useful feedback about
+ differences.
+
+2.0.1 (23 September 2011)
+-------------------------
+
+- add back functionality to allow comparison of generators with
+ non-generators.
+
+2.0.0 (23 September 2011)
+-------------------------
+
+- :func:`compare` now uses a registry of comparers that can be
+ modified either by passing a `registry` option to :func:`compare`
+ or, globally, using the :func:`~comparison.register` function.
+
+- added a comparer for :class:`set` instances to :func:`compare`.
+
+- added a new `show_whitespace` parameter to
+ :func:`~comparison.compare_text`, the comparer used when comparing
+ strings and unicodes with :func:`compare`.
+
+- The internal queue for :class:`test_datetime` is now considered to
+ be in local time. This has implication on the values returned from
+ both :meth:`~tdatetime.now` and :meth:`~tdatetime.utcnow` when
+ `tzinfo` is passed to the :class:`test_datetime` constructor.
+
+- :meth:`set` and :meth:`add` on :class:`test_date`,
+ :class:`test_datetime` and :class:`test_time` now accept instances
+ of the appropriate type as an alternative to just passing in the
+ parameters to create the instance.
+
+- Refactored the monolithic ``__init__.py`` into modules for each
+ type of functionality.
+
+1.12.0 (16 August 2011)
+-----------------------
+
+- Add a :attr:`~OutputCapture.captured` property to
+ :class:`OutputCapture` so that more complex assertion can be made
+ about the output that has been captured.
+
+- :class:`OutputCapture` context managers can now be temporarily
+ disabled using their :meth:`~OutputCapture.disable` method.
+
+- Logging can now be captured only when it exceeds a specified logging
+ level.
+
+- The handling of timezones has been reworked in both
+ :func:`test_datetime` and :func:`test_time`. This is not backwards
+ compatible but is much more useful and correct.
+
+1.11.3 (3 August 2011)
+----------------------
+
+- Fix bugs where various :meth:`test_date`, :meth:`test_datetime` and
+ :meth:`test_time` methods didn't accept keyword parameters.
+
+1.11.2 (28 July 2011)
+---------------------
+
+- Fix for 1.10 and 1.11 releases that didn't include non-.py files as
+ a result of the move from subversion to git.
+
+1.11.1 (28 July 2011)
+---------------------
+
+- Fix bug where :meth:`tdatetime.now` didn't accept the `tz`
+ parameter that :meth:`datetime.datetime.now` did.
+
+1.11.0 (27 July 2011)
+---------------------
+
+- Give more useful output when comparing dicts and their subclasses.
+
+- Turn :class:`should_raise` into a decorator form of
+ :class:`ShouldRaise` rather than the rather out-moded wrapper
+ function that it was.
+
+1.10.0 (19 July 2011)
+---------------------
+
+- Remove dependency on :mod:`zope.dottedname`.
+
+- Implement the ability to mock out :class:`dict` and :class:`list`
+ items using :class:`~testfixtures.Replacer` and
+ :func:`~testfixtures.replace`.
+
+- Implement the ability to remove attributes and :class:`dict`
+ items using :class:`~testfixtures.Replacer` and
+ :func:`~testfixtures.replace`.
+
+1.9.2 (20 April 2011)
+---------------------
+
+- Fix for issue #328: :meth:`~tdatetime.utcnow` of :func:`test_datetime`
+ now returns items from the internal queue in the same way as
+ :meth:`~tdatetime.now`.
+
+1.9.1 (11 March 2011)
+------------------------
+
+- Fix bug when :class:`ShouldRaise` context managers incorrectly
+ reported what exception was incorrectly raised when the incorrectly
+ raised exception was a :class:`KeyError`.
+
+1.9.0 (11 February 2011)
+------------------------
+
+- Added :class:`~components.TestComponents` for getting a sterile
+ registry when testing code that uses :mod:`zope.component`.
+
+1.8.0 (14 January 2011)
+-----------------------
+
+- Added full Sphinx-based documentation.
+
+- added a `Manuel <http://packages.python.org/manuel/>`__ plugin for
+ reading and writing files into a :class:`TempDirectory`.
+
+- any existing log handlers present when a :class:`LogCapture` is
+ installed for a particular logger are now removed.
+
+- fix the semantics of :class:`should_raise`, which should always
+ expect an exception to be raised!
+
+- added the :class:`ShouldRaise` context manager.
+
+- added recursive support to :meth:`TempDirectory.listdir` and added
+ the new :meth:`TempDirectory.check_all` method.
+
+- added support for forward-slash separated paths to all relevant
+ :class:`TempDirectory` methods.
+
+- added :meth:`TempDirectory.getpath` method.
+
+- allow files and directories to be ignored by a regular expression
+ specification when using :class:`TempDirectory`.
+
+- made :class:`Comparison` objects work when the attributes expected
+ might be class attributes.
+
+- re-implement :func:`test_time` so that it uses the correct way to
+ get timezone-less time.
+
+- added :meth:`~tdatetime.set` along with `delta` and `delta_type`
+ parameters to :func:`test_date`, :func:`test_datetime` and
+ :func:`test_time`.
+
+- allow the date class returned by the :meth:`tdatetime.date` method
+ to be configured.
+
+- added the :class:`OutputCapture` context manager.
+
+- added the :class:`StringComparison` class.
+
+- added options to ignore trailing whitespace and blank lines when
+ comparing multi-line strings with :func:`compare`.
+
+- fixed bugs in the handling of some exception types when using
+ :class:`Comparison`, :class:`ShouldRaise` or :class:`should_raise`.
+
+- changed :func:`wrap` to correctly set __name__, along with some
+ other attributes, which should help when using the decorators with
+ certain testing frameworks.
+
+1.7.0 (20 January 2010)
+-----------------------
+
+- fixed a bug where the @replace decorator passed a classmethod
+  rather than the replacement to the decorated callable when replacing
+ a classmethod
+
+- added set method to test_date, test_datetime and test_time to allow
+ setting the parameters for the next instance to be returned.
+
+- added delta and delta_type parameters to test_date, test_datetime and
+ test_time to control the intervals between returned instances.
+
+
+1.6.2 (23 September 2009)
+-------------------------
+
+- changed Comparison to use __eq__ and __ne__ instead of the
+ deprecated __cmp__
+
+- documented that order matters when using Comparisons with objects
+ that implement __eq__ themselves, such as instances of Django
+ models.
+
+1.6.1 (06 September 2009)
+-------------------------
+
+- @replace and Replacer.replace can now replace attributes that may
+ not be present, provided the `strict` parameter is passed as False.
+
+- should_raise now catches BaseException rather than Exception so
+ raising of SystemExit and KeyboardInterrupt can be tested.
+
+1.6.0 (09 May 2009)
+-------------------
+
+- added support for using TempDirectory, Replacer and LogCapture as
+ context managers.
+
+- fixed test failure in Python 2.6.
+
+1.5.4 (11 Feb 2009)
+-------------------
+
+- fix bug where should_raise didn't complain when no exception
+ was raised but one was expected.
+
+- clarified that the return of a should_raise call will be None
+ in the event that an exception is raised but no expected
+ exception is specified.
+
+1.5.3 (17 Dec 2008)
+-------------------
+
+- should_raise now supports methods other than __call__
+
+1.5.2 (14 Dec 2008)
+-------------------
+
+- added `makedir` and `check_dir` methods to TempDirectory and added
+ support for sub directories to `read` and `write`
+
+1.5.1 (12 Dec 2008)
+-------------------
+
+- added `path` parameter to `write` method of TempDirectory so
+  that the full path of the file written can be easily obtained
+
+1.5.0 (12 Dec 2008)
+-------------------
+
+- added handy `read` and `write` methods to TempDirectory for
+ creating and reading files in the temporary directory
+
+- added support for rich comparison of objects that don't support
+ vars()
+
+1.4.0 (12 Dec 2008)
+-------------------
+
+- improved representation of failed Comparison
+
+- improved representation of failed compare with sequences
+
+1.3.1 (10 Dec 2008)
+-------------------
+
+- fixed bug that occurs when directory was deleted by a test that
+ use tempdir or TempDirectory
+
+1.3.0 (9 Dec 2008)
+------------------
+
+- added TempDirectory helper
+
+- added tempdir decorator
+
+1.2.0 (3 Dec 2008)
+------------------
+
+- LogCaptures now auto-install on creation unless configured otherwise
+
+- LogCaptures now have a clear method
+
+- LogCaptures now have a class method uninstall_all that uninstalls
+ all instances of LogCapture. Handy for a tearDown method in doctests.
+
+1.1.0 (3 Dec 2008)
+------------------
+
+- add support to Comparisons for only comparing some attributes
+
+- move to use zope.dottedname
+
+1.0.0 (26 Nov 2008)
+-------------------
+
+- Initial Release
diff --git a/docs/comparing.txt b/docs/comparing.txt
new file mode 100644
index 0000000..db70040
--- /dev/null
+++ b/docs/comparing.txt
@@ -0,0 +1,898 @@
+Comparing objects and sequences
+===============================
+
+.. currentmodule:: testfixtures
+
+Python's :mod:`unittest` package often fails to give very useful
+feedback when comparing long sequences or chunks of text. It also has
+trouble dealing with objects that don't natively support
+comparison. The functions and classes described here alleviate these
+problems.
+
+The compare function
+--------------------
+
+The :func:`compare` function can be used as a replacement for
+:meth:`~unittest.TestCase.assertEqual`. It raises an
+``AssertionError`` when its parameters are not equal, which will be
+reported as a test failure:
+
+>>> from testfixtures import compare
+>>> compare(1, 2)
+Traceback (most recent call last):
+ ...
+AssertionError: 1 != 2
+
+However, it allows you to specify a prefix for the message to be used
+in the event of failure:
+
+>>> compare(1, 2, prefix='wrong number of orders')
+Traceback (most recent call last):
+ ...
+AssertionError: wrong number of orders: 1 != 2
+
+This is recommended as it makes the reason for the failure more
+apparent without having to delve into the code or tests.
+
+You can also optionally specify a suffix, which will be appended to the
+message on a new line:
+
+>>> compare(1, 2, suffix='(Except for very large values of 1)')
+Traceback (most recent call last):
+ ...
+AssertionError: 1 != 2
+(Except for very large values of 1)
+
+The expected and actual value can also be explicitly supplied, making it
+clearer as to what has gone wrong:
+
+>>> compare(expected=1, actual=2)
+Traceback (most recent call last):
+ ...
+AssertionError: 1 (expected) != 2 (actual)
+
+The real strengths of this function come when comparing more complex
+data types. A number of common python data types will give more
+detailed output when a comparison fails as described below:
+
+sets
+~~~~
+
+Comparing sets that aren't the same will attempt to
+highlight where the differences lie:
+
+>>> compare(set([1, 2]), set([2, 3]))
+Traceback (most recent call last):
+ ...
+AssertionError: set not as expected:
+<BLANKLINE>
+in first but not second:
+[1]
+<BLANKLINE>
+in second but not first:
+[3]
+<BLANKLINE>
+<BLANKLINE>
+
+dicts
+~~~~~
+
+Comparing dictionaries that aren't the same will attempt to
+highlight where the differences lie:
+
+>>> compare(dict(x=1, y=2, a=4), dict(x=1, z=3, a=5))
+Traceback (most recent call last):
+ ...
+AssertionError: dict not as expected:
+<BLANKLINE>
+same:
+['x']
+<BLANKLINE>
+in first but not second:
+'y': 2
+<BLANKLINE>
+in second but not first:
+'z': 3
+<BLANKLINE>
+values differ:
+'a': 4 != 5
+
+lists and tuples
+~~~~~~~~~~~~~~~~
+
+Comparing lists or tuples that aren't the same will attempt to highlight
+where the differences lie:
+
+>>> compare([1, 2, 3], [1, 2, 4])
+Traceback (most recent call last):
+ ...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+[1, 2]
+<BLANKLINE>
+first:
+[3]
+<BLANKLINE>
+second:
+[4]
+
+namedtuples
+~~~~~~~~~~~
+
+When two :func:`~collections.namedtuple` instances are compared, if
+they are of the same type, the description given will highlight which
+elements were the same and which were different:
+
+>>> from collections import namedtuple
+>>> TestTuple = namedtuple('TestTuple', 'x y z')
+>>> compare(TestTuple(1, 2, 3), TestTuple(1, 4, 3))
+Traceback (most recent call last):
+ ...
+AssertionError: TestTuple not as expected:
+<BLANKLINE>
+same:
+['x', 'z']
+<BLANKLINE>
+values differ:
+'y': 2 != 4
+
+generators
+~~~~~~~~~~
+
+When two generators are compared, they are both first unwound into
+tuples and those tuples are then compared.
+
+The :ref:`generator <generator>` helper is useful for creating a
+generator to represent the expected results:
+
+>>> from testfixtures import generator
+>>> def my_gen(t):
+... i = 0
+... while i<t:
+... i += 1
+... yield i
+>>> compare(generator(1, 2, 3), my_gen(2))
+Traceback (most recent call last):
+ ...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+(1, 2)
+<BLANKLINE>
+first:
+(3,)
+<BLANKLINE>
+second:
+()
+
+.. warning::
+
+ If you wish to assert that a function returns a generator, say, for
+ performance reasons, then you should use
+ :ref:`strict comparison <strict-comparison>`.
+
+strings and unicodes
+~~~~~~~~~~~~~~~~~~~~
+
+Comparison of strings can be tricky, particularly when those strings
+contain multiple lines; spotting the differences between the expected
+and actual values can be hard.
+
+To help with this, long strings give a more helpful representation
+when comparison fails:
+
+>>> compare("1234567891011", "1234567789")
+Traceback (most recent call last):
+ ...
+AssertionError:
+'1234567891011'
+!=
+'1234567789'
+
+Likewise, multi-line strings give unified diffs when their comparison
+fails:
+
+>>> compare("""
+... This is line 1
+... This is line 2
+... This is line 3
+... """,
+... """
+... This is line 1
+... This is another line
+... This is line 3
+... """)
+Traceback (most recent call last):
+ ...
+AssertionError:
+--- first
++++ second
+@@ -1,5 +1,5 @@
+<BLANKLINE>
+ This is line 1
+- This is line 2
++ This is another line
+ This is line 3
+<BLANKLINE>
+
+Such comparisons can still be confusing as white space is taken into
+account. If you need to care about whitespace characters, you can make
+spotting the differences easier as follows:
+
+>>> compare("\tline 1\r\nline 2"," line1 \nline 2", show_whitespace=True)
+Traceback (most recent call last):
+ ...
+AssertionError:
+--- first
++++ second
+@@ -1,2 +1,2 @@
+-'\tline 1\r\n'
++' line1 \n'
+ 'line 2'
+
+However, you may not care about some of the whitespace involved. To
+help with this, :func:`compare` has two options that can be set to
+ignore certain types of whitespace.
+
+If you wish to compare two strings that contain blank lines or lines
+containing only whitespace characters, but where you only care about
+the content, you can use the following:
+
+.. code-block:: python
+
+ compare('line1\nline2', 'line1\n \nline2\n\n',
+ blanklines=False)
+
+If you wish to compare two strings made up of lines that may have
+trailing whitespace that you don't care about, you can do so with the
+following:
+
+.. code-block:: python
+
+ compare('line1\nline2', 'line1 \t\nline2 \n',
+ trailing_whitespace=False)
+
+Recursive comparison
+~~~~~~~~~~~~~~~~~~~~
+
+Where :func:`compare` is able to provide a descriptive comparison for
+a particular type, it will then recurse to do the same for the
+elements contained within objects of that type.
+For example, when comparing a list of dictionaries, the description
+will not only tell you where in the list the difference occurred, but
+also what the differences were within the dictionaries that weren't
+equal:
+
+>>> compare([{'one': 1}, {'two': 2, 'text':'foo\nbar\nbaz'}],
+... [{'one': 1}, {'two': 2, 'text':'foo\nbob\nbaz'}])
+Traceback (most recent call last):
+ ...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+[{'one': 1}]
+<BLANKLINE>
+first:
+[{'text': 'foo\nbar\nbaz', 'two': 2}]
+<BLANKLINE>
+second:
+[{'text': 'foo\nbob\nbaz', 'two': 2}]
+<BLANKLINE>
+While comparing [1]: dict not as expected:
+<BLANKLINE>
+same:
+['two']
+<BLANKLINE>
+values differ:
+'text': 'foo\nbar\nbaz' != 'foo\nbob\nbaz'
+<BLANKLINE>
+While comparing [1]['text']:
+--- first
++++ second
+@@ -1,3 +1,3 @@
+ foo
+-bar
++bob
+ baz
+
+This also applies to any comparers you have provided, as can be seen
+in the next section.
+
+Providing your own comparers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using :meth:`compare` frequently for your own complex objects,
+it can be beneficial to give more descriptive output when two objects
+don't compare as equal.
+
+.. note::
+
+ If you are reading this section as a result of needing to test
+ objects that don't natively support comparison, or as a result of
+ needing to infrequently compare your own subclasses of python
+ basic types, take a look at :ref:`comparison-objects` as this may
+ well be an easier solution.
+
+.. invisible-code-block: python
+
+ from testfixtures.comparison import _registry, compare_sequence
+ from testfixtures import Replacer
+ r = Replacer()
+ r.replace('testfixtures.comparison._registry', {
+ list: compare_sequence
+ })
+
+As an example, suppose you have a class whose instances have a
+timestamp and a name as attributes, but you'd like to ignore the
+timestamp when comparing:
+
+.. code-block:: python
+
+ from datetime import datetime
+
+ class MyObject(object):
+ def __init__(self, name):
+ self.timestamp = datetime.now()
+ self.name = name
+
+To compare lots of these, you would first write a comparer:
+
+.. code-block:: python
+
+ def compare_my_object(x, y, context):
+ if x.name == y.name:
+ return
+ return 'MyObject named %s != MyObject named %s' % (
+ context.label('x', repr(x.name)),
+ context.label('y', repr(y.name)),
+ )
+
+Then you'd register that comparer for your type:
+
+.. code-block:: python
+
+ from testfixtures.comparison import register
+ register(MyObject, compare_my_object)
+
+.. invisible-code-block: python
+
+ import testfixtures.comparison
+ assert testfixtures.comparison._registry == {
+ MyObject: compare_my_object, list: compare_sequence,
+ }
+
+Now, it'll get used when comparing objects of that type,
+even if they're contained within other objects:
+
+>>> compare([1, MyObject('foo')], [1, MyObject('bar')])
+Traceback (most recent call last):
+ ...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+[1]
+<BLANKLINE>
+first:
+[<MyObject ...>]
+<BLANKLINE>
+second:
+[<MyObject ...>]
+<BLANKLINE>
+While comparing [1]: MyObject named 'foo' != MyObject named 'bar'
+
+From this example, you can also see that a comparer can indicate that
+two objects are equal, for :func:`compare`'s purposes, by returning
+``None``:
+
+>>> MyObject('foo') == MyObject('foo')
+False
+>>> compare(MyObject('foo'), MyObject('foo'))
+
+You can also see that you can, and should, use the context object passed in
+to add labels to the representations of the objects being compared if the
+comparison fails:
+
+>>> compare(expected=MyObject('foo'), actual=MyObject('bar'))
+Traceback (most recent call last):
+ ...
+AssertionError: MyObject named 'foo' (expected) != MyObject named 'bar' (actual)
+
+.. invisible-code-block: python
+
+ r.restore()
+
+ # set up for the next test
+ r = Replacer()
+ r.replace('testfixtures.comparison._registry', {})
+
+It may be that you only want to use a comparer or set of
+comparers for a particular test. If that's the case, you can pass
+:func:`compare` a ``comparers`` parameter consisting of a
+dictionary that maps types to comparers. These will be added to the
+global registry for the duration of the call:
+
+>>> compare(MyObject('foo'), MyObject('bar'),
+... comparers={MyObject: compare_my_object})
+Traceback (most recent call last):
+ ...
+AssertionError: MyObject named 'foo' != MyObject named 'bar'
+
+.. invisible-code-block: python
+
+ import testfixtures.comparison
+ assert testfixtures.comparison._registry == {}
+ r.restore()
+
+A full list of the available comparers included can be found below the
+API documentation for :func:`compare`. These make good candidates for
+registering for your own classes, if they provide the necessary
+behaviour, and their source is also good to read when wondering how to
+implement your own comparers.
+
+You may be wondering what the ``context`` object passed to the
+comparer is for; it allows you to hand off comparison of parts of the
+two objects currently being compared back to the :func:`compare`
+machinery, it also allows you to pass options to your comparison
+function.
+
+For example, you may have an object that has a couple of dictionaries
+as attributes:
+
+.. code-block:: python
+
+ from datetime import datetime
+
+ class Request(object):
+ def __init__(self, uri, headers, body):
+ self.uri = uri
+ self.headers = headers
+ self.body = body
+
+When your tests encounter instances of these that are not as expected,
+you want feedback about which bits of the request or response weren't
+as expected. This can be achieved by implementing a comparer as
+follows:
+
+.. code-block:: python
+
+ def compare_request(x, y, context):
+ uri_different = x.uri != y.uri
+ headers_different = context.different(x.headers, y.headers, '.headers')
+ body_different = context.different(x.body, y.body, '.body')
+ if uri_different or headers_different or body_different:
+ return 'Request for %r != Request for %r' % (
+ x.uri, y.uri
+ )
+
+.. note::
+
+ A comparer should always return some text when it considers
+ the two objects it is comparing to be different.
+
+This comparer can either be registered globally or passed to each
+:func:`compare` call and will give detailed feedback about how the
+requests were different:
+
+>>> compare(Request('/foo', {'method': 'POST'}, {'my_field': 'value_1'}),
+... Request('/foo', {'method': 'GET'}, {'my_field': 'value_2'}),
+... comparers={Request: compare_request})
+Traceback (most recent call last):
+ ...
+AssertionError: Request for '/foo' != Request for '/foo'
+<BLANKLINE>
+While comparing .headers: dict not as expected:
+<BLANKLINE>
+values differ:
+'method': 'POST' != 'GET'
+<BLANKLINE>
+While comparing .headers['method']: 'POST' != 'GET'
+<BLANKLINE>
+While comparing .body: dict not as expected:
+<BLANKLINE>
+values differ:
+'my_field': 'value_1' != 'value_2'
+<BLANKLINE>
+While comparing .body['my_field']: 'value_1' != 'value_2'
+
+As an example of passing options through to a comparer, suppose you
+wanted to compare all decimals in a nested data structure by rounding
+them to a number of decimal places that varies from test to test. The
+comparer could be implemented and registered as follows:
+
+.. invisible-code-block: python
+
+ from testfixtures.comparison import _registry
+ r = Replacer()
+ r.replace('testfixtures.comparison._registry', dict(_registry))
+
+.. code-block:: python
+
+ from decimal import Decimal
+ from testfixtures.comparison import register
+
+ def compare_decimal(x, y, context):
+ precision = context.get_option('precision', 2)
+ if round(x, precision) != round(y, precision):
+ return '%r != %r when rounded to %i decimal places' % (
+ x, y, precision
+ )
+
+ register(Decimal, compare_decimal)
+
+Now, this comparer will be used for comparing all decimals and the
+precision used will be that passed to :func:`compare`:
+
+>>> expected_order = {'price': Decimal('1.234'), 'quantity': 5}
+>>> actual_order = {'price': Decimal('1.236'), 'quantity': 5}
+>>> compare(expected_order, actual_order, precision=1)
+>>> compare(expected_order, actual_order, precision=3)
+Traceback (most recent call last):
+ ...
+AssertionError: dict not as expected:
+<BLANKLINE>
+same:
+['quantity']
+<BLANKLINE>
+values differ:
+'price': Decimal('1.234') != Decimal('1.236')
+<BLANKLINE>
+While comparing ['price']: Decimal('1.234') != Decimal('1.236') when rounded to 3 decimal places
+
+If no precision is passed, the default of ``2`` will be used:
+
+>>> compare(Decimal('2.006'), Decimal('2.009'))
+>>> compare(Decimal('2.001'), Decimal('2.009'))
+Traceback (most recent call last):
+ ...
+AssertionError: Decimal('2.001') != Decimal('2.009') when rounded to 2 decimal places
+
+.. invisible-code-block: python
+
+ r.restore()
+
+.. _strict-comparison:
+
+Ignoring ``__eq__``
+~~~~~~~~~~~~~~~~~~~
+
+Some objects, such as those from the Django ORM, have pretty broken
+implementations of ``__eq__``. Since :func:`compare` normally relies on this,
+it can result in objects appearing to be equal when they are not.
+
+Take this class, for example:
+
+.. code-block:: python
+
+ class OrmObj(object):
+ def __init__(self, a):
+ self.a = a
+ def __eq__(self, other):
+ return True
+ def __repr__(self):
+ return 'OrmObj: '+str(self.a)
+
+If we compare normally, we erroneously understand the objects to be equal:
+
+>>> compare(actual=OrmObj(1), expected=OrmObj(2))
+
+In order to get a sane comparison, we need to both supply a custom comparer
+as described above, and use the ``ignore_eq`` parameter:
+
+.. code-block:: python
+
+ def compare_orm_obj(x, y, context):
+ if x.a != y.a:
+ return 'OrmObj: %s != %s' % (x.a, y.a)
+
+>>> compare(actual=OrmObj(1), expected=OrmObj(2),
+... comparers={OrmObj: compare_orm_obj}, ignore_eq=True)
+Traceback (most recent call last):
+...
+AssertionError: OrmObj: 2 != 1
+
+Strict comparison
+~~~~~~~~~~~~~~~~~
+
+If it is important that the two values being compared are of exactly
+the same type, rather than just being equal as far as Python is
+concerned, then the strict mode of :func:`compare` should be used.
+
+For example, these two instances will normally appear to be equal
+provided the elements within them are the same:
+
+>>> TypeA = namedtuple('A', 'x')
+>>> TypeB = namedtuple('B', 'x')
+>>> compare(TypeA(1), TypeB(1))
+
+If this type difference is important, then the `strict` parameter
+should be used:
+
+>>> compare(TypeA(1), TypeB(1), strict=True)
+Traceback (most recent call last):
+ ...
+AssertionError: A(x=1) (<class '__main__.A'>) != B(x=1) (<class '__main__.B'>)
+
+.. _comparison-objects:
+
+Comparison objects
+------------------
+
+Another common problem with the checking in tests is that not all
+objects support comparison and nor should they need to. For this
+reason, TestFixtures provides the :class:`~testfixtures.Comparison`
+class.
+
+This class lets you instantiate placeholders that can be used to
+compare expected results with actual results where objects in the
+actual results do not support useful comparison.
+
+Comparisons will appear to be equal to any object they are compared
+with that matches their specification. For example, take the following
+class:
+
+.. code-block:: python
+
+ class SomeClass:
+
+ def __init__(self, x, y):
+ self.x, self.y = x, y
+
+Normal comparison doesn't work, which makes testing tricky:
+
+>>> SomeClass(1, 2) == SomeClass(1, 2)
+False
+
+Here's how this comparison can be done:
+
+>>> from testfixtures import Comparison as C
+>>> C(SomeClass, x=1, y=2) == SomeClass(1, 2)
+True
+
+Perhaps even more importantly, when a comparison fails, its
+representation changes to give information about what went wrong. The
+common idiom for using comparisons is in conjunction with
+:meth:`~unittest.TestCase.assertEqual` or
+:meth:`~testfixtures.compare`:
+
+>>> compare(C(SomeClass, x=2), SomeClass(1, 2))
+Traceback (most recent call last):
+ ...
+AssertionError:
+ <C(failed):...SomeClass>
+ x:2 != 1
+ y:2 not in Comparison
+ </C> != <...SomeClass...>
+
+The key is that the comparison object actually stores information
+about what it was last compared with. The following example shows this
+more clearly:
+
+>>> c = C(SomeClass, x=2)
+>>> print(repr(c))
+<BLANKLINE>
+ <C:...SomeClass>
+ x:2
+ </C>
+>>> c == SomeClass(1, 2)
+False
+>>> print(repr(c))
+<BLANKLINE>
+ <C(failed):...SomeClass>
+ x:2 != 1
+ y:2 not in Comparison
+ </C>
+
+.. note::
+
+ :meth:`~unittest.TestCase.assertEqual` has regressed in Python 3.4
+ and will now truncate the text shown in assertions with no way to
+ configure this behaviour. Use :func:`compare` instead, which will
+ give you other desirable behaviour as well as showing you the full
+ output of failed comparisons.
+
+Types of comparison
+~~~~~~~~~~~~~~~~~~~
+
+There are several ways a comparison can be set up depending on what
+you want to check.
+
+If you only care about the class of an object, you can set up the
+comparison with only the class:
+
+>>> C(SomeClass) == SomeClass(1, 2)
+True
+
+This can also be achieved by specifying the type of the object as a
+dotted name:
+
+>>> import sys
+>>> C('types.ModuleType') == sys
+True
+
+Alternatively, if you happen to have a non-comparable object already
+around, comparison can be done with it:
+
+>>> C(SomeClass(1,2)) == SomeClass(1,2)
+True
+
+If you only care about certain attributes, this can also easily be
+achieved with the `strict` parameter:
+
+>>> C(SomeClass, x=1, strict=False) == SomeClass(1, 2)
+True
+
+The above can be problematic if you want to compare an object with
+attributes that share names with parameters to the :class:`~testfixtures.Comparison`
+constructor. For this reason, you can pass the attributes in a
+dictionary:
+
+>>> compare(C(SomeClass, {'strict':3}, strict=False), SomeClass(1, 2))
+Traceback (most recent call last):
+ ...
+AssertionError:
+ <C(failed):...SomeClass>
+ strict:3 not in other
+ </C> != <...SomeClass...>
+
+Gotchas
+~~~~~~~
+
+There are a few things to be careful of when using comparisons:
+
+.. work around Manuel bug :-(
+.. invisible-code-block: python
+
+ class NoVars(object):
+ __slots__ = ['x']
+
+- The default strict comparison cannot be used with a class such as
+ the following:
+
+ .. code-block:: python
+
+ class NoVars(object):
+ __slots__ = ['x']
+
+ If you try, you will get an error that explains the problem:
+
+ >>> C(NoVars, x=1) == NoVars()
+ Traceback (most recent call last):
+ ...
+ TypeError: <NoVars object at ...> does not support vars() so cannot do strict comparison
+
+ Comparisons can still be done with classes that don't support
+ ``vars()``, they just need to be non-strict:
+
+ >>> nv = NoVars()
+ >>> nv.x = 1
+ >>> C(NoVars, x=1, strict=False) == nv
+ True
+
+.. work around Manuel bug :-(
+.. invisible-code-block: python
+ class SomeModel:
+ def __eq__(self,other):
+ if isinstance(other,SomeModel):
+ return True
+ return False
+
+- If the object being compared has an ``__eq__`` method, such as
+ Django model instances, then the :class:`~testfixtures.Comparison`
+ must be the first object in the equality check.
+
+ The following class is an example of this:
+
+ .. code-block:: python
+
+ class SomeModel:
+ def __eq__(self,other):
+ if isinstance(other,SomeModel):
+ return True
+ return False
+
+ It will not work correctly if used as the second object in the
+ expression:
+
+ >>> SomeModel()==C(SomeModel)
+ False
+
+ However, if the comparison is correctly placed first, then
+ everything will behave as expected:
+
+ >>> C(SomeModel)==SomeModel()
+ True
+
+- It probably goes without saying, but comparisons should not be used
+ on both sides of an equality check:
+
+ >>> C(SomeClass)==C(SomeClass)
+ False
+
+Round Comparison objects
+-------------------------
+
+When comparing numerics you often want to be able to compare to a
+given precision to allow for rounding issues which make precise
+equality impossible.
+
+For these situations, you can use :class:`RoundComparison` objects
+wherever you would use floats or Decimals, and they will compare equal to
+any float or Decimal that matches when both sides are rounded to the
+specified precision.
+
+Here's an example:
+
+.. code-block:: python
+
+ from testfixtures import compare, RoundComparison as R
+
+ compare(R(1234.5678, 2), 1234.5681)
+
+.. note::
+
+ You should always pass the same type of object to the
+   :class:`RoundComparison` object as you intend to compare it with. If
+ the type of the rounded expected value is not the same as the type of
+ the rounded value being compared against it, a :class:`TypeError`
+ will be raised.
+
+Range Comparison objects
+-------------------------
+
+When comparing orderable types such as numbers, dates and times, you may only
+know what range a value will fall into. :class:`RangeComparison` objects
+let you confirm a value is within a certain tolerance or range.
+
+Here's an example:
+
+.. code-block:: python
+
+ from testfixtures import compare, RangeComparison as R
+
+ compare(R(123.456, 789), Decimal(555.01))
+
+.. note::
+
+ :class:`RangeComparison` is inclusive of both the lower and upper bound.
+
+String Comparison objects
+-------------------------
+
+When comparing sequences of strings, particularly those coming from
+things like the python logging package, you often end up wanting to
+express a requirement that one string should be almost like another,
+or maybe fit a particular regular expression.
+
+For these situations, you can use :class:`StringComparison` objects
+wherever you would use normal strings, and they will compare equal to
+any string that matches the regular expression they are created with.
+
+Here's an example:
+
+.. code-block:: python
+
+ from testfixtures import compare, StringComparison as S
+
+ compare(S('Starting thread \d+'),'Starting thread 132356')
+
+Differentiating chunks of text
+------------------------------
+
+TestFixtures provides a function that will compare two strings and
+give a unified diff as a result. This can be handy as a third
+parameter to :meth:`~unittest.TestCase.assertEqual` or just as a
+general utility function for comparing two lumps of text.
+
+As an example:
+
+>>> from testfixtures import diff
+>>> print(diff('line1\nline2\nline3',
+... 'line1\nlineA\nline3'))
+--- first
++++ second
+@@ -1,3 +1,3 @@
+ line1
+-line2
++lineA
+ line3
diff --git a/docs/components.txt b/docs/components.txt
new file mode 100755
index 0000000..3e706b0
--- /dev/null
+++ b/docs/components.txt
@@ -0,0 +1,70 @@
+Testing with zope.component
+===========================
+
+.. invisible-code-block: python
+
+ from nose.plugins.skip import SkipTest
+ try:
+ from zope.component import getSiteManager
+ except ImportError:
+ raise SkipTest('zope.component unavailable')
+ from testfixtures.components import TestComponents
+
+.. currentmodule:: testfixtures
+
+`zope.component`__ is a fantastic aspect-oriented library for Python,
+however its unit testing support is somewhat convoluted. If you need
+to test code that registers adapters, utilities and the like then you
+may need to provide a sterile component registry. For historical
+reasons, component registries are known as `Site Managers` in
+:mod:`zope.component`.
+
+__ http://pypi.python.org/pypi/zope.component
+
+TestFixtures provides a :class:`~components.TestComponents` helper
+which provides just such a sterile registry. It should be instantiated
+in your :class:`TestCase`'s :meth:`setUp` method. Its
+:meth:`uninstall` method should be called in the test's
+:meth:`tearDown` method.
+
+Normally, :meth:`zope.component.getSiteManager` returns whatever
+the current registry is. This may be influenced by frameworks that use
+:mod:`zope.component` which can mean that unit tests have no
+baseline to start with:
+
+>>> original = getSiteManager()
+>>> print(original)
+<BaseGlobalComponents base>
+
+Once we've got a :class:`TestComponents` in place, we know what
+we're getting:
+
+>>> components = TestComponents()
+>>> getSiteManager()
+<Components Testing>
+
+The registry that :func:`getSiteManager` returns is now also
+available as an attribute of the :class:`TestComponents`
+instance:
+
+>>> getSiteManager() is components.registry
+True
+
+It's also empty:
+
+>>> tuple(components.registry.registeredUtilities())
+()
+>>> tuple(components.registry.registeredAdapters())
+()
+>>> tuple(components.registry.registeredHandlers())
+()
+
+You can do whatever you like with this registry. When you're done,
+just call the :meth:`uninstall` method:
+
+>>> components.uninstall()
+
+Now you'll have the original registry back in place:
+
+>>> getSiteManager() is original
+True
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..602454b
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+import datetime
+import os
+import pkginfo
+import sys
+
+on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+pkg_info = pkginfo.Develop(os.path.join(os.path.dirname(__file__), '..'))
+
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.intersphinx'
+ ]
+
+intersphinx_mapping = {'http://docs.python.org': None}
+
+# General
+source_suffix = '.txt'
+master_doc = 'index'
+project = pkg_info.name
+copyright = '2008-2015 Simplistix Ltd, %s Chris Withers' % datetime.datetime.now().year
+version = release = pkg_info.version
+exclude_trees = ['_build']
+exclude_patterns = ['description.txt']
+pygments_style = 'sphinx'
+
+# Options for HTML output
+if on_rtd:
+ html_theme = 'default'
+else:
+ html_theme = 'classic'
+htmlhelp_basename = project+'doc'
+
+# Options for LaTeX output
+latex_documents = [
+ ('index', project+'.tex', project+u' Documentation',
+ 'Simplistix Ltd', 'manual'),
+]
diff --git a/docs/datetime.txt b/docs/datetime.txt
new file mode 100644
index 0000000..652c5b5
--- /dev/null
+++ b/docs/datetime.txt
@@ -0,0 +1,480 @@
+Mocking dates and times
+=======================
+
+.. currentmodule:: testfixtures
+
+Testing code that involves dates and times or which has behaviour
+dependent on the date or time it is executed at has historically been
+tricky. Mocking lets you perform tests on this type of code and
+TestFixtures provides three specialised mock objects to help with
+this.
+
+Dates
+~~~~~
+
+TestFixtures provides the :func:`~testfixtures.test_date` function
+that returns a subclass of :class:`datetime.date` with a
+:meth:`~datetime.date.today` method that will return a
+consistent sequence of dates each time it is called.
+
+This enables you to write tests for code such as the following, from
+the ``testfixtures.tests.sample1`` package:
+
+.. literalinclude:: ../testfixtures/tests/sample1.py
+ :lines: 8-10,21-22
+
+:class:`~testfixtures.Replace` can be used to apply the mock as
+shown in the following example, which could appear in either a unit
+test or a doc test:
+
+>>> from testfixtures import Replace, test_date
+>>> from testfixtures.tests.sample1 import str_today_1
+>>> with Replace('testfixtures.tests.sample1.date', test_date()):
+... str_today_1()
+... str_today_1()
+'2001-01-01'
+'2001-01-02'
+
+If you need a specific date to be returned, you can specify it:
+
+>>> with Replace('testfixtures.tests.sample1.date', test_date(1978,6,13)):
+... str_today_1()
+'1978-06-13'
+
+If you need to test with a whole sequence of specific dates, this
+can be done as follows:
+
+>>> with Replace('testfixtures.tests.sample1.date', test_date(None)) as d:
+... d.add(1978,6,13)
+... d.add(2009,11,12)
+... str_today_1()
+... str_today_1()
+'1978-06-13'
+'2009-11-12'
+
+Another way to test with a specific sequence of dates is to use the
+``delta_type`` and ``delta`` parameters to
+:func:`~testfixtures.test_date`. These parameters control the type and
+size, respectively, of the difference between each date returned.
+
+For example, where 2 days elapse between each returned value:
+
+>>> with Replace('testfixtures.tests.sample1.date',
+... test_date(1978, 6, 13, delta=2, delta_type='days')) as d:
+... str_today_1()
+... str_today_1()
+... str_today_1()
+'1978-06-13'
+'1978-06-15'
+'1978-06-17'
+
+The ``delta_type`` can be any keyword parameter accepted by the
+:class:`~datetime.timedelta` constructor. Specifying a ``delta`` of
+zero can be an effective way of ensuring that all calls to the
+:meth:`~testfixtures.test_date.today` method return the same value:
+
+>>> with Replace('testfixtures.tests.sample1.date',
+... test_date(1978, 6, 13, delta=0)) as d:
+... str_today_1()
+... str_today_1()
+... str_today_1()
+'1978-06-13'
+'1978-06-13'
+'1978-06-13'
+
+When using :func:`~testfixtures.test_date`, you can, at any time, set
+the next date to be returned using the
+:meth:`~testfixtures.test_date.set` method. The date returned after
+this will be the set date plus the ``delta`` in effect:
+
+>>> with Replace('testfixtures.tests.sample1.date', test_date(delta=2)) as d:
+... str_today_1()
+... d.set(1978,8,1)
+... str_today_1()
+... str_today_1()
+'2001-01-01'
+'1978-08-01'
+'1978-08-03'
+
+Datetimes
+~~~~~~~~~
+
+TestFixtures provides the :func:`~testfixtures.test_datetime`
+function that returns a subclass of :class:`datetime.datetime` with
+a :meth:`~datetime.datetime.now` method that will return a
+consistent sequence of :obj:`~datetime.datetime` objects each time
+it is called.
+
+This enables you to write tests for code such as the following, from
+the ``testfixtures.tests.sample1`` package:
+
+.. literalinclude:: ../testfixtures/tests/sample1.py
+ :lines: 8-10,11-12
+
+We use a :class:`~testfixtures.Replace` as follows, which could
+appear in either a unit test or a doc test:
+
+>>> from testfixtures import Replace, test_datetime
+>>> from testfixtures.tests.sample1 import str_now_1
+>>> with Replace('testfixtures.tests.sample1.datetime', test_datetime()):
+... str_now_1()
+... str_now_1()
+'2001-01-01 00:00:00'
+'2001-01-01 00:00:10'
+
+If you need a specific datetime to be returned, you can specify it:
+
+>>> with Replace('testfixtures.tests.sample1.datetime',
+... test_datetime(1978,6,13,1,2,3)):
+... str_now_1()
+'1978-06-13 01:02:03'
+
+If you need to test with a whole sequence of specific datetimes,
+this can be done as follows:
+
+>>> with Replace('testfixtures.tests.sample1.datetime',
+... test_datetime(None)) as d:
+... d.add(1978,6,13,16,0,1)
+... d.add(2009,11,12,11,41,20)
+... str_now_1()
+... str_now_1()
+'1978-06-13 16:00:01'
+'2009-11-12 11:41:20'
+
+Another way to test with a specific sequence of datetimes is to use the
+``delta_type`` and ``delta`` parameters to
+:func:`~testfixtures.test_datetime`. These parameters control the type and
+size, respectively, of the difference between each datetime returned.
+
+For example, where 2 hours elapse between each returned value:
+
+>>> with Replace(
+... 'testfixtures.tests.sample1.datetime',
+... test_datetime(1978, 6, 13, 16, 0, 1, delta=2, delta_type='hours')
+... ) as d:
+... str_now_1()
+... str_now_1()
+... str_now_1()
+'1978-06-13 16:00:01'
+'1978-06-13 18:00:01'
+'1978-06-13 20:00:01'
+
+The ``delta_type`` can be any keyword parameter accepted by the
+:class:`~datetime.timedelta` constructor. Specifying a ``delta`` of
+zero can be an effective way of ensuring that all calls to the
+:meth:`~testfixtures.test_datetime.now` method return the same value:
+
+>>> with Replace('testfixtures.tests.sample1.datetime',
+... test_datetime(1978, 6, 13, 16, 0, 1, delta=0)) as d:
+... str_now_1()
+... str_now_1()
+... str_now_1()
+'1978-06-13 16:00:01'
+'1978-06-13 16:00:01'
+'1978-06-13 16:00:01'
+
+When using :func:`~testfixtures.test_datetime`, you can, at any time, set
+the next datetime to be returned using the
+:meth:`~testfixtures.test_datetime.set` method. The value returned after
+this will be the set value plus the ``delta`` in effect:
+
+>>> with Replace('testfixtures.tests.sample1.datetime',
+... test_datetime(delta=2)) as d:
+... str_now_1()
+... d.set(1978,8,1)
+... str_now_1()
+... str_now_1()
+'2001-01-01 00:00:00'
+'1978-08-01 00:00:00'
+'1978-08-01 00:00:02'
+
+Timezones
+-----------------------------
+
+In many situations where you're mocking out
+:meth:`~datetime.datetime.now` or :meth:`~datetime.datetime.utcnow`
+you're not concerned about timezones, especially given that both
+methods will usually return :class:`~datetime.datetime` objects that
+have a `tzinfo` of ``None``.
+However, in some applications it is important that
+:meth:`~datetime.datetime.now` and :meth:`~datetime.datetime.utcnow`
+return different times, as they would normally if the application is
+run anywhere other than the UTC timezone.
+
+The best way to understand how to use
+:func:`~testfixtures.test_datetime` in these situations is to think of
+the internal queue as being a queue of :class:`~datetime.datetime`
+objects at the current local time with a `tzinfo` of None, much as
+would be returned by :meth:`~datetime.datetime.now`.
+If you pass in a `tz` parameter to
+:meth:`~tdatetime.now` it will be applied to the value
+before it is returned in the same way as it would by
+:meth:`datetime.datetime.now`.
+
+If you pass in a `tzinfo` to :func:`~testfixtures.test_datetime`, this
+will be taken to indicate the timezone you intend for the local times
+that :meth:`~tdatetime.now` simulates.
+As such, that timezone will be used to compute values returned from
+:meth:`~tdatetime.utcnow` such that they would be :class:`test_datetime`
+objects in the UTC timezone with the `tzinfo` set to ``None``, as
+would be the case for a normal call to
+:meth:`datetime.datetime.utcnow`.
+
+For example, lets take a timezone as defined by the following class:
+
+.. code-block:: python
+
+ from datetime import tzinfo, timedelta
+
+ class ATZInfo(tzinfo):
+
+ def tzname(self, dt):
+ return 'A TimeZone'
+
+ def utcoffset(self, dt):
+ # In general, this timezone is 5 hours behind UTC
+ offset = timedelta(hours=-5)
+ return offset+self.dst(dt)
+
+ def dst(self, dt):
+ # However, between March and September, it is only
+ # 4 hours behind UTC
+ if 3 < dt.month < 9:
+ return timedelta(hours=1)
+ return timedelta()
+
+If we create a :class:`~testfixtures.test_datetime` with this
+timezone and a delta of zero, so we can see the effect of the timezone
+over multiple calls, the values returned by
+:meth:`~tdatetime.now` will be affected:
+
+>>> datetime = test_datetime(2001, 1, 1, delta=0, tzinfo=ATZInfo())
+
+A normal call to :meth:`~tdatetime.now` will return the values passed
+to the constructor:
+
+>>> print(datetime.now())
+2001-01-01 00:00:00
+
+If we now ask for this time but in the timezone we passed to
+:class:`~testfixtures.test_datetime`, we will get the same hours,
+minutes and seconds but with a ``tzinfo`` attribute set:
+
+>>> print(datetime.now(ATZInfo()))
+2001-01-01 00:00:00-05:00
+
+If we call :meth:`~tdatetime.utcnow`, we will get the time equivalent
+to the values passed to the constructor, but in the UTC timezone:
+
+>>> print(datetime.utcnow())
+2001-01-01 05:00:00
+
+The timezone passed in when the :class:`~testfixtures.test_datetime`
+is created has a similar effect on any items set:
+
+>>> datetime.set(2011,5,1,10)
+>>> print(datetime.now())
+2011-05-01 10:00:00
+>>> print(datetime.utcnow())
+2011-05-01 14:00:00
+
+Likewise, :meth:`~tdatetime.add` behaves the same way:
+
+>>> datetime = test_datetime(None, delta=0, tzinfo=ATZInfo())
+>>> datetime.add(2011,1,1,10)
+>>> datetime.add(2011,5,1,10)
+>>> datetime.add(2011,10,1,10)
+>>> print(datetime.now())
+2011-01-01 10:00:00
+>>> print(datetime.utcnow())
+2011-05-01 14:00:00
+>>> print(datetime.now())
+2011-10-01 10:00:00
+
+Times
+~~~~~
+
+TestFixtures provides the :func:`~testfixtures.test_time`
+function that, when called, returns a replacement for the
+:func:`time.time` function.
+
+This enables you to write tests for code such as the following, from
+the ``testfixtures.tests.sample1`` package:
+
+.. literalinclude:: ../testfixtures/tests/sample1.py
+ :lines: 30-34
+
+We use a :class:`~testfixtures.Replace` as follows, which could
+appear in either a unit test or a doc test:
+
+>>> from testfixtures import Replace, test_time
+>>> from testfixtures.tests.sample1 import str_time
+>>> with Replace('testfixtures.tests.sample1.time', test_time()):
+... str_time()
+... str_time()
+'978307200.0'
+'978307201.0'
+
+If you need an integer representing a specific time to be returned,
+you can specify it:
+
+>>> with Replace('testfixtures.tests.sample1.time',
+... test_time(1978, 6, 13, 1, 2, 3)):
+... str_time()
+'266547723.0'
+
+If you need to test with a whole sequence of specific timestamps,
+this can be done as follows:
+
+>>> with Replace('testfixtures.tests.sample1.time', test_time(None)) as t:
+... t.add(1978,6,13,16,0,1)
+... t.add(2009,11,12,11,41,20)
+... str_time()
+... str_time()
+'266601601.0'
+'1258026080.0'
+
+Another way to test with a specific sequence of timestamps is to use the
+``delta_type`` and ``delta`` parameters to
+:func:`~testfixtures.test_time`. These parameters control the type and
+size, respectively, of the difference between each timestamp returned.
+
+For example, where 2 hours elapse between each returned value:
+
+>>> with Replace(
+... 'testfixtures.tests.sample1.time',
+... test_time(1978, 6, 13, 16, 0, 1, delta=2, delta_type='hours')
+... ) as d:
+... str_time()
+... str_time()
+... str_time()
+'266601601.0'
+'266608801.0'
+'266616001.0'
+
+The ``delta_type`` can be any keyword parameter accepted by the
+:class:`~datetime.timedelta` constructor. Specifying a ``delta`` of
+zero can be an effective way of ensuring that all calls to the
+:meth:`~time.time` function return the same value:
+
+>>> with Replace('testfixtures.tests.sample1.time',
+... test_time(1978, 6, 13, 16, 0, 1, delta=0)) as d:
+... str_time()
+... str_time()
+... str_time()
+'266601601.0'
+'266601601.0'
+'266601601.0'
+
+When using :func:`~testfixtures.test_time`, you can, at any time, set
+the next timestamp to be returned using the
+:meth:`~testfixtures.test_time.set` method. The value returned after
+this will be the set value plus the ``delta`` in effect:
+
+>>> with Replace('testfixtures.tests.sample1.time', test_time(delta=2)) as d:
+... str_time()
+... d.set(1978,8,1)
+... str_time()
+... str_time()
+'978307200.0'
+'270777600.0'
+'270777602.0'
+
+Gotchas with dates and times
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using these specialised mock objects can have some intricacies as
+described below:
+
+Local references to functions
+-----------------------------
+
+There are situations where people may have obtained a local
+reference to the :meth:`~datetime.date.today` or
+:meth:`~datetime.datetime.now` methods, such
+as the following code from the ``testfixtures.tests.sample1`` package:
+
+.. literalinclude:: ../testfixtures/tests/sample1.py
+ :lines: 8-10,14-18,24-28
+
+In these cases, you need to be careful with the replacement:
+
+>>> from testfixtures import Replacer, test_date, test_datetime
+>>> from testfixtures.tests.sample1 import str_now_2, str_today_2
+>>> with Replacer() as replace:
+... today = replace('testfixtures.tests.sample1.today', test_date().today)
+... now = replace('testfixtures.tests.sample1.now', test_datetime().now)
+... str_today_2()
+... str_now_2()
+'2001-01-01'
+'2001-01-01 00:00:00'
+
+.. _strict-dates-and-times:
+
+Use with code that checks class types
+-------------------------------------
+
+When using the above specialist mocks, you may find code that checks
+the type of parameters passed may get confused. This is because, by
+default, :class:`test_datetime` and :class:`test_date` return
+instances of the real :class:`~datetime.datetime` and
+:class:`~datetime.date` classes:
+
+>>> from testfixtures import test_datetime
+>>> from datetime import datetime
+>>> tdatetime = test_datetime()
+>>> issubclass(tdatetime, datetime)
+True
+>>> tdatetime.now().__class__
+<...'datetime.datetime'>
+
+The above behaviour, however, is generally what you want as other code
+in your application and, more importantly, in other code such as
+database adapters, may handle instances of the real
+:class:`~datetime.datetime` and :class:`~datetime.date` classes, but
+not instances of the :class:`test_datetime` and :class:`test_date`
+mocks.
+
+That said, this behaviour can cause problems if you check the type of
+an instance against one of the mock classes. Most people might expect
+the following to return ``True``:
+
+>>> isinstance(tdatetime(2011, 1, 1), tdatetime)
+False
+>>> isinstance(tdatetime.now(), tdatetime)
+False
+
+If this causes a problem for you, then both
+:class:`test_datetime` and :class:`test_date` take a
+`strict` keyword parameter that can be used as follows:
+
+>>> tdatetime = test_datetime(strict=True)
+>>> tdatetime.now().__class__
+<class 'testfixtures.tdatetime.tdatetime'>
+>>> isinstance(tdatetime.now(), tdatetime)
+True
+
+You will need to take care that you have replaced occurrences of the
+class where type checking is done with the correct
+:class:`test_datetime` or :class:`test_date`.
+Also, be aware that the :meth:`~tdatetime.date` method of
+:class:`test_datetime` instances will still return a normal
+:class:`~datetime.date` instance. If type checking related to this is causing
+problems, the type the :meth:`~tdatetime.date` method returns can
+be controlled as shown in the following example:
+
+.. code-block:: python
+
+ from testfixtures import test_date, test_datetime
+
+ date_type = test_date(strict=True)
+ datetime_type = test_datetime(strict=True, date_type=date_type)
+
+With things set up like this, the :meth:`~tdatetime.date` method
+will return an instance of the :class:`date_type` mock:
+
+>>> somewhen = datetime_type.now()
+>>> somewhen.date()
+tdate(2001, 1, 1)
+>>> _.__class__ is date_type
+True
diff --git a/docs/description.txt b/docs/description.txt
new file mode 100644
index 0000000..fd0f947
--- /dev/null
+++ b/docs/description.txt
@@ -0,0 +1,47 @@
+============
+TestFixtures
+============
+
+TestFixtures is a collection of helpers and mock objects that are
+useful when writing unit tests or doc tests.
+
+If you're wondering why "yet another mock object library", testing is
+often described as an art form and as such some styles of library will
+suit some people while others will suit other styles. This library
+contains common test fixtures the author found himself
+repeating from package to package and so decided to extract them into
+their own library and give them some tests of their own!
+
+The areas of testing this package can help with are listed below:
+
+**Comparing objects and sequences**
+
+Better feedback when the results aren't as you expected along with
+support for comparison of objects that don't normally support
+comparison.
+
+**Mocking out objects and methods**
+
+Easy to use ways of stubbing out objects, classes or individual
+methods for both doc tests and unit tests. Special helpers are
+provided for testing with dates and times.
+
+**Testing logging**
+
+Helpers for capturing logging output in both doc tests and
+unit tests.
+
+**Testing stream output**
+
+Helpers for capturing stream output, such as that from print
+statements, and making assertion about it.
+
+**Testing with files and directories**
+
+Support for creating and checking files and directories in sandboxes
+for both doc tests and unit tests.
+
+**Testing exceptions**
+
+Easy to use ways of checking that a certain exception is raised, even
+down to the parameters the exception is raised with.
diff --git a/docs/development.txt b/docs/development.txt
new file mode 100644
index 0000000..09f384f
--- /dev/null
+++ b/docs/development.txt
@@ -0,0 +1,66 @@
+Development
+===========
+
+.. highlight:: bash
+
+This package is developed using continuous integration which can be
+found here:
+
+https://travis-ci.org/Simplistix/testfixtures
+
+The latest development version of the documentation can be found here:
+
+http://testfixtures.readthedocs.org/en/latest/
+
+If you wish to contribute to this project, then you should fork the
+repository found here:
+
+https://github.com/Simplistix/testfixtures/
+
+Once that has been done and you have a checkout, you can follow these
+instructions to perform various development tasks:
+
+Setting up a virtualenv
+-----------------------
+
+The recommended way to set up a development environment is to turn
+your checkout into a virtualenv and then install the package in
+editable form as follows::
+
+ $ virtualenv .
+ $ bin/pip install -U -e .[test,build]
+
+Running the tests
+-----------------
+
+Once you've set up a virtualenv, the tests can be run as follows::
+
+ $ bin/nosetests
+
+Building the documentation
+--------------------------
+
+The Sphinx documentation is built by doing the following from the
+directory containing setup.py::
+
+ $ source bin/activate
+ $ cd docs
+ $ make html
+
+To check that the description that will be used on PyPI renders properly,
+do the following::
+
+ $ python setup.py --long-description | rst2html.py > desc.html
+
+The resulting ``desc.html`` should be checked by opening in a browser.
+
+Making a release
+----------------
+
+To make a release, just update ``versions.txt``, update the change log, tag it
+and push to https://github.com/Simplistix/testfixtures
+and Travis CI should take care of the rest.
+
+Once Travis CI is done, make sure to go to
+https://readthedocs.org/projects/testfixtures/versions/
+and make sure the new release is marked as an Active Version.
diff --git a/docs/exceptions.txt b/docs/exceptions.txt
new file mode 100644
index 0000000..67d7e36
--- /dev/null
+++ b/docs/exceptions.txt
@@ -0,0 +1,139 @@
+Testing exceptions
+==================
+
+.. currentmodule:: testfixtures
+
+The :mod:`unittest` support for asserting that exceptions are raised
+when expected is fairly weak. Like many other Python testing
+libraries, TestFixtures has tools to help with this.
+
+The :class:`ShouldRaise` context manager
+----------------------------------------
+
+If you are using a version of Python where the :keyword:`with`
+statement can be used, it's recommended that you use the
+:class:`ShouldRaise` context manager.
+
+Suppose we wanted to test the following function to make sure that the
+right exception was raised:
+
+.. code-block:: python
+
+ def the_thrower(throw=True):
+ if throw:
+ raise ValueError('Not good!')
+
+The following example shows how to test that the correct exception is
+raised:
+
+>>> from testfixtures import ShouldRaise
+>>> with ShouldRaise(ValueError('Not good!')):
+... the_thrower()
+
+If the exception raised doesn't match the one expected,
+:class:`ShouldRaise` will raise an :class:`AssertionError`
+causing the tests in which it occurs to fail:
+
+>>> with ShouldRaise(ValueError('Is good!')):
+... the_thrower()
+Traceback (most recent call last):
+...
+AssertionError: ValueError('Not good!',) raised, ValueError('Is good!',) expected
+
+If you're not concerned about anything more than the type of the
+exception that's raised, you can check as follows:
+
+>>> from testfixtures import ShouldRaise
+>>> with ShouldRaise(ValueError):
+... the_thrower()
+
+If you're feeling slack and just want to check that an exception is
+raised, but don't care about the type of that exception, the following
+will suffice:
+
+>>> from testfixtures import ShouldRaise
+>>> with ShouldRaise():
+... the_thrower()
+
+If no exception is raised by the code under test, :class:`ShouldRaise`
+will raise an :class:`AssertionError` to indicate this:
+
+>>> from testfixtures import ShouldRaise
+>>> with ShouldRaise():
+... the_thrower(throw=False)
+Traceback (most recent call last):
+...
+AssertionError: No exception raised!
+
+:class:`ShouldRaise` has been implemented such that it can be
+successfully used to test if code raises both :class:`SystemExit` and
+:class:`KeyboardInterrupt` exceptions.
+
+To help with :class:`SystemExit` and other exceptions that are
+tricky to construct yourself, :class:`ShouldRaise` instances have a
+:attr:`~ShouldRaise.raised` attribute. This will contain the actual
+exception raised and can be used to inspect parts of it:
+
+>>> import sys
+>>> from testfixtures import ShouldRaise
+>>> with ShouldRaise() as s:
+... sys.exit(42)
+>>> s.raised.code
+42
+
+The :func:`should_raise` decorator
+-----------------------------------------
+
+If you are working in a traditional :mod:`unittest` environment and
+want to check that a particular test function raises an exception, you
+may find the decorator suits your needs better:
+
+.. code-block:: python
+
+ from testfixtures import should_raise
+
+ @should_raise(ValueError('Not good!'))
+ def test_function():
+ the_thrower()
+
+This decorator behaves exactly as the :class:`ShouldRaise` context
+manager described in the documentation above.
+
+.. note::
+
+   It is generally recommended that you use the context manager rather
+ than the decorator in most cases. With the decorator, all exceptions
+ raised within the decorated function will be checked, which can
+ hinder test development. With the context manager, you can make
+ assertions about only the exact lines of code that you expect to
+ raise the exception.
+
+Exceptions that are conditionally raised
+----------------------------------------
+
+Some exceptions are only raised in certain versions of Python. For
+example, in Python 2, ``bytes()`` will turn both bytes and strings into
+bytes, while in Python 3, it will raise an exception when presented
+with a string. If you wish to make assertions that this behaviour is
+expected, you can use the ``unless`` option to :class:`ShouldRaise`
+as follows:
+
+.. code-block:: python
+
+ import sys
+ from testfixtures import ShouldRaise
+
+ PY2 = sys.version_info[:2] < (3, 0)
+
+ with ShouldRaise(TypeError, unless=PY2):
+ bytes('something')
+
+.. note::
+
+ Do **not** abuse this functionality to make sloppy assertions. It is
+   always better to have two different tests that cover a case when an
+   exception should be raised and a case where an exception should not
+   be raised rather than using the above functionality. It is *only*
+ provided to help in cases where something in the environment that
+ cannot be mocked out or controlled influences whether or not an
+ exception is raised.
diff --git a/docs/files.txt b/docs/files.txt
new file mode 100644
index 0000000..fa88c2e
--- /dev/null
+++ b/docs/files.txt
@@ -0,0 +1,649 @@
+Testing with files and directories
+==================================
+
+.. currentmodule:: testfixtures
+
+Working with files and directories in tests can often require
+excessive amounts of boilerplate code to make sure that the tests
+happen in their own sandbox, files and directories contain what they
+should or code processes test files correctly, and the sandbox is
+cleared up at the end of the tests.
+
+Methods of use
+--------------
+To help with this, TestFixtures provides the
+:class:`TempDirectory` class that hides most of the
+boilerplate code you would need to write.
+
+Suppose you wanted to test the following function:
+
+.. code-block:: python
+
+ import os
+
+ def foo2bar(dirpath, filename):
+ path = os.path.join(dirpath, filename)
+ with open(path, 'rb') as input:
+ data = input.read()
+ data = data.replace(b'foo', b'bar')
+ with open(path, 'wb') as output:
+ output.write(data)
+
+There are several different ways depending on the type of test you are
+writing:
+
+The context manager
+~~~~~~~~~~~~~~~~~~~
+
+If you're using a version of Python where the ``with`` keyword is
+available, a :class:`TempDirectory` can be used as a
+context manager:
+
+>>> from testfixtures import TempDirectory
+>>> with TempDirectory() as d:
+... d.write('test.txt', b'some foo thing')
+... foo2bar(d.path, 'test.txt')
+... d.read('test.txt')
+'...'
+b'some bar thing'
+
+
+The decorator
+~~~~~~~~~~~~~
+
+If you are working in a traditional :mod:`unittest` environment and
+only work with files or directories in a particular test function, you
+may find the decorator suits your needs better:
+
+.. code-block:: python
+
+ from testfixtures import tempdir, compare
+
+ @tempdir()
+ def test_function(d):
+ d.write('test.txt', b'some foo thing')
+ foo2bar(d.path, 'test.txt')
+ compare(d.read('test.txt'), b'some bar thing')
+
+.. check the above raises no assertion error:
+
+ >>> test_function()
+
+Manual usage
+~~~~~~~~~~~~
+
+If you want to work with files or directories for the duration of a
+doctest or in every test in a :class:`~unittest.TestCase`, then you
+can use the :class:`TempDirectory` manually.
+
+The instantiation and replacement are done in the ``setUp`` function
+of the :class:`~unittest.TestCase` or passed to the
+:class:`~doctest.DocTestSuite` constructor:
+
+>>> from testfixtures import TempDirectory
+>>> d = TempDirectory()
+
+You can then use the temporary directory for your testing:
+
+>>> d.write('test.txt', b'some foo thing')
+'...'
+>>> foo2bar(d.path, 'test.txt')
+>>> d.read('test.txt') == b'some bar thing'
+True
+
+Then, in the ``tearDown`` function
+of the :class:`~unittest.TestCase` or passed to the
+:class:`~doctest.DocTestSuite` constructor, you should make sure the
+temporary directory is cleaned up:
+
+>>> d.cleanup()
+
+If you have multiple :class:`TempDirectory` objects in use,
+you can easily clean them all up:
+
+>>> TempDirectory.cleanup_all()
+
+Features of a temporary directory
+---------------------------------
+
+No matter which usage pattern you pick, you will always end up with a
+:class:`TempDirectory` object. These have an array of
+methods that let you perform common file and directory related tasks
+without all the manual boiler plate. The following sections show you
+how to perform the various tasks you're likely to bump into in the
+course of testing.
+
+.. create a tempdir for the examples:
+
+ >>> tempdir = TempDirectory()
+
+Computing paths
+~~~~~~~~~~~~~~~
+
+If you need to know the real path of the temporary directory, the
+:class:`TempDirectory` object has a :attr:`~TempDirectory.path`
+attribute:
+
+>>> tempdir.path
+'...tmp...'
+
+A common use case is to want to compute a path within the temporary
+directory to pass to code under test. This can be done with the
+:meth:`~TempDirectory.getpath` method:
+
+>>> tempdir.getpath('foo').rsplit(os.sep,1)[-1]
+'foo'
+
+If you want to compute a deeper path, you can pass either a
+tuple or a forward slash-separated path:
+
+>>> tempdir.getpath(('foo', 'baz')).rsplit(os.sep, 2)[-2:]
+['foo', 'baz']
+>>> tempdir.getpath('foo/baz').rsplit(os.sep, 2)[-2:]
+['foo', 'baz']
+
+.. note::
+
+ If passing a string containing path separators, a forward
+ slash should be used as the separator regardless of the underlying
+ platform separator.
+
+Writing files
+~~~~~~~~~~~~~
+
+To write to a file in the root of the temporary directory, you pass
+the name of the file and the content you want to write:
+
+>>> tempdir.write('myfile.txt', b'some text')
+'...'
+>>> with open(os.path.join(tempdir.path, 'myfile.txt')) as f:
+... print(f.read())
+some text
+
+The full path of the newly written file is returned:
+
+>>> path = tempdir.write('anotherfile.txt', b'some more text')
+>>> with open(path) as f:
+... print(f.read())
+some more text
+
+You can also write files into a sub-directory of the temporary
+directory, whether or not that directory exists, as follows:
+
+>>> path = tempdir.write(('some', 'folder', 'afile.txt'), b'the text')
+>>> with open(path) as f:
+... print(f.read())
+the text
+
+You can also specify the path to write to as a forward-slash separated
+string:
+
+>>> path = tempdir.write('some/folder/bfile.txt', b'the text')
+>>> with open(path) as f:
+... print(f.read())
+the text
+
+.. note::
+
+ Forward slashes should be used regardless of the file system or
+ operating system in use.
+
+Creating directories
+~~~~~~~~~~~~~~~~~~~~
+
+If you just want to create a sub-directory in the temporary directory
+you can do so as follows:
+
+.. new tempdir:
+
+ >>> tempdir = TempDirectory()
+
+>>> tempdir.makedir('output')
+'...'
+>>> os.path.isdir(os.path.join(tempdir.path, 'output'))
+True
+
+As with file creation, the full path of the sub-directory that has
+just been created is returned:
+
+>>> path = tempdir.makedir('more_output')
+>>> os.path.isdir(path)
+True
+
+Finally, you can create a nested sub-directory even if the intervening
+parent directories do not exist:
+
+>>> os.path.exists(os.path.join(tempdir.path, 'some'))
+False
+>>> path = tempdir.makedir(('some', 'sub', 'dir'))
+>>> os.path.exists(path)
+True
+
+You can also specify the path to write to as a forward-slash separated
+string:
+
+>>> os.path.exists(os.path.join(tempdir.path, 'another'))
+False
+>>> path = tempdir.makedir('another/sub/dir')
+>>> os.path.exists(path)
+True
+
+.. note::
+
+ Forward slashes should be used regardless of the file system or
+ operating system in use.
+
+Checking the contents of files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once a file has been written into the temporary directory, you will
+often want to check its contents. This is done with the
+:meth:`TempDirectory.read` method.
+
+Suppose the code you are testing creates some files:
+
+.. new tempdir:
+
+ >>> tempdir = TempDirectory()
+
+.. code-block:: python
+
+ def spew(path):
+ with open(os.path.join(path, 'root.txt'), 'wb') as f:
+ f.write(b'root output')
+ os.mkdir(os.path.join(path, 'subdir'))
+ with open(os.path.join(path, 'subdir', 'file.txt'), 'wb') as f:
+ f.write(b'subdir output')
+ os.mkdir(os.path.join(path, 'subdir', 'logs'))
+
+We can test this function by passing it the temporary directory's path
+and then using the :meth:`TempDirectory.read` method to
+check the files were created with the correct content:
+
+>>> spew(tempdir.path)
+>>> tempdir.read('root.txt')
+b'root output'
+>>> tempdir.read(('subdir', 'file.txt'))
+b'subdir output'
+
+The second part of the above test shows how to use the
+:meth:`TempDirectory.read` method to check the contents
+of files that are in sub-directories of the temporary directory. This
+can also be done by specifying the path relative to the root of
+the temporary directory as a forward-slash separated string:
+
+>>> tempdir.read('subdir/file.txt')
+b'subdir output'
+
+.. note::
+
+ Forward slashes should be used regardless of the file system or
+ operating system in use.
+
+Checking the contents of directories
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It's good practice to test that your code is only writing files you expect it
+to and to check they are being written to the path you expect.
+:meth:`TempDirectory.compare` is the method to use to do this.
+
+As an example, we could check that the :func:`spew` function above created no
+extraneous files as follows:
+
+>>> tempdir.compare([
+... 'root.txt',
+... 'subdir/',
+... 'subdir/file.txt',
+... 'subdir/logs/',
+... ])
+
+If we only wanted to check the sub-directory, we would specify the path to
+start from, relative to the root of the temporary directory:
+
+>>> tempdir.compare([
+... 'file.txt',
+... 'logs/',
+... ], path='subdir')
+
+If, like git, we only cared about files, we could do the comparison as follows:
+
+>>> tempdir.compare([
+... 'root.txt',
+... 'subdir/file.txt',
+... ], files_only=True)
+
+And finally, if we only cared about files at a particular level, we could
+turn off the recursive comparison as follows:
+
+>>> tempdir.compare([
+... 'root.txt',
+... 'subdir',
+... ], recursive=False)
+
+The :meth:`~testfixtures.TempDirectory.compare` method can also be used to
+check whether a directory contains nothing, for example:
+
+>>> tempdir.compare(path=('subdir', 'logs'), expected=())
+
+The above can also be done by specifying the sub-directory to be
+checked as a forward-slash separated path:
+
+>>> tempdir.compare(path='subdir/logs', expected=())
+
+If the actual directory contents do not match the expected contents passed in,
+an :class:`~exceptions.AssertionError` is raised, which will show up as a
+unit test failure:
+
+>>> tempdir.compare(['subdir'], recursive=False)
+Traceback (most recent call last):
+...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+()
+<BLANKLINE>
+expected:
+('subdir',)
+<BLANKLINE>
+actual:
+('root.txt', 'subdir')
+
+In some circumstances, you may want to ignore certain files or
+sub-directories when checking contents. To make this easy, the
+:class:`~testfixtures.TempDirectory` constructor takes an optional
+`ignore` parameter which, if provided, should contain a sequence of
+regular expressions. If any of the regular expressions return a match
+when used to search through the results of any of the methods
+covered in this section, that result will be ignored.
+
+For example, suppose we are testing some revision control code, but
+don't really care about the revision control system's metadata
+directories, which may or may not be present:
+
+.. code-block:: python
+
+ from random import choice
+
+ def svn_ish(dirpath, filename):
+ if choice((True, False)):
+ os.mkdir(os.path.join(dirpath, '.svn'))
+ with open(os.path.join(dirpath, filename), 'wb') as f:
+ f.write(b'something')
+
+To test this, we can use any of the previously described methods.
+
+When used manually or as a context manager, this would be as follows:
+
+>>> with TempDirectory(ignore=['.svn']) as d:
+... svn_ish(d.path, 'test.txt')
+... d.compare(['test.txt'])
+
+The decorator would be as follows:
+
+.. code-block:: python
+
+ from testfixtures import tempdir, compare
+
+ @tempdir(ignore=['.svn'])
+ def test_function(d):
+ svn_ish(d.path, 'test.txt')
+ d.compare(['test.txt'])
+
+.. check the above raises no assertion error:
+
+ >>> test_function()
+
+
+.. set things up again:
+
+ >>> tempdir = TempDirectory()
+ >>> spew(tempdir.path)
+
+If you are working with doctests, the
+:meth:`~testfixtures.TempDirectory.listdir` method can be used instead:
+
+>>> tempdir.listdir()
+root.txt
+subdir
+>>> tempdir.listdir('subdir')
+file.txt
+logs
+>>> tempdir.listdir(('subdir', 'logs'))
+No files or directories found.
+
+The above example also shows how to check the contents of sub-directories of
+the temporary directory and also shows what is printed when a
+directory contains nothing. The
+:meth:`~testfixtures.TempDirectory.listdir` method can also take a
+path separated by forward slashes, which can make doctests a little
+more readable. The above test could be written as follows:
+
+>>> tempdir.listdir('subdir/logs')
+No files or directories found.
+
+However, if you have a nested folder structure, such as that created by
+our :func:`spew` function, it can be easier to just inspect the whole
+tree of files and folders created. You can do this by using the
+`recursive` parameter to :meth:`~testfixtures.TempDirectory.listdir`:
+
+>>> tempdir.listdir(recursive=True)
+root.txt
+subdir/
+subdir/file.txt
+subdir/logs/
+
+Bytes versus Strings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. new tempdir:
+
+ >>> tempdir = TempDirectory()
+
+You'll notice that all of the examples so far have used raw bytes as
+their data and written to and read from files only in binary mode.
+This keeps all the examples nice and simple and working consistently
+between Python 2 and Python 3.
+One of the big changes between Python 2 and Python 3 was that the
+default string type became unicode instead of binary, and a new type
+for bytes was introduced. This little snippet shows the difference by
+defining two constants for the British Pound symbol:
+
+.. code-block:: python
+
+ import sys
+ PY3 = sys.version_info[:2] >= (3, 0)
+
+ if PY3:
+ some_bytes = '\xa3'.encode('utf-8')
+ some_text = '\xa3'
+ else:
+ some_bytes = '\xc2\xa3'
+ some_text = '\xc2\xa3'.decode('utf-8')
+
+Python 3 is much stricter than Python 2 about the byte versus string
+boundary and :class:`TempDirectory` has been changed to help work
+with this by only reading and writing files in binary mode and
+providing parameters to control decoding and encoding when you want to read and
+write text.
+
+For example, when writing, you can either write bytes directly, as we
+have been in the examples so far:
+
+>>> path = tempdir.write('currencies.txt', some_bytes)
+>>> with open(path, 'rb') as currencies:
+... currencies.read()
+b'\xc2\xa3'
+
+Or, you can write text, but must specify an encoding to use when
+writing the data to the file:
+
+>>> path = tempdir.write('currencies.txt', some_text, 'utf-8')
+>>> with open(path, 'rb') as currencies:
+... currencies.read()
+b'\xc2\xa3'
+
+The same is true when reading files. You can either read bytes:
+
+>>> tempdir.read('currencies.txt') == some_bytes
+True
+
+Or, you can read text, but must specify an encoding that will be used
+to decode the data in the file:
+
+>>> tempdir.read('currencies.txt', 'utf-8') == some_text
+True
+
+Working with an existing sandbox
+--------------------------------
+
+Some testing infrastructure already provides a sandbox temporary
+directory, however that infrastructure might not provide the same
+level of functionality that :class:`~testfixtures.TempDirectory`
+provides.
+
+For this reason, it is possible to wrap an existing directory such as
+the following with a :class:`~testfixtures.TempDirectory`:
+
+>>> from tempfile import mkdtemp
+>>> thedir = mkdtemp()
+
+When working with the context manager, this is done as follows:
+
+>>> with TempDirectory(path=thedir) as d:
+... d.write('file', b'data')
+... d.makedir('directory')
+... sorted(os.listdir(thedir))
+'...'
+'...'
+['directory', 'file']
+
+.. check thedir still exists and reset
+
+ >>> from shutil import rmtree
+ >>> os.path.exists(thedir)
+ True
+ >>> rmtree(thedir)
+ >>> thedir = mkdtemp()
+
+For the decorator, usage would be as follows:
+
+.. code-block:: python
+
+ from testfixtures import tempdir, compare
+
+ @tempdir(path=thedir)
+ def test_function(d):
+ d.write('file', b'data')
+ d.makedir('directory')
+ assert sorted(os.listdir(thedir))==['directory', 'file']
+
+.. check the above raises no assertion error and that thedir still
+ exits:
+
+ >>> test_function()
+ >>> os.path.exists(thedir)
+ True
+
+It is important to note that if an existing directory is used, it will
+not be deleted by either the decorator or the context manager. You
+will need to make sure that the directory is cleaned up as required.
+
+.. check the above statement is true:
+
+ >>> os.path.exists(thedir)
+ True
+
+.. better clean it up:
+
+ >>> rmtree(thedir)
+
+Using with Manuel
+-----------------
+
+`Manuel`__ is an excellent take on testing the examples found in
+documentation. It works by applying a set of specialised
+parsers to the documentation and testing or otherwise using the
+blocks returned by those parsers.
+
+__ http://pypi.python.org/pypi/manuel
+
+The key differences between testing with Manuel and the traditional
+doctest are that it is possible to plug in different types of parser,
+not just the "python console session" one, and so it is possible to
+test different types of examples. TestFixtures provides one of these
+plugins to aid working with
+:class:`~testfixtures.TempDirectory` objects. This plugin makes use of
+:rst:dir:`topic` directives with specific classes set to perform
+different actions.
+
+The following sections describe how to use this plugin to help with
+writing temporary files and checking their contents.
+
+Setting up
+~~~~~~~~~~
+
+To use the Manuel plugin, you need to make sure a
+:class:`TempDirectory` instance is available under a particular name
+in the test globals. This name is then passed to the plugin's
+constructor and the plugin is passed to Manuel's
+:class:`~manuel.testing.TestSuite` constructor.
+
+The following example shows how to return a test suite that will
+execute all of the examples below. These require not only the
+TestFixtures plugin but also the Manuel plugins that give more
+traditional doctest behaviour, hidden code blocks
+that are useful for setting things up and checking examples without
+breaking up the flow of the documentation, and capturing of examples
+from the documentation for use in other forms of testing:
+
+.. literalinclude:: ../testfixtures/tests/test_manuel_examples.py
+ :lines: 7-
+
+Writing files
+~~~~~~~~~~~~~
+
+To write a file with the plugin, a :rst:dir:`topic` with a class of
+``write-file`` is included in the documentation. The following example
+is a complete reStructuredText file that shows how to write a file
+that is then used by a later example:
+
+.. literalinclude:: ../testfixtures/tests/configparser-read.txt
+
+Checking the contents of files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To read a file with the plugin, a :rst:dir:`topic` with a class of
+``read-file`` is included in the documentation. The following example
+is a complete reStructuredText file that shows how to check the values
+written by the code being documented while also using this check as
+part of the documentation:
+
+.. literalinclude:: ../testfixtures/tests/configparser-write.txt
+
+Checking the contents of directories
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+While the TestFixtures plugin itself does not offer any facility for
+checking the contents of directories, Manuel's :mod:`~manuel.capture`
+plugin can be used in conjunction with the existing features of a
+:class:`TempDirectory` to illustrate the contents expected
+in a directory seamlessly within the documentation.
+
+Here's a complete reStructuredText document that illustrates this
+technique:
+
+.. literalinclude:: ../testfixtures/tests/directory-contents.txt
+
+.. clean up all tempdirs:
+
+ >>> TempDirectory.cleanup_all()
+
+A note on encoding and line endings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As currently implemented, the plugin provided by TestFixtures only works
+with textual file content that can be encoded using the ASCII
+character set. This content will always be written with ``'\n'`` line
+separators and, when read, will always have its line endings
+normalised to ``'\n'``. If you hit any limitations caused by this,
+please raise an issue in the tracker on GitHub.
diff --git a/docs/index.txt b/docs/index.txt
new file mode 100644
index 0000000..cbd65e0
--- /dev/null
+++ b/docs/index.txt
@@ -0,0 +1,49 @@
+TestFixtures documentation
+==========================
+
+TestFixtures is a collection of helpers and mock objects that are
+useful when writing unit tests or doc tests.
+
+The sections below describe the use of the various tools included:
+
+.. toctree::
+ :maxdepth: 1
+
+ comparing.txt
+ mocking.txt
+ datetime.txt
+ logging.txt
+ streams.txt
+ files.txt
+ exceptions.txt
+ warnings.txt
+ popen.txt
+ components.txt
+ utilities.txt
+
+If you're looking for a description of a particular tool, please see
+the API reference:
+
+.. toctree::
+ :maxdepth: 1
+
+ api.txt
+
+For details of how to install the package or get involved in its
+development, please see the sections below:
+
+.. toctree::
+ :maxdepth: 1
+
+ installation.txt
+ development.txt
+ changes.txt
+ license.txt
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/docs/installation.txt b/docs/installation.txt
new file mode 100644
index 0000000..f6f6f6b
--- /dev/null
+++ b/docs/installation.txt
@@ -0,0 +1,30 @@
+Installation Instructions
+=========================
+
+If you want to experiment with TestFixtures, the easiest way to
+install it is to do the following in a virtualenv::
+
+ pip install testfixtures
+
+If your package uses setuptools and you decide to use TestFixtures,
+then you should do one of the following:
+
+- Specify ``testfixtures`` in the ``tests_require`` parameter of your
+ package's call to ``setup`` in :file:`setup.py`.
+
+- Add an ``extras_require`` parameter in your call to ``setup`` as
+ follows:
+
+ .. code-block:: python
+
+ setup(
+ # other stuff here
+ extras_require=dict(
+ test=['testfixtures'],
+ )
+ )
+
+.. topic:: Python version requirements
+
+ This package has been tested with Python 2.6, 2.7, 3.2 to 3.4 on Linux,
+ Mac OS X and Windows.
diff --git a/docs/license.txt b/docs/license.txt
new file mode 100644
index 0000000..b383c6e
--- /dev/null
+++ b/docs/license.txt
@@ -0,0 +1,5 @@
+=======
+License
+=======
+
+.. literalinclude:: ../LICENSE.txt
diff --git a/docs/logging.txt b/docs/logging.txt
new file mode 100644
index 0000000..e5d064d
--- /dev/null
+++ b/docs/logging.txt
@@ -0,0 +1,431 @@
+Testing logging
+===============
+
+.. currentmodule:: testfixtures
+
+Python includes an excellent :mod:`logging` package, however many
+people assume that logging calls do not need to be tested. They may
+also want to test logging calls but find the prospect too daunting.
+To help with this, TestFixtures allows you to easily capture the
+output of calls to Python's logging framework and make sure they were
+as expected.
+
+.. note:: The :class:`LogCapture` class is useful for checking that
+ your code logs the right messages. If you want to check that
+ the configuration of your handlers is correct, please see
+ the :ref:`section <check-log-config>` below.
+
+Methods of capture
+------------------
+
+There are three different techniques for capturing messages logged to
+the Python logging framework, depending on the type of test you are
+writing. They are all described in the sections below.
+
+The context manager
+~~~~~~~~~~~~~~~~~~~
+
+If you're using a version of Python where the ``with`` keyword is
+available, the context manager provided by TestFixtures can be used:
+
+>>> import logging
+>>> from testfixtures import LogCapture
+>>> with LogCapture() as l:
+... logger = logging.getLogger()
+... logger.info('a message')
+... logger.error('an error')
+
+For the duration of the ``with`` block, log messages are captured. The
+context manager provides a check method that raises an exception if
+the logging wasn't as you expected:
+
+>>> l.check(
+... ('root', 'INFO', 'a message'),
+... ('root', 'ERROR', 'another error'),
+... )
+Traceback (most recent call last):
+ ...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+(('root', 'INFO', 'a message'),)
+<BLANKLINE>
+expected:
+(('root', 'ERROR', 'another error'),)
+<BLANKLINE>
+actual:
+(('root', 'ERROR', 'an error'),)
+
+It also has a string representation that allows you to see what has
+been logged, which is useful for doc tests:
+
+>>> print(l)
+root INFO
+ a message
+root ERROR
+ an error
+
+The decorator
+~~~~~~~~~~~~~
+
+If you are working in a traditional :mod:`unittest` environment and
+only want to capture logging for a particular test function, you may
+find the decorator suits your needs better:
+
+.. code-block:: python
+
+ from testfixtures import log_capture
+
+ @log_capture()
+ def test_function(l):
+ logger = logging.getLogger()
+ logger.info('a message')
+ logger.error('an error')
+
+ l.check(
+ ('root', 'INFO', 'a message'),
+ ('root', 'ERROR', 'an error'),
+ )
+
+.. check the above raises no assertion error:
+
+ >>> test_function()
+
+Manual usage
+~~~~~~~~~~~~
+
+If you want to capture logging for the duration of a doctest or
+in every test in a :class:`~unittest.TestCase`, then you can use the
+:class:`~testfixtures.LogCapture` manually.
+
+The instantiation and replacement are done in the ``setUp`` function
+of the :class:`~unittest.TestCase` or passed to the
+:class:`~doctest.DocTestSuite` constructor:
+
+>>> from testfixtures import LogCapture
+>>> l = LogCapture()
+
+You can then execute whatever will log the messages you want to test
+for:
+
+>>> from logging import getLogger
+>>> getLogger().info('a message')
+
+At any point, you can check what has been logged using the
+check method:
+
+>>> l.check(('root', 'INFO', 'a message'))
+
+Alternatively, you can use the string representation of the
+:class:`~testfixtures.LogCapture`:
+
+>>> print(l)
+root INFO
+ a message
+
+Then, in the ``tearDown`` function
+of the :class:`~unittest.TestCase` or passed to the
+:class:`~doctest.DocTestSuite` constructor, you should make sure you
+stop the capturing:
+
+>>> l.uninstall()
+
+If you have multiple :class:`~testfixtures.LogCapture` objects in use,
+you can easily uninstall them all:
+
+>>> LogCapture.uninstall_all()
+
+Checking captured log messages
+------------------------------
+
+Regardless of how you use the :class:`~testfixtures.LogCapture` to
+capture messages, there are three ways of checking that the messages
+captured were as expected.
+
+The following example is useful for showing these:
+
+.. code-block:: python
+
+ from testfixtures import LogCapture
+ from logging import getLogger
+ logger = getLogger()
+
+ with LogCapture() as l:
+ logger.info('start of block number %i', 1)
+ try:
+ raise RuntimeError('No code to run!')
+ except:
+ logger.error('error occurred', exc_info=True)
+
+The check method
+~~~~~~~~~~~~~~~~
+
+The :obj:`~testfixtures.LogCapture` has a
+:meth:`~testfixtures.LogCapture.check` method that will compare the
+log messages captured with those you expect. Expected messages are
+expressed as three-element tuples where the first element is the name
+of the logger to which the message should have been logged, the
+second element is the string representation of the level at which the
+message should have been logged and the third element is the message
+that should have been logged after any parameter interpolation has
+taken place.
+
+If things are as you expected, the method will not raise any exceptions:
+
+>>> result = l.check(
+... ('root', 'INFO', 'start of block number 1'),
+... ('root', 'ERROR', 'error occurred'),
+... )
+
+
+However, if the actual messages logged were different, you'll get an
+:class:`~exceptions.AssertionError` explaining what happened:
+
+>>> l.check(('root', 'INFO', 'start of block number 1'))
+Traceback (most recent call last):
+ ...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+(('root', 'INFO', 'start of block number 1'),)
+<BLANKLINE>
+expected:
+()
+<BLANKLINE>
+actual:
+(('root', 'ERROR', 'error occurred'),)
+
+Printing
+~~~~~~~~
+
+The :obj:`~testfixtures.LogCapture` has a string representation that
+shows what messages it has captured. This can be useful in doc tests:
+
+>>> print(l)
+root INFO
+ start of block number 1
+root ERROR
+ error occurred
+
+This representation can also be used to check that no logging has
+occurred:
+
+>>> empty = LogCapture()
+>>> print(empty)
+No logging captured
+
+Inspecting
+~~~~~~~~~~
+
+The :obj:`~testfixtures.LogCapture` also keeps a list of the
+:class:`~logging.LogRecord` instances it captures. This is useful when
+you want to check specifics of the captured logging that aren't
+available from either the string representation or the
+:meth:`~testfixtures.LogCapture.check` method.
+
+A common case of this is where you want to check that exception
+information was logged for certain messages:
+
+>>> print(l.records[-1].exc_info)
+(<... '...RuntimeError'>, RuntimeError('No code to run!',), <traceback object at ...>)
+
+If you're working in a unit test, the following code may be more
+appropriate:
+
+.. code-block:: python
+
+ from testfixtures import compare, Comparison as C
+
+ compare(C(RuntimeError('No code to run!')), l.records[-1].exc_info[1])
+
+Only capturing specific logging
+-------------------------------
+
+Some actions that you want to test may generate a lot of logging, only
+some of which you actually need to care about.
+
+The logging you care about is often only that above a certain log
+level. If this is the case, you can configure :obj:`~testfixtures.LogCapture` to
+only capture logging at or above a specific level.
+
+If using the context manager, you would do this:
+
+>>> with LogCapture(level=logging.INFO) as l:
+... logger = getLogger()
+... logger.debug('junk')
+... logger.info('something we care about')
+... logger.error('an error')
+>>> print(l)
+root INFO
+ something we care about
+root ERROR
+ an error
+
+If using the decorator, you would do this:
+
+.. code-block:: python
+
+ @log_capture(level=logging.INFO)
+ def test_function(l):
+ logger = getLogger()
+ logger.debug('junk')
+ logger.info('what we care about')
+
+ l.check(('root', 'INFO', 'what we care about'))
+
+.. check it behaves!
+
+ >>> test_function()
+
+
+In other cases this problem can be alleviated by only capturing a
+specific logger.
+
+If using the context manager, you would do this:
+
+>>> with LogCapture('specific') as l:
+... getLogger('something').info('junk')
+... getLogger('specific').info('what we care about')
+... getLogger().info('more junk')
+>>> print(l)
+specific INFO
+ what we care about
+
+If using the decorator, you would do this:
+
+.. code-block:: python
+
+ @log_capture('specific')
+ def test_function(l):
+ getLogger('something').info('junk')
+ getLogger('specific').info('what we care about')
+ getLogger().info('more junk')
+
+ l.check(('specific', 'INFO', 'what we care about'))
+
+.. check it behaves!
+
+ >>> test_function()
+
+However, it may be that while you don't want to capture all logging,
+you do want to capture logging from multiple specific loggers.
+
+You would do this with the context manager as follows:
+
+>>> with LogCapture(('one','two')) as l:
+... getLogger('three').info('3')
+... getLogger('two').info('2')
+... getLogger('one').info('1')
+>>> print(l)
+two INFO
+ 2
+one INFO
+ 1
+
+Likewise, the same thing can be done with the decorator:
+
+.. code-block:: python
+
+ @log_capture('one','two')
+ def test_function(l):
+ getLogger('three').info('3')
+ getLogger('two').info('2')
+ getLogger('one').info('1')
+
+ l.check(
+ ('two', 'INFO', '2'),
+ ('one', 'INFO', '1')
+ )
+
+.. check it behaves!
+
+ >>> test_function()
+
+It may also be that the simplest thing to do is only capture logging
+for part of your test. This is particularly common with long doc
+tests. To make this easier, :obj:`~testfixtures.LogCapture` supports
+manual installation and uninstallation as shown in the following
+example:
+
+>>> l = LogCapture(install=False)
+>>> getLogger().info('junk')
+>>> l.install()
+>>> getLogger().info('something we care about')
+>>> l.uninstall()
+>>> getLogger().info('more junk')
+>>> l.install()
+>>> getLogger().info('something else we care about')
+>>> print(l)
+root INFO
+ something we care about
+root INFO
+ something else we care about
+
+.. uninstall:
+
+ >>> LogCapture.uninstall_all()
+
+.. _check-log-config:
+
+Checking the configuration of your log handlers
+-----------------------------------------------
+
+:class:`LogCapture` is good for checking that your code is logging the
+correct messages; just as important is checking that your application
+has correctly configured log handlers. This can be done using a unit
+test such as the following:
+
+.. code-block:: python
+
+ from testfixtures import Comparison as C, compare
+ from unittest import TestCase
+ import logging
+ import sys
+
+ class LoggingConfigurationTests(TestCase):
+
+ # We mock out the handlers list for the logger we're
+ # configuring in such a way that we have no handlers
+ # configured at the start of the test and the handlers our
+ # configuration installs are removed at the end of the test.
+
+ def setUp(self):
+ self.logger = logging.getLogger()
+ self.orig_handlers = self.logger.handlers
+ self.logger.handlers = []
+ self.level = self.logger.level
+
+ def tearDown(self):
+ self.logger.handlers = self.orig_handlers
+ self.logger.level = self.level
+
+ def test_basic_configuration(self):
+ # Our logging configuration code, in this case just a
+ # call to basicConfig:
+ logging.basicConfig(format='%(levelname)s %(message)s',
+ level=logging.INFO)
+
+ # Now we check the configuration is as expected:
+
+ compare(self.logger.level, 20)
+ compare([
+ C('logging.StreamHandler',
+ stream=sys.stderr,
+ formatter=C('logging.Formatter',
+ _fmt='%(levelname)s %(message)s',
+ strict=False),
+ level=logging.NOTSET,
+ strict=False)
+ ], self.logger.handlers)
+
+
+.. the result:
+
+ >>> import unittest
+ >>> from testfixtures.compat import StringIO
+ >>> suite = unittest.TestLoader().loadTestsFromTestCase(LoggingConfigurationTests)
+ >>> stream = StringIO()
+ >>> result = unittest.TextTestRunner(verbosity=0, stream=stream).run(suite)
+ >>> if result.errors or result.failures: print(stream.getvalue())
+ >>> result
+ <unittest...TextTestResult run=1 errors=0 failures=0>
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..385632b
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,100 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+REM Usage: make.bat <target> -- targets mirror those of the Unix Makefile.
+
+REM sphinx-build is expected in the project's bin directory (buildout/venv).
+set SPHINXBUILD=..\bin\sphinx-build
+REM -d caches parsed doctrees; %SPHINXOPTS% passes through caller options;
+REM '.' is the documentation source directory.
+set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+REM No target given: show the help text.
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html      to make standalone HTML files
+	echo.  dirhtml   to make HTML files named index.html in directories
+	echo.  pickle    to make pickle files
+	echo.  json      to make JSON files
+	echo.  htmlhelp  to make HTML files and a HTML help project
+	echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  changes   to make an overview over all changed/added/deprecated items
+	echo.  linkcheck to check all external links for integrity
+	echo.  doctest   to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+REM Remove everything previously built under _build.
+if "%1" == "clean" (
+	for /d %%i in (_build\*) do rmdir /q /s %%i
+	del /q /s _build\*
+	goto end
+)
+
+REM Each target below simply invokes sphinx-build with the matching builder.
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html
+	echo.
+	echo.Build finished. The HTML pages are in _build/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml
+	echo.
+	echo.Build finished. The HTML pages are in _build/dirhtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in _build/htmlhelp.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex
+	echo.
+	echo.Build finished; the LaTeX files are in _build/latex.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes
+	echo.
+	echo.The overview file is in _build/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in _build/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in _build/doctest/output.txt.
+	goto end
+)
+
+:end
diff --git a/docs/mocking.txt b/docs/mocking.txt
new file mode 100644
index 0000000..761d949
--- /dev/null
+++ b/docs/mocking.txt
@@ -0,0 +1,414 @@
+Mocking out objects and methods
+===============================
+
+Mocking is the process of replacing chunks of complex functionality
+that aren't the subject of the test with mock objects that allow you
+to check that the mocked out functionality is being used as expected.
+
+In this way, you can break down testing of a complicated set of
+interacting components into testing of each individual component.
+The behaviour of components can then be tested individually,
+irrespective of the behaviour of the components around it.
+
+There are a few implementations of mock objects in the python world. An
+excellent example and the one recommended for use with TestFixtures is
+the Mock package: http://pypi.python.org/pypi/mock/
+
+Methods of replacement
+----------------------
+
+TestFixtures provides three different methods of mocking out
+functionality that can be used to replace functions, classes
+or even individual methods on a class. Consider the following module:
+
+.. topic:: testfixtures.tests.sample1
+ :class: module
+
+ .. literalinclude:: ../testfixtures/tests/sample1.py
+ :pyobject: X
+
+.. do the import quietly
+
+ >>> from testfixtures.tests.sample1 import X
+
+We want to mock out the ``y`` method of the ``X`` class, with,
+for example, the following function:
+
+.. code-block:: python
+
+ def mock_y(self):
+ return 'mock y'
+
+The context manager
+~~~~~~~~~~~~~~~~~~~
+
+For replacement of a single thing, it's easiest to use the
+:class:`~testfixtures.Replace` context manager:
+
+.. code-block:: python
+
+ from testfixtures import Replace
+
+ def test_function():
+ with Replace('testfixtures.tests.sample1.X.y', mock_y):
+ print(X().y())
+
+For the duration of the ``with`` block, the replacement is used:
+
+>>> test_function()
+mock y
+
+For multiple replacements, or where you need access to the replacement
+within the code block under test, the :class:`~testfixtures.Replacer` context
+manager can be used instead:
+
+
+.. code-block:: python
+
+ from mock import Mock
+ from testfixtures import Replacer
+
+ def test_function():
+ with Replacer() as replace:
+ mock_y = replace('testfixtures.tests.sample1.X.y', Mock())
+ mock_y.return_value = 'mock y'
+ print(X().y())
+
+For the duration of the ``with`` block, the replacement is used:
+
+>>> test_function()
+mock y
+
+The decorator
+~~~~~~~~~~~~~
+
+If you are working in a traditional :mod:`unittest` environment and
+want to replace different things in different test functions, you may
+find the decorator suits your needs better:
+
+.. code-block:: python
+
+ from testfixtures import replace
+
+ @replace('testfixtures.tests.sample1.X.y', mock_y)
+ def test_function():
+ print(X().y())
+
+When using the decorator, the replacement is used for the duration of
+the decorated callable's execution:
+
+>>> test_function()
+mock y
+
+If you need to manipulate or inspect the object that's used as a
+replacement, you can add an extra parameter to your function. The
+decorator will see this and pass the replacement in its place:
+
+.. code-block:: python
+
+ from mock import Mock, call
+ from testfixtures import compare,replace
+
+ @replace('testfixtures.tests.sample1.X.y', Mock())
+ def test_function(mock_y):
+ mock_y.return_value = 'mock y'
+ print(X().y())
+ compare(mock_y.mock_calls, expected=[call()])
+
+The above still results in the same output:
+
+>>> test_function()
+mock y
+
+Manual usage
+~~~~~~~~~~~~
+
+If you want to replace something for the duration of a doctest or you
+want to replace something for every test in a
+:class:`~unittest.TestCase`, then you can use the
+:class:`~testfixtures.Replacer` manually.
+
+The instantiation and replacement are done in the ``setUp`` function
+of the :class:`~unittest.TestCase` or passed to the
+:class:`~doctest.DocTestSuite` constructor:
+
+>>> from testfixtures import Replacer
+>>> replace = Replacer()
+>>> replace('testfixtures.tests.sample1.X.y', mock_y)
+<...>
+
+The replacement then stays in place until removed:
+
+>>> X().y()
+'mock y'
+
+Then, in the ``tearDown`` function
+of the :class:`~unittest.TestCase` or passed to the
+:class:`~doctest.DocTestSuite` constructor, the replacement is
+removed:
+
+>>> replace.restore()
+>>> X().y()
+'original y'
+
+The :meth:`~testfixtures.Replacer.restore` method can also be added as an
+:meth:`~unittest.TestCase.addCleanup` if that is easier or more compact in your test
+suite.
+
+Replacing more than one thing
+-----------------------------
+
+Both the :class:`~testfixtures.Replacer` and the
+:func:`~testfixtures.replace` decorator can be used to replace more
+than one thing at a time. For the former, this is fairly obvious:
+
+.. code-block:: python
+
+ def test_function():
+ with Replacer() as replace:
+ y = replace('testfixtures.tests.sample1.X.y', Mock())
+ y.return_value = 'mock y'
+ aMethod = replace('testfixtures.tests.sample1.X.aMethod', Mock())
+ aMethod.return_value = 'mock method'
+ x = X()
+ print(x.y(), x.aMethod())
+
+.. the result:
+
+ >>> test_function()
+ mock y mock method
+
+For the decorator, it's less obvious but still pretty easy:
+
+.. code-block:: python
+
+ from testfixtures import replace
+
+ @replace('testfixtures.tests.sample1.X.y', Mock())
+ @replace('testfixtures.tests.sample1.X.aMethod', Mock())
+ def test_function(aMethod, y):
+ print(aMethod, y)
+ aMethod().return_value = 'mock method'
+ y().return_value = 'mock y'
+ x = X()
+ print(aMethod, y)
+ print(x.y(), x.aMethod())
+
+You'll notice that you can still get access to the replacements, even
+though there are several of them.
+
+Replacing things that may not be there
+--------------------------------------
+
+The following code shows a situation where ``hpy`` may or may not be
+present depending on whether the ``guppy`` package is installed or
+not.
+
+
+.. topic:: testfixtures.tests.sample2
+ :class: module
+
+ .. literalinclude:: ../testfixtures/tests/sample2.py
+ :lines: 10-19
+
+To test the behaviour of the code that uses ``hpy`` in both of
+these cases, regardless of whether or not the ``guppy`` package is
+actually installed, we need to be able to mock out both ``hpy`` and the
+``guppy`` global. This is done by doing non-strict replacement, as
+shown in the following :class:`~unittest.TestCase`:
+
+.. imports
+
+ >>> import unittest,sys
+
+.. code-block:: python
+
+ from testfixtures.tests.sample2 import dump
+ from testfixtures import replace
+ from mock import Mock, call
+
+ class Tests(unittest.TestCase):
+
+ @replace('testfixtures.tests.sample2.guppy', True)
+ @replace('testfixtures.tests.sample2.hpy', Mock(), strict=False)
+ def test_method(self, hpy):
+
+ dump('somepath')
+
+ compare([
+ call(),
+ call().heap(),
+ call().heap().stat.dump('somepath')
+ ], hpy.mock_calls)
+
+ @replace('testfixtures.tests.sample2.guppy', False)
+ @replace('testfixtures.tests.sample2.hpy', Mock(), strict=False)
+ def test_method_no_heapy(self,hpy):
+
+ dump('somepath')
+
+ compare(hpy.mock_calls,[])
+
+.. the result:
+
+ >>> from testfixtures.compat import StringIO
+ >>> suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
+ >>> unittest.TextTestRunner(verbosity=0,stream=StringIO()).run(suite)
+ <unittest...TextTestResult run=2 errors=0 failures=0>
+
+
+The :meth:`~testfixtures.Replacer.replace` method and calling
+a :class:`Replacer` also support non-strict replacement using the same
+keyword parameter.
+
+Replacing items in dictionaries and lists
+-----------------------------------------
+
+:class:`~testfixtures.Replace`, :class:`~testfixtures.Replacer` and the
+:func:`~testfixtures.replace` decorator can be used to replace items
+in dictionaries and lists.
+
+For example, suppose you have a data structure like the following:
+
+.. topic:: testfixtures.tests.sample1
+ :class: module
+
+ .. literalinclude:: ../testfixtures/tests/sample1.py
+ :lines: 67-70
+
+You can mock out the value associated with ``key`` and the second
+element in the ``complex_key`` list as follows:
+
+.. code-block:: python
+
+ from pprint import pprint
+ from testfixtures import Replacer
+ from testfixtures.tests.sample1 import someDict
+
+ def test_function():
+ with Replacer() as replace:
+ replace('testfixtures.tests.sample1.someDict.key', 'foo')
+ replace('testfixtures.tests.sample1.someDict.complex_key.1', 42)
+ pprint(someDict)
+
+While the replacement is in effect, the new items are in place:
+
+>>> test_function()
+{'complex_key': [1, 42, 3], 'key': 'foo'}
+
+When it is no longer in effect, the originals are returned:
+
+>>> pprint(someDict)
+{'complex_key': [1, 2, 3], 'key': 'value'}
+
+.. _removing_attr_and_item:
+
+Removing attributes and dictionary items
+----------------------------------------
+
+:class:`~testfixtures.Replace`, :class:`~testfixtures.Replacer` and the
+:func:`~testfixtures.replace` decorator can be used to remove
+attributes from objects and remove items from dictionaries.
+
+For example, suppose you have a data structure like the following:
+
+.. topic:: testfixtures.tests.sample1
+ :class: module
+
+ .. literalinclude:: ../testfixtures/tests/sample1.py
+ :lines: 67-70
+
+If you want to remove the ``key`` for the duration of a test, you can
+do so as follows:
+
+.. code-block:: python
+
+    from testfixtures import Replace, not_there
+ from testfixtures.tests.sample1 import someDict
+
+ def test_function():
+ with Replace('testfixtures.tests.sample1.someDict.key', not_there):
+ pprint(someDict)
+
+While the replacement is in effect, ``key`` is gone:
+
+>>> test_function()
+{'complex_key': [1, 2, 3]}
+
+When it is no longer in effect, ``key`` is returned:
+
+>>> pprint(someDict)
+{'complex_key': [1, 2, 3], 'key': 'value'}
+
+If you want the whole ``someDict`` dictionary to be removed for the
+duration of a test, you would do so as follows:
+
+.. code-block:: python
+
+    from testfixtures import Replace, not_there
+ from testfixtures.tests import sample1
+
+ def test_function():
+ with Replace('testfixtures.tests.sample1.someDict', not_there):
+ print(hasattr(sample1, 'someDict'))
+
+While the replacement is in effect, ``someDict`` is gone:
+
+>>> test_function()
+False
+
+When it is no longer in effect, ``someDict`` is returned:
+
+>>> pprint(sample1.someDict)
+{'complex_key': [1, 2, 3], 'key': 'value'}
+
+
+Gotchas
+-------
+
+- Make sure you replace the object where it's used and not where it's
+ defined. For example, with the following code from the
+ ``testfixtures.tests.sample1`` package:
+
+ .. literalinclude:: ../testfixtures/tests/sample1.py
+ :lines: 30-34
+
+ You might be tempted to mock things as follows:
+
+ >>> replace = Replacer()
+ >>> replace('time.time', Mock())
+ <...>
+
+ But this won't work:
+
+ >>> from testfixtures.tests.sample1 import str_time
+ >>> type(float(str_time()))
+ <... 'float'>
+
+ You need to replace :func:`~time.time` where it's used, not where
+ it's defined:
+
+ >>> replace('testfixtures.tests.sample1.time', Mock())
+ <...>
+ >>> str_time()
+ "<...Mock...>"
+
+ .. cleanup
+
+ >>> replace.restore()
+
+ A corollary of this is that you need to replace *all* occurrences of
+ an original to safely be able to test. This can be tricky when an
+ original is imported into many modules that may be used by a
+ particular test.
+
+- You can't replace whole top level modules, and nor should you want
+ to! The reason being that everything up to the last dot in the
+ replacement target specifies where the replacement will take place,
+ and the part after the last dot is used as the name of the thing to
+ be replaced:
+
+ >>> Replacer().replace('sys', Mock())
+ Traceback (most recent call last):
+ ...
+ ValueError: target must contain at least one dot!
diff --git a/docs/popen.txt b/docs/popen.txt
new file mode 100644
index 0000000..90de368
--- /dev/null
+++ b/docs/popen.txt
@@ -0,0 +1,133 @@
+
+.. currentmodule:: testfixtures.popen
+
+Testing use of the subprocess package
+=====================================
+
+When using the :mod:`subprocess` package there are two approaches to testing:
+
+* Have your tests exercise the real processes being instantiated and used.
+
+* Mock out use of the :mod:`subprocess` package and provide expected output
+ while recording interactions with the package to make sure they are as
+ expected.
+
+While the first of these should be preferred, it means that you need to have all
+the external software available everywhere you wish to run tests. Your tests
+will also need to make sure any dependencies of that software on
+an external environment are met. If that external software takes a long time to
+run, your tests will also take a long time to run.
+
+These challenges can often make the second approach more practical and can
+be the more pragmatic approach when coupled with a mock that accurately
+simulates the behaviour of a subprocess. :class:`~testfixtures.popen.MockPopen`
+is an attempt to provide just such a mock.
+
+.. note:: To use :class:`~testfixtures.popen.MockPopen`, you must have the
+ :mod:`mock` package installed.
+
+
+Example usage
+-------------
+
+As an example, suppose you have code such as the following that you need to
+test:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :lines: 4-12
+
+Tests that exercise this code using :class:`~testfixtures.popen.MockPopen`
+could be written as follows:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :lines: 16-52
+
+
+Passing input to processes
+--------------------------
+
+If your testing requires passing input to the subprocess, you can do so by
+checking for the input passed to :meth:`~subprocess.Popen.communicate` method
+when you check the calls on the mock as shown in this example:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :pyobject: TestMyFunc.test_communicate_with_input
+ :dedent: 4
+
+.. note:: Accessing ``.stdin`` isn't currently supported by this mock.
+
+
+Reading from ``stdout`` and ``stderr``
+--------------------------------------
+
+The ``.stdout`` and ``.stderr`` attributes of the mock returned by
+:class:`~testfixtures.popen.MockPopen` will be file-like objects as with
+the real :class:`~subprocess.Popen` and can be read as shown in this example:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :pyobject: TestMyFunc.test_read_from_stdout_and_stderr
+ :dedent: 4
+
+.. warning::
+
+ While these streams behave a lot like the streams of a real
+ :class:`~subprocess.Popen` object, they do not exhibit the deadlocking
+ behaviour that can occur when the two streams are read as in the example
+ above. Be very careful when reading ``.stdout`` and ``.stderr`` and
+ consider using :class:`~subprocess.Popen.communicate` instead.
+
+Specifying the return code
+--------------------------
+
+Often code will need to behave differently depending on the return code of the
+launched process. Specifying a simulated response code, along with testing for
+the correct usage of :meth:`~subprocess.Popen.wait`, can be seen in the
+following example:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :pyobject: TestMyFunc.test_wait_and_return_code
+ :dedent: 4
+
+Checking for signal sending
+---------------------------
+
+Calls to ``.send_signal()``, ``.terminate()`` and ``.kill()`` are all recorded
+by the mock returned by :class:`~testfixtures.popen.MockPopen`
+but otherwise do nothing as shown in the following example, which doesn't
+make sense for a real test of sub-process usage but does show how the mock
+behaves:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :pyobject: TestMyFunc.test_send_signal
+ :dedent: 4
+
+Polling a process
+-----------------
+
+The :meth:`~subprocess.Popen.poll` method is often used as part of a loop
+in order to do other work while waiting for a sub-process to complete.
+The mock returned by :class:`~testfixtures.popen.MockPopen` supports this
+by allowing the ``.poll()`` method to be called a number of times before
+the ``returncode`` is set using the ``poll_count`` parameter as shown in
+the following example:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :pyobject: TestMyFunc.test_poll_until_result
+ :dedent: 4
+
+Using default behaviour
+-----------------------
+
+If you're testing something that needs to make many calls to many different
+commands that all behave the same, it can be tedious to specify the behaviour
+of each with :meth:`~MockPopen.set_command`. For this case, :class:`~MockPopen`
+has the :meth:`~MockPopen.set_default` method which can be used to set the
+behaviour of any command that has not been specified with
+:meth:`~MockPopen.set_command` as shown in the
+following example:
+
+.. literalinclude:: ../testfixtures/tests/test_popen_docs.py
+ :pyobject: TestMyFunc.test_default_behaviour
+ :dedent: 4
+
+
diff --git a/docs/streams.txt b/docs/streams.txt
new file mode 100644
index 0000000..505f4f2
--- /dev/null
+++ b/docs/streams.txt
@@ -0,0 +1,98 @@
+Testing output to streams
+=========================
+
+.. currentmodule:: testfixtures
+
+In many situations, it's perfectly legitimate for output to be printed
+to one of the standard streams. To aid with testing this kind of
+output, TestFixtures provides the :class:`OutputCapture` helper.
+
+This helper is a context manager that captures output sent to
+``sys.stdout`` and ``sys.stderr`` and provides a
+:meth:`~OutputCapture.compare` method to check that the output was as
+expected.
+
+Here's a simple example:
+
+.. code-block:: python
+
+ from testfixtures import OutputCapture
+ import sys
+
+ with OutputCapture() as output:
+ # code under test
+ print("Hello!")
+ print("Something bad happened!", file=sys.stderr)
+
+ output.compare('\n'.join([
+ "Hello!",
+ "Something bad happened!",
+ ]))
+
+To make life easier, both the actual and expected output are stripped
+of leading and trailing whitespace before the comparison is done:
+
+>>> with OutputCapture() as o:
+... print(' Bar! ')
+... o.compare(' Foo! ')
+Traceback (most recent call last):
+...
+AssertionError: 'Foo!' (expected) != 'Bar!' (actual)
+
+However, if you need to make very explicit assertions about what has
+been written to the stream then you can do so using the `captured`
+property of the :class:`OutputCapture`:
+
+>>> with OutputCapture() as o:
+... print(' Bar! ')
+>>> print(repr(o.captured))
+' Bar! \n'
+
+If you need to explicitly check whether output went to ``stdout`` or ``stderr``,
+`separate` mode can be used:
+
+.. code-block:: python
+
+ from testfixtures import OutputCapture
+ import sys
+
+ with OutputCapture(separate=True) as output:
+ print("Hello!")
+ print("Something bad happened!", file=sys.stderr)
+
+ output.compare(
+ stdout="Hello!",
+ stderr="Something bad happened!",
+ )
+
+Finally, you may sometimes want to disable an :class:`OutputCapture`
+without removing it from your code. This often happens when you want
+to insert a debugger call while an :class:`OutputCapture` is active;
+if it remains enabled, all debugger output will be captured making the
+debugger very difficult to use!
+
+To deal with this problem, the :class:`OutputCapture` may be disabled
+and then re-enabled as follows:
+
+>>> with OutputCapture() as o:
+... print('Foo')
+... o.disable()
+... print('Bar')
+... o.enable()
+... print('Baz')
+Bar
+>>> print(o.captured)
+Foo
+Baz
+<BLANKLINE>
+
+.. note::
+
+ Some debuggers, notably :mod:`pdb`, do interesting things with streams
+ such that calling :meth:`~OutputCapture.disable` from within the debugger
+ will have no effect. A good fallback is to type the following, which will
+ almost always restore output to where you want it:
+
+ .. code-block:: python
+
+ import sys; sys.stdout=sys.__stdout__
diff --git a/docs/utilities.txt b/docs/utilities.txt
new file mode 100644
index 0000000..5b32b8a
--- /dev/null
+++ b/docs/utilities.txt
@@ -0,0 +1,170 @@
+Utilities
+=========
+
+.. currentmodule:: testfixtures
+
+This section describes a few handy functions that didn't fit nicely in
+any other section.
+
+.. _generator:
+
+The ``generator`` helper
+------------------------
+
+It can be handy when testing to be able to turn a simple sequence into
+a generator. This can be necessary when you want to check that your
+code will behave correctly when processing a generator instead of a
+simple sequence, or when you're looking to make assertions about the
+expected return value of a callable that returns a generator.
+
+If you need to turn a simple sequence into a generator, the
+:func:`generator` function is the way to do it:
+
+>>> from testfixtures import generator
+>>> generator(1,2,3)
+<generator object ...>
+
+.. invisible-code-block: python
+
+ from __future__ import print_function
+
+Iterating over this generator will return the arguments passed to the
+:func:`generator` function:
+
+>>> for i in _:
+... print(i, end=' ')
+1 2 3
+
+The ``wrap`` helper
+-------------------
+
+The :func:`wrap` helper is a decorator function that allows you to
+wrap the call to the decorated callable with calls to other
+callables. This can be useful when you want to perform setup and
+teardown actions either side of a test function.
+
+For example, take the following functions:
+
+.. code-block:: python
+
+ def before():
+ print("before")
+
+ def after():
+ print("after")
+
+The :func:`wrap` helper can be used to wrap a function with these:
+
+.. code-block:: python
+
+ from testfixtures import wrap
+
+ @wrap(before,after)
+ def a_function():
+ print("a_function")
+
+When the wrapped function is executed, the output is as follows:
+
+>>> a_function()
+before
+a_function
+after
+
+The second argument to :func:`wrap` is optional:
+
+.. code-block:: python
+
+ from testfixtures import wrap
+
+ @wrap(before)
+ def a_function():
+ print("a_function")
+
+Now, the wrapped function gives the following output when executed:
+
+>>> a_function()
+before
+a_function
+
+Multiple wrapping functions can be provided by stacking :func:`wrap`
+decorations:
+
+.. code-block:: python
+
+ def before1():
+ print("before 1")
+
+ def after1():
+ print("after 1")
+
+ def before2():
+ print("before 2")
+
+ def after2():
+ print("after 2")
+
+ @wrap(before2,after2)
+ @wrap(before1,after1)
+ def a_function():
+ print("a_function")
+
+The order of execution is illustrated below:
+
+>>> a_function()
+before 1
+before 2
+a_function
+after 2
+after 1
+
+The results of calling the wrapping functions executed before the
+wrapped function can be made available to the wrapped function
+provided it accepts positional arguments for these results:
+
+.. code-block:: python
+
+ def before1():
+ return "return 1"
+
+ def before2():
+ return "return 2"
+
+ @wrap(before2)
+ @wrap(before1)
+ def a_function(r1,r2):
+ print(r1)
+ print(r2)
+
+Calling the wrapped function illustrates the behaviour:
+
+>>> a_function()
+return 1
+return 2
+
+Finally, the return value of the wrapped function will always be that
+of the original function:
+
+.. code-block:: python
+
+ def before1():
+ return 1
+
+ def after1():
+ return 2
+
+ def before2():
+ return 3
+
+ def after2():
+ return 4
+
+ @wrap(before2,after2)
+    @wrap(before1,after1)
+ def a_function():
+ return 'original'
+
+When the above wrapped function is executed, the original return value
+is still returned:
+
+>>> a_function()
+'original'
diff --git a/docs/warnings.txt b/docs/warnings.txt
new file mode 100644
index 0000000..2642fa8
--- /dev/null
+++ b/docs/warnings.txt
@@ -0,0 +1,90 @@
+Testing warnings
+================
+
+.. currentmodule:: testfixtures
+
+The :mod:`unittest` support for asserting that warnings are issued
+when expected is fairly convoluted, so TestFixtures has tools to help with this.
+
+The :class:`ShouldWarn` context manager
+---------------------------------------
+
+This context manager allows you to assert that particular warnings are
+recorded in a block of code, for example:
+
+>>> from warnings import warn
+>>> from testfixtures import ShouldWarn
+>>> with ShouldWarn(UserWarning('you should fix that')):
+... warn('you should fix that')
+
+If a warning issued doesn't match the one expected,
+:class:`ShouldWarn` will raise an :class:`AssertionError`
+causing the test in which it occurs to fail:
+
+>>> from warnings import warn
+>>> from testfixtures import ShouldWarn
+>>> with ShouldWarn(UserWarning('you should fix that')):
+... warn("sorry dave, I can't let you do that")
+Traceback (most recent call last):
+...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+[]
+<BLANKLINE>
+expected:
+[
+ <C(failed):....UserWarning>
+ args:('you should fix that',) != ("sorry dave, I can't let you do that",)
+ </C>]
+<BLANKLINE>
+actual:
+[UserWarning("sorry dave, I can't let you do that",)]
+
+You can check multiple warnings in a particular piece of code:
+
+>>> from warnings import warn
+>>> from testfixtures import ShouldWarn
+>>> with ShouldWarn(UserWarning('you should fix that'),
+... UserWarning('and that too')):
+... warn('you should fix that')
+... warn('and that too')
+
+If you want to inspect more details of the warnings issued, you can capture
+them into a list as follows:
+
+>>> from warnings import warn_explicit
+>>> from testfixtures import ShouldWarn
+>>> with ShouldWarn() as captured:
+... warn_explicit(message='foo', category=DeprecationWarning,
+... filename='bar.py', lineno=42)
+>>> len(captured)
+1
+>>> captured[0].message
+DeprecationWarning('foo',)
+>>> captured[0].lineno
+42
+
+The :class:`ShouldNotWarn` context manager
+------------------------------------------
+
+If you do not expect any warnings to be logged in a piece of code, you can use
+the :class:`ShouldNotWarn` context manager. If any warnings are issued in the
+context it manages, it will raise an :class:`AssertionError` to indicate this:
+
+>>> from warnings import warn
+>>> from testfixtures import ShouldNotWarn
+>>> with ShouldNotWarn():
+... warn("woah dude")
+Traceback (most recent call last):
+...
+AssertionError: sequence not as expected:
+<BLANKLINE>
+same:
+[]
+<BLANKLINE>
+expected:
+[]
+<BLANKLINE>
+actual:
+[UserWarning('woah dude',)]
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..b7a3c3f
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+-e .[build]
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..7c964b4
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[wheel]
+universal=1
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..b58625e
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2008-2014 Simplistix Ltd, 2015-2016 Chris Withers
+# See license.txt for license details.
+
+import os
+
+from setuptools import setup, find_packages
+
+# Distribution name; also used to locate version.txt inside the package.
+name = 'testfixtures'
+base_dir = os.path.dirname(__file__)
+
+setup(
+    name=name,
+    # Single source of truth for the version: testfixtures/version.txt.
+    version=open(os.path.join(base_dir, name, 'version.txt')).read().strip(),
+    author='Chris Withers',
+    author_email='chris@simplistix.co.uk',
+    license='MIT',
+    description=("A collection of helpers and mock objects "
+                 "for unit tests and doc tests."),
+    # The long description is maintained alongside the Sphinx docs.
+    long_description=open(os.path.join(base_dir,
+                                       'docs',
+                                       'description.txt')).read(),
+    url='https://github.com/Simplistix/testfixtures',
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: MIT License',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.2',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+    ],
+    packages=find_packages(),
+    zip_safe=False,
+    include_package_data=True,
+    # Optional dependency sets: 'test' to run the test suite,
+    # 'build' to build docs and cut releases.
+    extras_require=dict(
+        test=['nose', 'nose-fixes', 'nose-cov', 'mock', 'manuel',
+              'zope.component', 'coveralls'],
+        build=['sphinx', 'pkginfo', 'setuptools-git', 'wheel', 'twine']
+    )
+)
diff --git a/testfixtures/__init__.py b/testfixtures/__init__.py
new file mode 100644
index 0000000..bb8555e
--- /dev/null
+++ b/testfixtures/__init__.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+
+class singleton(object):
+    """A named sentinel object whose repr and str render as ``<name>``."""
+
+    def __init__(self, name):
+        # name: the label shown when the sentinel is printed.
+        self.name = name
+
+    def __repr__(self):
+        return '<%s>' % self.name
+
+    __str__ = __repr__
+
+# Sentinel used throughout testfixtures to indicate that an attribute
+# or item is expected to be absent.
+not_there = singleton('not_there')
+
+from testfixtures.comparison import (
+ Comparison, StringComparison, RoundComparison, compare, diff, RangeComparison
+)
+from testfixtures.tdatetime import test_datetime, test_date, test_time
+from testfixtures.logcapture import LogCapture, log_capture
+from testfixtures.outputcapture import OutputCapture
+from testfixtures.resolve import resolve
+from testfixtures.replace import Replacer, Replace, replace
+from testfixtures.shouldraise import ShouldRaise, should_raise
+from testfixtures.shouldwarn import ShouldWarn, ShouldNotWarn
+from testfixtures.tempdirectory import TempDirectory, tempdir
+from testfixtures.utils import wrap, generator
diff --git a/testfixtures/comparison.py b/testfixtures/comparison.py
new file mode 100644
index 0000000..60837b4
--- /dev/null
+++ b/testfixtures/comparison.py
@@ -0,0 +1,725 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+from collections import Iterable
+from difflib import unified_diff
+from pprint import pformat
+from re import compile, MULTILINE
+from testfixtures import not_there
+from testfixtures.compat import (
+ ClassType, Unicode, basestring, PY3, mock_call, unittest_mock_call
+ )
+from testfixtures.resolve import resolve
+from types import GeneratorType
+
+
+def compare_simple(x, y, context):
+ """
+ Returns a very simple textual difference between the two supplied objects.
+ """
+ if context.ignore_eq:
+ try:
+ hash_eq = hash(x) == hash(y)
+ except TypeError:
+ pass
+ else:
+ if hash_eq:
+ return
+ return context.label('x', repr(x)) + ' != ' + context.label('y', repr(y))
+
+
+def compare_with_type(x, y, context):
+ """
+ Return a textual description of the difference between two objects
+ including information about their types.
+ """
+ source = locals()
+ to_render = {}
+ for name in 'x', 'y':
+ obj = source[name]
+ to_render[name] = context.label(
+ name,
+ '{0} ({1!r})'.format(_short_repr(obj), type(obj))
+ )
+ return '{x} != {y}'.format(**to_render)
+
+
+def compare_sequence(x, y, context):
+ """
+ Returns a textual description of the differences between the two
+ supplied sequences.
+ """
+ l_x = len(x)
+ l_y = len(y)
+ i = 0
+ while i < l_x and i < l_y:
+ if context.different(x[i], y[i], '[%i]' % i):
+ break
+ i += 1
+
+ if l_x == l_y and i == l_x:
+ return
+
+ return ('sequence not as expected:\n\n'
+ 'same:\n%s\n\n'
+ '%s:\n%s\n\n'
+ '%s:\n%s') % (pformat(x[:i]),
+ context.x_label or 'first', pformat(x[i:]),
+ context.y_label or 'second', pformat(y[i:]),
+ )
+
+
+def compare_generator(x, y, context):
+ """
+ Returns a textual description of the differences between the two
+ supplied generators.
+
+ This is done by first unwinding each of the generators supplied
+ into tuples and then passing those tuples to
+ :func:`compare_sequence`.
+ """
+ x = tuple(x)
+ y = tuple(y)
+
+ if not context.ignore_eq and x == y:
+ return
+
+ return compare_sequence(x, y, context)
+
+
+def compare_tuple(x, y, context):
+ """
+ Returns a textual difference between two tuples or
+ :func:`collections.namedtuple` instances.
+
+ The presence of a ``_fields`` attribute on a tuple is used to
+ decide whether or not it is a :func:`~collections.namedtuple`.
+ """
+ x_fields = getattr(x, '_fields', None)
+ y_fields = getattr(y, '_fields', None)
+ if x_fields and y_fields:
+ if x_fields == y_fields:
+ return _compare_mapping(dict(zip(x_fields, x)),
+ dict(zip(y_fields, y)),
+ context,
+ x)
+ else:
+ return compare_with_type(x, y, context)
+ return compare_sequence(x, y, context)
+
+
+def compare_dict(x, y, context):
+ """
+ Returns a textual description of the differences between the two
+ supplied dictionaries.
+ """
+ return _compare_mapping(x, y, context, x)
+
+
+def sorted_by_repr(sequence):
+ return sorted(sequence, key=lambda o: repr(o))
+
+
+def _compare_mapping(x, y, context, obj_for_class):
+
+ x_keys = set(x.keys())
+ y_keys = set(y.keys())
+ x_not_y = x_keys - y_keys
+ y_not_x = y_keys - x_keys
+ same = []
+ diffs = []
+ for key in sorted_by_repr(x_keys.intersection(y_keys)):
+ if context.different(x[key], y[key], '[%r]' % (key, )):
+ diffs.append('%r: %s != %s' % (
+ key,
+ context.label('x', pformat(x[key])),
+ context.label('y', pformat(y[key])),
+ ))
+ else:
+ same.append(key)
+ lines = ['%s not as expected:' % obj_for_class.__class__.__name__]
+ if same:
+ set_same = set(same)
+ if set_same == x_keys == y_keys:
+ return
+ lines.extend(('', 'same:', repr(same)))
+
+ x_label = context.x_label or 'first'
+ y_label = context.y_label or 'second'
+
+ if x_not_y:
+ lines.extend(('', 'in %s but not %s:' % (x_label, y_label)))
+ for key in sorted_by_repr(x_not_y):
+ lines.append('%r: %s' % (
+ key,
+ pformat(x[key])
+ ))
+ if y_not_x:
+ lines.extend(('', 'in %s but not %s:' % (y_label, x_label)))
+ for key in sorted_by_repr(y_not_x):
+ lines.append('%r: %s' % (
+ key,
+ pformat(y[key])
+ ))
+ if diffs:
+ lines.extend(('', 'values differ:'))
+ lines.extend(diffs)
+ return '\n'.join(lines)
+
+
+def compare_set(x, y, context):
+ """
+ Returns a textual description of the differences between the two
+ supplied sets.
+ """
+ x_not_y = x - y
+ y_not_x = y - x
+ lines = ['%s not as expected:' % x.__class__.__name__, '']
+ x_label = context.x_label or 'first'
+ y_label = context.y_label or 'second'
+ if x_not_y:
+ lines.extend((
+ 'in %s but not %s:' % (x_label, y_label),
+ pformat(sorted_by_repr(x_not_y)),
+ '',
+ ))
+ if y_not_x:
+ lines.extend((
+ 'in %s but not %s:' % (y_label, x_label),
+ pformat(sorted_by_repr(y_not_x)),
+ '',
+ ))
+ return '\n'.join(lines)+'\n'
+
+trailing_whitespace_re = compile('\s+$', MULTILINE)
+
+
+def strip_blank_lines(text):
+ result = []
+ for line in text.split('\n'):
+ if line and not line.isspace():
+ result.append(line)
+ return '\n'.join(result)
+
+
+def split_repr(text):
+ parts = text.split('\n')
+ for i, part in enumerate(parts[:-1]):
+ parts[i] = repr(part + '\n')
+ parts[-1] = repr(parts[-1])
+ return '\n'.join(parts)
+
+
+def compare_text(x, y, context):
+ """
+ Returns an informative string describing the differences between the two
+ supplied strings. The way in which this comparison is performed
+ can be controlled using the following parameters:
+
+ :param blanklines: If `False`, then when comparing multi-line
+ strings, any blank lines in either argument
+ will be ignored.
+
+ :param trailing_whitespace: If `False`, then when comparing
+ multi-line strings, trailing
+ whitespace on lines will be ignored.
+
+ :param show_whitespace: If `True`, then whitespace characters in
+ multi-line strings will be replaced with their
+ representations.
+ """
+ blanklines = context.get_option('blanklines', True)
+ trailing_whitespace = context.get_option('trailing_whitespace', True)
+ show_whitespace = context.get_option('show_whitespace', False)
+
+ if not trailing_whitespace:
+ x = trailing_whitespace_re.sub('', x)
+ y = trailing_whitespace_re.sub('', y)
+ if not blanklines:
+ x = strip_blank_lines(x)
+ y = strip_blank_lines(y)
+ if x == y:
+ return
+ labelled_x = context.label('x', repr(x))
+ labelled_y = context.label('y', repr(y))
+ if len(x) > 10 or len(y) > 10:
+ if '\n' in x or '\n' in y:
+ if show_whitespace:
+ x = split_repr(x)
+ y = split_repr(y)
+ message = '\n' + diff(x, y, context.x_label, context.y_label)
+ else:
+ message = '\n%s\n!=\n%s' % (labelled_x, labelled_y)
+ else:
+ message = labelled_x+' != '+labelled_y
+ return message
+
+
+def _short_repr(obj):
+ repr_ = repr(obj)
+ if len(repr_) > 30:
+ repr_ = repr_[:30] + '...'
+ return repr_
+
+
+_registry = {
+ dict: compare_dict,
+ set: compare_set,
+ list: compare_sequence,
+ tuple: compare_tuple,
+ str: compare_text,
+ Unicode: compare_text,
+ GeneratorType: compare_generator,
+ mock_call.__class__: compare_simple,
+ unittest_mock_call.__class__: compare_simple,
+ }
+
+
+def register(type, comparer):
+ """
+ Register the supplied comparer for the specified type.
+ This registration is global and will be in effect from the point
+ this function is called until the end of the current process.
+ """
+ _registry[type] = comparer
+
+
+def _mro(obj):
+ class_ = getattr(obj, '__class__', None)
+ if class_ is None:
+ # must be an old-style class object in Python 2!
+ return (obj, )
+ mro = getattr(class_, '__mro__', None)
+ if mro is None:
+ # instance of old-style class in Python 2!
+ return (class_, )
+ return mro
+
+
+def _shared_mro(x, y):
+ y_mro = set(_mro(y))
+ for class_ in _mro(x):
+ if class_ in y_mro:
+ yield class_
+
+_unsafe_iterables = basestring, dict
+
+
+class CompareContext(object):
+
+ x_label = y_label = None
+
+ def __init__(self, options):
+ comparers = options.pop('comparers', None)
+ if comparers:
+ self.registry = dict(_registry)
+ self.registry.update(comparers)
+ else:
+ self.registry = _registry
+
+ self.recursive = options.pop('recursive', True)
+ self.strict = options.pop('strict', False)
+ self.ignore_eq = options.pop('ignore_eq', False)
+
+ if 'expected' in options or 'actual' in options:
+ self.x_label = 'expected'
+ self.y_label = 'actual'
+ self.x_label = options.pop('x_label', self.x_label)
+ self.y_label = options.pop('y_label', self.y_label)
+
+ self.options = options
+ self.message = ''
+ self.breadcrumbs = []
+
+ def extract_args(self, args):
+
+ possible = []
+ expected = self.options.pop('expected', not_there)
+ if expected is not not_there:
+ possible.append(expected)
+ possible.extend(args)
+ actual = self.options.pop('actual', not_there)
+ if actual is not not_there:
+ possible.append(actual)
+
+ if len(possible) != 2:
+ raise TypeError(
+ 'Exactly two objects needed, you supplied: ' +
+ repr(possible)
+ )
+
+ return possible
+
+ def get_option(self, name, default=None):
+ return self.options.get(name, default)
+
+ def label(self, side, value):
+ r = str(value)
+ label = getattr(self, side+'_label')
+ if label:
+ r += ' ('+label+')'
+ return r
+
+ def _lookup(self, x, y):
+ if self.strict and type(x) is not type(y):
+ return compare_with_type
+
+ for class_ in _shared_mro(x, y):
+ comparer = self.registry.get(class_)
+ if comparer:
+ return comparer
+
+ # fallback for iterables
+ if ((isinstance(x, Iterable) and isinstance(y, Iterable)) and not
+ (isinstance(x, _unsafe_iterables) or
+ isinstance(y, _unsafe_iterables))):
+ return compare_generator
+
+ return compare_simple
+
+ def _separator(self):
+ return '\n\nWhile comparing %s: ' % ''.join(self.breadcrumbs[1:])
+
+ def different(self, x, y, breadcrumb):
+
+ recursed = bool(self.breadcrumbs)
+ self.breadcrumbs.append(breadcrumb)
+ existing_message = self.message
+ self.message = ''
+ current_message = ''
+ try:
+
+ if not (self.strict or self.ignore_eq) and x == y:
+ return False
+
+ comparer = self._lookup(x, y)
+
+ result = comparer(x, y, self)
+ specific_comparer = comparer is not compare_simple
+
+ if self.strict:
+ if type(x) is type(x) and x == y and not specific_comparer:
+ return False
+
+ if result:
+
+ if specific_comparer and recursed:
+ current_message = self._separator()
+
+ if specific_comparer or not recursed:
+ current_message += result
+
+ if self.recursive:
+ current_message += self.message
+
+ return result
+
+ finally:
+ self.message = existing_message + current_message
+ self.breadcrumbs.pop()
+
+
+def compare(*args, **kw):
+ """
+ Compare the two arguments passed either positionally or using
+ explicit ``expected`` and ``actual`` keyword parameters. An
+ :class:`AssertionError` will be raised if they are not the same.
+ The :class:`AssertionError` raised will attempt to provide
+ descriptions of the differences found.
+
+ Any other keyword parameters supplied will be passed to the functions
+ that end up doing the comparison. See the API documentation below
+ for details of these.
+
+ :param prefix: If provided, in the event of an :class:`AssertionError`
+ being raised, the prefix supplied will be prepended to the
+ message in the :class:`AssertionError`.
+
+ :param suffix: If provided, in the event of an :class:`AssertionError`
+ being raised, the suffix supplied will be appended to the
+ message in the :class:`AssertionError`.
+
+ :param raises: If ``False``, the message that would be raised in the
+ :class:`AssertionError` will be returned instead of the
+ exception being raised.
+
+ :param recursive: If ``True``, when a difference is found in a
+ nested data structure, attempt to highlight the location
+ of the difference.
+
+ :param strict: If ``True``, objects will only compare equal if they are
+ of the same type as well as being equal.
+
+ :param ignore_eq: If ``True``, object equality, which relies on ``__eq__``
+ being correctly implemented, will not be used.
+ Instead, comparers will be looked up and used
+ and, if no suitable comparer is found, objects will
+ be considered equal if their hash is equal.
+
+ :param comparers: If supplied, should be a dictionary mapping
+ types to comparer functions for those types. These will
+ be added to the global comparer registry for the duration
+ of this call.
+ """
+ prefix = kw.pop('prefix', None)
+ suffix = kw.pop('suffix', None)
+ raises = kw.pop('raises', True)
+ context = CompareContext(kw)
+
+ x, y = context.extract_args(args)
+
+ if not context.different(x, y, not_there):
+ return
+
+ message = context.message
+ if prefix:
+ message = prefix + ': ' + message
+ if suffix:
+ message += '\n' + suffix
+
+ if raises:
+ raise AssertionError(message)
+ return message
+
+
+class Comparison(object):
+ """
+ These are used when you need to compare objects
+ that do not natively support comparison.
+
+ :param object_or_type: The object or class from which to create the
+ :class:`Comparison`.
+
+ :param attribute_dict: An optional dictionary containing attributes
+ to place on the :class:`Comparison`.
+
+ :param strict: If true, any expected attributes not present or extra
+ attributes not expected on the object involved in the
+ comparison will cause the comparison to fail.
+
+ :param attributes: Any other keyword parameters passed will placed
+ as attributes on the :class:`Comparison`.
+ """
+
+ failed = None
+
+ def __init__(self,
+ object_or_type,
+ attribute_dict=None,
+ strict=True,
+ **attributes):
+ if attributes:
+ if attribute_dict is None:
+ attribute_dict = attributes
+ else:
+ attribute_dict.update(attributes)
+ if isinstance(object_or_type, basestring):
+ container, method, name, c = resolve(object_or_type)
+ if c is not_there:
+ raise AttributeError(
+ '%r could not be resolved' % object_or_type
+ )
+ elif isinstance(object_or_type, (ClassType, type)):
+ c = object_or_type
+ elif isinstance(object_or_type, BaseException):
+ c = object_or_type.__class__
+ if attribute_dict is None:
+ attribute_dict = vars(object_or_type)
+ attribute_dict['args'] = object_or_type.args
+ else:
+ c = object_or_type.__class__
+ if attribute_dict is None:
+ attribute_dict = vars(object_or_type)
+ self.c = c
+ self.v = attribute_dict
+ self.strict = strict
+
+ def __eq__(self, other):
+ if self.c is not other.__class__:
+ self.failed = True
+ return False
+ if self.v is None:
+ return True
+ self.failed = {}
+ if isinstance(other, BaseException):
+ v = dict(vars(other))
+ v['args'] = other.args
+ if PY3 and '_not_found' in v:
+ del v['_not_found']
+ else:
+ try:
+ v = vars(other)
+ except TypeError:
+ if self.strict:
+ raise TypeError(
+ '%r does not support vars() so cannot '
+ 'do strict comparison' % other
+ )
+ v = {}
+ for k in self.v.keys():
+ try:
+ v[k] = getattr(other, k)
+ except AttributeError:
+ pass
+
+ e = set(self.v.keys())
+ a = set(v.keys())
+ for k in e.difference(a):
+ try:
+ # class attribute?
+ v[k] = getattr(other, k)
+ except AttributeError:
+ self.failed[k] = '%s not in other' % repr(self.v[k])
+ else:
+ a.add(k)
+ if self.strict:
+ for k in a.difference(e):
+ self.failed[k] = '%s not in Comparison' % repr(v[k])
+ for k in e.intersection(a):
+ ev = self.v[k]
+ av = v[k]
+ if ev != av:
+ self.failed[k] = '%r != %r' % (ev, av)
+ if self.failed:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not(self == other)
+
+ def __repr__(self, indent=2):
+ full = False
+ if self.failed is True:
+ v = 'wrong type</C>'
+ elif self.v is None:
+ v = ''
+ else:
+ full = True
+ v = '\n'
+ if self.failed:
+ vd = self.failed
+ r = str
+ else:
+ vd = self.v
+ r = repr
+ for vk, vv in sorted(vd.items()):
+ if isinstance(vv, Comparison):
+ vvr = vv.__repr__(indent+2)
+ else:
+ vvr = r(vv)
+ v += (' ' * indent + '%s:%s\n' % (vk, vvr))
+ v += (' '*indent)+'</C>'
+ name = getattr(self.c, '__module__', '')
+ if name:
+ name += '.'
+ name += getattr(self.c, '__name__', '')
+ if not name:
+ name = repr(self.c)
+ r = '<C%s:%s>%s' % (self.failed and '(failed)' or '', name, v)
+ if full:
+ return '\n'+(' '*indent)+r
+ else:
+ return r
+
+
+class StringComparison:
+ """
+ An object that can be used in comparisons of expected and actual
+ strings where the string expected matches a pattern rather than a
+ specific concrete string.
+
+ :param regex_source: A string containing the source for a regular
+ expression that will be used whenever this
+ :class:`StringComparison` is compared with
+ any :class:`basestring` instance.
+
+ """
+ def __init__(self, regex_source):
+ self.re = compile(regex_source)
+
+ def __eq__(self, other):
+ if not isinstance(other, basestring):
+ return
+ if self.re.match(other):
+ return True
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return '<S:%s>' % self.re.pattern
+
+ def __lt__(self, other):
+ return self.re.pattern < other
+
+ def __gt__(self, other):
+ return self.re.pattern > other
+
+
+class RoundComparison:
+ """
+ An object that can be used in comparisons of expected and actual
+ numerics to a specified precision.
+
+ :param value: numeric to be compared.
+
+ :param precision: Number of decimal places to round to in order
+ to perform the comparison.
+ """
+ def __init__(self, value, precision):
+ self.rounded = round(value, precision)
+ self.precision = precision
+
+ def __eq__(self, other):
+ other_rounded = round(other, self.precision)
+ if type(self.rounded) is not type(other_rounded):
+ raise TypeError('Cannot compare %r with %r' % (self, type(other)))
+ return self.rounded == other_rounded
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return '<R:%s to %i digits>' % (self.rounded, self.precision)
+
+
+def diff(x, y, x_label='', y_label=''):
+ """
+ A shorthand function that uses :mod:`difflib` to return a
+ string representing the differences between the two string
+ arguments.
+
+ Most useful when comparing multi-line strings.
+ """
+ return '\n'.join(
+ unified_diff(
+ x.split('\n'),
+ y.split('\n'),
+ x_label or 'first',
+ y_label or 'second',
+ lineterm='')
+ )
+
+
+class RangeComparison:
+ """
+ An object that can be used in comparisons of orderable types to
+ check that a value is within the given range.
+
+ :param lower_bound: the inclusive lower bound for the acceptable range.
+
+ :param upper_bound: the inclusive upper bound for the acceptable range.
+ """
+ def __init__(self, lower_bound, upper_bound):
+ self.lower_bound = lower_bound
+ self.upper_bound = upper_bound
+
+ def __eq__(self, other):
+ return self.lower_bound <= other <= self.upper_bound
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __repr__(self):
+ return '<Range: [%s, %s]>' % (self.lower_bound, self.upper_bound)
diff --git a/testfixtures/compat.py b/testfixtures/compat.py
new file mode 100644
index 0000000..fe4a632
--- /dev/null
+++ b/testfixtures/compat.py
@@ -0,0 +1,54 @@
+# compatibility module for different python versions
+import sys
+
+if sys.version_info[:2] > (3, 0):
+
+ PY2 = False
+ PY3 = True
+
+ Bytes = bytes
+ Unicode = str
+ basestring = str
+ BytesLiteral = lambda x: x.encode('latin1')
+ UnicodeLiteral = lambda x: x
+
+ class_type_name = 'class'
+ ClassType = type
+ exception_module = 'builtins'
+ new_class = type
+ self_name = '__self__'
+ from io import StringIO
+ xrange = range
+
+else:
+
+ PY2 = True
+ PY3 = False
+
+ Bytes = str
+ Unicode = unicode
+ basestring = basestring
+ BytesLiteral = lambda x: x
+ UnicodeLiteral = lambda x: x.decode('latin1')
+
+ class_type_name = 'type'
+ from types import ClassType
+ exception_module = 'exceptions'
+ from new import classobj as new_class
+ self_name = 'im_self'
+ from StringIO import StringIO
+ xrange = xrange
+
+try:
+ from mock import call as mock_call
+except ImportError: # pragma: no cover
+ class MockCall:
+ pass
+ mock_call = MockCall()
+
+try:
+ from unittest.mock import call as unittest_mock_call
+except ImportError:
+ class UnittestMockCall:
+ pass
+ unittest_mock_call = UnittestMockCall()
diff --git a/testfixtures/components.py b/testfixtures/components.py
new file mode 100644
index 0000000..bb0dc12
--- /dev/null
+++ b/testfixtures/components.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2010-2011 Simplistix Ltd
+# See license.txt for license details.
+
+import atexit
+import warnings
+
+from zope.component import getSiteManager
+from zope.component.registry import Components
+
+
+class TestComponents:
+ """
+ A helper for providing a sterile registry when testing
+ with :mod:`zope.component`.
+
+ Instantiation will install an empty registry that will be returned
+ by :func:`zope.component.getSiteManager`.
+ """
+
+ instances = set()
+ atexit_setup = False
+
+ def __init__(self):
+ self.registry = Components('Testing')
+ self.old = getSiteManager.sethook(lambda: self.registry)
+ self.instances.add(self)
+ if not self.__class__.atexit_setup:
+ atexit.register(self.atexit)
+ self.__class__.atexit_setup = True
+
+ def uninstall(self):
+ """
+ Remove the sterile registry and replace it with the one that
+ was in place before this :class:`TestComponents` was
+ instantiated.
+ """
+ getSiteManager.sethook(self.old)
+ self.instances.remove(self)
+
+ @classmethod
+ def atexit(cls):
+ if cls.instances:
+ warnings.warn(
+ 'TestComponents instances not uninstalled by shutdown!'
+ )
diff --git a/testfixtures/logcapture.py b/testfixtures/logcapture.py
new file mode 100644
index 0000000..c805ba2
--- /dev/null
+++ b/testfixtures/logcapture.py
@@ -0,0 +1,201 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+from collections import defaultdict
+import atexit
+import logging
+import warnings
+
+from testfixtures.comparison import compare
+from testfixtures.utils import wrap
+
+
+class LogCapture(logging.Handler):
+ """
+ These are used to capture entries logged to the Python logging
+ framework and make assertions about what was logged.
+
+ :param names: A string (or tuple of strings) containing the dotted name(s)
+ of loggers to capture. By default, the root logger is
+ captured.
+
+ :param install: If `True`, the :class:`LogCapture` will be
+ installed as part of its instantiation.
+
+ :param propagate: If specified, any captured loggers will have their
+ `propagate` attribute set to the supplied value. This can
+ be used to prevent propagation from a child logger to a
+ parent logger that has configured handlers.
+
+ :param attributes:
+
+ The sequence of attribute names to return for each record or a callable
+ that extracts a row from a record.
+
+ If a sequence of attribute names, those attributes will be taken from the
+ :class:`~logging.LogRecord`. If an attribute is callable, the value
+ used will be the result of calling it. If an attribute is missing,
+ ``None`` will be used in its place.
+
+ If a callable, it will be called with the :class:`~logging.LogRecord`
+ and the value returned will be used as the row.
+
+ :param recursive_check:
+
+ If ``True``, log messages will be compared recursively by
+ :meth:`LogCapture.check`.
+ """
+
+ instances = set()
+ atexit_setup = False
+ installed = False
+
+ def __init__(self, names=None, install=True, level=1, propagate=None,
+ attributes=('name', 'levelname', 'getMessage'),
+ recursive_check=False):
+ logging.Handler.__init__(self)
+ if not isinstance(names, tuple):
+ names = (names, )
+ self.names = names
+ self.level = level
+ self.propagate = propagate
+ self.attributes = attributes
+ self.recursive_check = recursive_check
+ self.old = defaultdict(dict)
+ self.clear()
+ if install:
+ self.install()
+
+ @classmethod
+ def atexit(cls):
+ if cls.instances:
+ warnings.warn(
+ 'LogCapture instances not uninstalled by shutdown, '
+ 'loggers captured:\n'
+ '%s' % ('\n'.join((str(i.names) for i in cls.instances)))
+ )
+
+ def clear(self):
+ "Clear any entries that have been captured."
+ self.records = []
+
+ def emit(self, record):
+ self.records.append(record)
+
+ def install(self):
+ """
+ Install this :class:`LogHandler` into the Python logging
+ framework for the named loggers.
+
+ This will remove any existing handlers for those loggers and
+ drop their level to that specified on this :class:`LogCapture` in order
+ to capture all logging.
+ """
+ for name in self.names:
+ logger = logging.getLogger(name)
+ self.old['levels'][name] = logger.level
+ self.old['handlers'][name] = logger.handlers
+ self.old['disabled'][name] = logger.disabled
+ self.old['progagate'][name] = logger.propagate
+ logger.setLevel(self.level)
+ logger.handlers = [self]
+ logger.disabled = False
+ if self.propagate is not None:
+ logger.propagate = self.propagate
+ self.instances.add(self)
+ if not self.__class__.atexit_setup:
+ atexit.register(self.atexit)
+ self.__class__.atexit_setup = True
+
+ def uninstall(self):
+ """
+ Un-install this :class:`LogHandler` from the Python logging
+ framework for the named loggers.
+
+ This will re-instate any existing handlers for those loggers
+ that were removed during installation and restore their levels
+ to those prior to installation.
+ """
+ if self in self.instances:
+ for name in self.names:
+ logger = logging.getLogger(name)
+ logger.setLevel(self.old['levels'][name])
+ logger.handlers = self.old['handlers'][name]
+ logger.disabled = self.old['disabled'][name]
+ logger.propagate = self.old['progagate'][name]
+ self.instances.remove(self)
+
+ @classmethod
+ def uninstall_all(cls):
+ "This will uninstall all existing :class:`LogHandler` objects."
+ for i in tuple(cls.instances):
+ i.uninstall()
+
+ def _actual_row(self, record):
+ for a in self.attributes:
+ value = getattr(record, a, None)
+ if callable(value):
+ value = value()
+ yield value
+
+ def actual(self):
+ for r in self.records:
+ if callable(self.attributes):
+ yield self.attributes(r)
+ else:
+ result = tuple(self._actual_row(r))
+ if len(result) == 1:
+ yield result[0]
+ else:
+ yield result
+
+ def __str__(self):
+ if not self.records:
+ return 'No logging captured'
+ return '\n'.join(["%s %s\n %s" % r for r in self.actual()])
+
+ def check(self, *expected):
+ """
+ This will compare the captured entries with the expected
+ entries provided and raise an :class:`AssertionError` if they
+ do not match.
+
+ :param expected: A sequence of 3-tuples containing the
+ expected log entries. Each tuple should be of
+ the form (logger_name, string_level, message)
+ """
+ return compare(
+ expected,
+ actual=tuple(self.actual()),
+ recursive=self.recursive_check
+ )
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.uninstall()
+
+
+class LogCaptureForDecorator(LogCapture):
+
+ def install(self):
+ LogCapture.install(self)
+ self.clear()
+ return self
+
+
+def log_capture(*names, **kw):
+ """
+ A decorator for making a :class:`LogCapture` installed and
+ available for the duration of a test function.
+
+ :param names: An optional sequence of names specifying the loggers
+ to be captured. If not specified, the root logger
+ will be captured.
+
+ Keyword parameters other than ``install`` may also be supplied and will be
+ passed on to the :class:`LogCapture` constructor.
+ """
+ l = LogCaptureForDecorator(names or None, install=False, **kw)
+ return wrap(l.install, l.uninstall)
diff --git a/testfixtures/manuel.py b/testfixtures/manuel.py
new file mode 100644
index 0000000..0e65214
--- /dev/null
+++ b/testfixtures/manuel.py
@@ -0,0 +1,94 @@
+from __future__ import absolute_import
+
+# Copyright (c) 2010-2013 Simplistix Ltd
+#
+# See license.txt for more details.
+import os
+import re
+import textwrap
+
+from manuel import Manuel
+from testfixtures import diff
+
+FILEBLOCK_START = re.compile(r'^\.\.\s*topic::?\s*(.+)\b', re.MULTILINE)
+FILEBLOCK_END = re.compile(r'(\n\Z|\n(?=\S))')
+CLASS = re.compile(r'\s+:class:\s*(read|write)-file')
+
+
+class FileBlock(object):
+ def __init__(self, path, content, action):
+ self.path, self.content, self.action = path, content, action
+
+
+class FileResult(object):
+ passed = True
+ expected = None
+ actual = None
+
+
+class Files(Manuel):
+ """
+ A `Manuel <http://packages.python.org/manuel/>`__ plugin that
+ parses certain ReST sections to read and write files in the
+ configured :class:`TempDirectory`.
+
+ :param name: This is the name of the :class:`TempDirectory` to use
+ in the Manual global namespace (ie: `globs`).
+
+ """
+ def __init__(self, name):
+ self.name = name
+ Manuel.__init__(self,
+ parsers=[self.parse],
+ evaluaters=[self.evaluate],
+ formatters=[self.format])
+
+ def parse(self, document):
+ for region in document.find_regions(FILEBLOCK_START, FILEBLOCK_END):
+ lines = region.source.splitlines()
+ class_ = CLASS.match(lines[1])
+ if not class_:
+ continue
+ index = 3
+ if lines[index].strip() == '::':
+ index += 1
+ source = textwrap.dedent('\n'.join(lines[index:])).lstrip()
+ if source[-1] != '\n':
+ source += '\n'
+ region.parsed = FileBlock(
+ region.start_match.group(1),
+ source,
+ class_.group(1)
+ )
+ document.claim_region(region)
+
+ def evaluate(self, region, document, globs):
+ if not isinstance(region.parsed, FileBlock):
+ return
+ block = region.parsed
+ dir = globs[self.name]
+ result = region.evaluated = FileResult()
+ if block.action == 'read':
+ actual = dir.read(block.path, 'ascii').replace(os.linesep, '\n')
+ if actual != block.content:
+ result.passed = False
+ result.path = block.path
+ result.expected = block.content
+ result.actual = actual
+ if block.action == 'write':
+ dir.write(block.path, block.content, 'ascii')
+
+ def format(self, document):
+ for region in document:
+ result = region.evaluated
+ if not isinstance(result, FileResult):
+ continue
+ if not result.passed:
+ region.formatted = diff(
+ result.expected,
+ result.actual,
+ 'File "%s", line %i:' % (document.location, region.lineno),
+ 'Reading from "%s":' % result.path
+ )
+
+ return
diff --git a/testfixtures/outputcapture.py b/testfixtures/outputcapture.py
new file mode 100644
index 0000000..a70206f
--- /dev/null
+++ b/testfixtures/outputcapture.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# Copyright (c) 2015 Chris Withers
+# See license.txt for license details.
+
+import sys
+
+from testfixtures.comparison import compare
+from testfixtures.compat import StringIO
+
+
+class OutputCapture(object):
+ """
+ A context manager for capturing output to the
+ :attr:`sys.stdout` and :attr:`sys.stderr` streams.
+
+ :param separate: If ``True``, ``stdout`` and ``stderr`` will be captured
+ separately and their expected values must be passed to
+ :meth:`~OutputCapture.compare`.
+
+ .. note:: If ``separate`` is passed as ``True``,
+ :attr:`OutputCapture.captured` will be an empty string.
+ """
+
+ original_stdout = None
+ original_stderr = None
+
+ def __init__(self, separate=False):
+ self.separate = separate
+
+ def __enter__(self):
+ self.output = StringIO()
+ self.stdout = StringIO()
+ self.stderr = StringIO()
+ self.enable()
+ return self
+
+ def __exit__(self, *args):
+ self.disable()
+
+ def disable(self):
+ "Disable the output capture if it is enabled."
+ sys.stdout = self.original_stdout
+ sys.stderr = self.original_stderr
+
+ def enable(self):
+ "Enable the output capture if it is disabled."
+ if self.original_stdout is None:
+ self.original_stdout = sys.stdout
+ self.original_stderr = sys.stderr
+ if self.separate:
+ sys.stdout = self.stdout
+ sys.stderr = self.stderr
+ else:
+ sys.stdout = sys.stderr = self.output
+
+ @property
+ def captured(self):
+ "A property containing any output that has been captured so far."
+ return self.output.getvalue()
+
+ def compare(self, expected='', stdout='', stderr=''):
+ """
+ Compare the captured output to that expected. If the output is
+ not the same, an :class:`AssertionError` will be raised.
+
+ :param expected: A string containing the expected combined output
+ of ``stdout`` and ``stderr``.
+
+ :param stdout: A string containing the expected output to ``stdout``.
+
+ :param stderr: A string containing the expected output to ``stderr``.
+ """
+ for prefix, _expected, captured in (
+ (None, expected, self.captured),
+ ('stdout', stdout, self.stdout.getvalue()),
+ ('stderr', stderr, self.stderr.getvalue()),
+ ):
+ compare(_expected.strip(), actual=captured.strip(), prefix=prefix)
diff --git a/testfixtures/popen.py b/testfixtures/popen.py
new file mode 100644
index 0000000..baf7cbe
--- /dev/null
+++ b/testfixtures/popen.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2015 Simplistix Ltd
+# See license.txt for license details.
+from mock import Mock
+from subprocess import Popen as Popen
+from tempfile import TemporaryFile
+from testfixtures.compat import basestring
+from testfixtures.utils import extend_docstring
+
+
class MockPopen(object):
    """
    A specialised mock for testing use of :class:`subprocess.Popen`.
    An instance of this class can be used in place of the
    :class:`subprocess.Popen` and is often inserted where it's needed using
    :func:`mock.patch` or a :class:`Replacer`.
    """

    #: Behaviour tuple used for commands with no entry in ``self.commands``.
    default_command = None

    def __init__(self):
        # Maps command strings to the behaviour tuples recorded by
        # set_command(): (stdout, stderr, returncode, pid, poll_count).
        self.commands = {}
        self.mock = mock = Mock()
        self.mock.Popen.side_effect = self.Popen
        # A single shared instance is handed out for every simulated call,
        # so tests can assert against mock.Popen_instance.
        mock.Popen_instance = Mock(spec=Popen)
        inst = mock.Popen.return_value = mock.Popen_instance
        inst.communicate.side_effect = self.communicate
        inst.wait.side_effect = self.wait
        inst.send_signal.side_effect = self.send_signal
        inst.terminate.side_effect = self.terminate
        inst.kill.side_effect = self.kill
        inst.poll.side_effect = self.poll

    def set_command(self, command, stdout=b'', stderr=b'', returncode=0,
                    pid=1234, poll_count=3):
        """
        Set the behaviour of this mock when it is used to simulate the
        specified command.

        :param command: A string representing the command to be simulated.
        """
        self.commands[command] = (stdout, stderr, returncode, pid, poll_count)

    def set_default(self, stdout=b'', stderr=b'', returncode=0,
                    pid=1234, poll_count=3):
        """
        Set the behaviour of this mock when it is used to simulate commands
        that have no explicit behavior specified using
        :meth:`~MockPopen.set_command`.
        """
        self.default_command = (stdout, stderr, returncode, pid, poll_count)

    def __call__(self, *args, **kw):
        return self.mock.Popen(*args, **kw)

    def Popen(self, args, bufsize=0, executable=None,
              stdin=None, stdout=None, stderr=None,
              preexec_fn=None, close_fds=False, shell=False, cwd=None,
              env=None, universal_newlines=False,
              startupinfo=None, creationflags=0):
        # Signature mirrors subprocess.Popen so the mock accepts the same
        # call patterns as the real class; most parameters are ignored.

        if isinstance(args, basestring):
            cmd = args
        else:
            cmd = ' '.join(args)

        behaviour = self.commands.get(cmd, self.default_command)
        if behaviour is None:
            raise KeyError('Nothing specified for command %r' % cmd)

        self.stdout, self.stderr, self.returncode, pid, poll = behaviour
        self.poll_count = poll
        # Expose stdout/stderr as real file objects, as Popen does when
        # pipes are requested.
        for name in 'stdout', 'stderr':
            f = TemporaryFile()
            f.write(getattr(self, name))
            f.flush()
            f.seek(0)
            setattr(self.mock.Popen_instance, name, f)

        self.mock.Popen_instance.pid = pid
        # returncode stays None until wait()/communicate()/poll() simulate
        # process completion, matching the real Popen contract.
        self.mock.Popen_instance.returncode = None

        return self.mock.Popen_instance

    def wait(self):
        "Simulate calls to :meth:`subprocess.Popen.wait`"
        self.mock.Popen_instance.returncode = self.returncode
        return self.returncode

    def communicate(self, input=None):
        "Simulate calls to :meth:`subprocess.Popen.communicate`"
        self.wait()
        return self.stdout, self.stderr

    def poll(self):
        "Simulate calls to :meth:`subprocess.Popen.poll`"
        while self.poll_count and self.mock.Popen_instance.returncode is None:
            self.poll_count -= 1
            return None
        # After poll_count polls have returned None, the simulated process
        # is treated as finished: wait() sets and returns returncode, just
        # as the real poll() reports the exit code of a finished child.
        return self.wait()

    # These are here to check parameter types
    def send_signal(self, signal):
        "Simulate calls to :meth:`subprocess.Popen.send_signal`"
        pass

    def terminate(self):
        "Simulate calls to :meth:`subprocess.Popen.terminate`"
        pass

    def kill(self):
        "Simulate calls to :meth:`subprocess.Popen.kill`"
        pass
+
+
# Shared parameter documentation appended to both set_command and
# set_default below.
set_command_params = """
:param stdout:
    A string representing the simulated content written by the process
    to the stdout pipe.
:param stderr:
    A string representing the simulated content written by the process
    to the stderr pipe.
:param returncode:
    An integer representing the return code of the simulated process.
:param pid:
    An integer representing the process identifier of the simulated
    process. This is useful if you have code the prints out the pids
    of running processes.
:param poll_count:
    Specifies the number of times :meth:`MockPopen.poll` can be
    called before :attr:`MockPopen.returncode` is set and returned
    by :meth:`MockPopen.poll`.
"""


# add the param docs, so we only have one copy of them!
extend_docstring(set_command_params,
                 [MockPopen.set_command, MockPopen.set_default])
diff --git a/testfixtures/replace.py b/testfixtures/replace.py
new file mode 100644
index 0000000..bbf024f
--- /dev/null
+++ b/testfixtures/replace.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2008-2013 Simplistix Ltd, 2016 Chris Withers
+# See license.txt for license details.
+
+from functools import partial
+from testfixtures.compat import ClassType
+from testfixtures.resolve import resolve, not_there
+from testfixtures.utils import wrap, extend_docstring
+
+import warnings
+
+
def not_same_descriptor(x, y, descriptor):
    # True when x is wrapped in the given descriptor type (classmethod or
    # staticmethod) but y is not, meaning the replacement needs the same
    # wrapping applied before being installed.
    x_wrapped = isinstance(x, descriptor)
    y_wrapped = isinstance(y, descriptor)
    return x_wrapped and not y_wrapped
+
+
class Replacer:
    """
    These are used to manage the mocking out of objects so that units
    of code can be tested without having to rely on their normal
    dependencies.
    """

    def __init__(self):
        # Maps target paths to the objects they referred to before
        # replacement, so restore() can put them back.
        self.originals = {}

    def _replace(self, container, name, method, value, strict=True):
        # Low-level setter: method 'a' means attribute access and 'i'
        # means item (mapping) access, as reported by resolve().
        if value is not_there:
            # Replacing with not_there means "remove entirely".
            if method == 'a':
                delattr(container, name)
            if method == 'i':
                del container[name]
        else:
            if method == 'a':
                setattr(container, name, value)
            if method == 'i':
                container[name] = value

    def __call__(self, target, replacement, strict=True):
        """
        Replace the specified target with the supplied replacement.
        """

        container, method, attribute, t_obj = resolve(target)
        if method is None:
            raise ValueError('target must contain at least one dot!')
        if t_obj is not_there and strict:
            raise AttributeError('Original %r not found' % attribute)
        if t_obj is not_there and replacement is not_there:
            # Nothing to remove and nothing to install: a no-op.
            return not_there

        replacement_to_use = replacement

        if isinstance(container, (type, ClassType)):
            # When replacing class attributes, preserve classmethod /
            # staticmethod wrapping so the replacement behaves like the
            # original descriptor did.
            if not_same_descriptor(t_obj, replacement, classmethod):
                replacement_to_use = classmethod(replacement)

            elif not_same_descriptor(t_obj, replacement, staticmethod):
                replacement_to_use = staticmethod(replacement)

        self._replace(container, attribute, method, replacement_to_use, strict)
        # Only the first replacement of a target records an original, so
        # repeated replacements still restore the true original.
        if target not in self.originals:
            self.originals[target] = t_obj
        return replacement

    def replace(self, target, replacement, strict=True):
        """
        Replace the specified target with the supplied replacement.
        """
        self(target, replacement, strict)

    def restore(self):
        """
        Restore all the original objects that have been replaced by
        calls to the :meth:`replace` method of this :class:`Replacer`.
        """
        for target, original in tuple(self.originals.items()):
            container, method, attribute, found = resolve(target)
            self._replace(container, attribute, method, original, strict=False)
            del self.originals[target]

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.restore()

    def __del__(self):
        # Warn rather than silently leak replacements when a Replacer is
        # garbage-collected without restore() having been called.
        if self.originals:
            # no idea why coverage misses the following statement
            # it's covered by test_replace.TestReplace.test_replacer_del
            warnings.warn(  # pragma: no cover
                'Replacer deleted without being restored, '
                'originals left: %r' % self.originals
            )
+
+
def replace(target, replacement, strict=True):
    """
    A decorator to replace a target object for the duration of a test
    function.
    """
    r = Replacer()
    # wrap() arranges for the replacement to happen before the test runs
    # and for restore() to be called afterwards.
    return wrap(partial(r.__call__, target, replacement, strict), r.restore)
+
+
class Replace(object):
    """
    A context manager that uses a :class:`Replacer` to replace a single target.
    """

    def __init__(self, target, replacement, strict=True):
        self._replacer = Replacer()
        self.target = target
        self.replacement = replacement
        self.strict = strict

    def __enter__(self):
        # Delegate to the Replacer; yields the replacement actually used.
        return self._replacer(self.target, self.replacement, self.strict)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._replacer.restore()
+
# Shared parameter documentation appended to the replace APIs below.
replace_params_doc = """
:param target: A string containing the dotted-path to the
               object to be replaced. This path may specify a
               module in a package, an attribute of a module,
               or any attribute of something contained within
               a module.

:param replacement: The object to use as a replacement.

:param strict: When `True`, an exception will be raised if an
               attempt is made to replace an object that does
               not exist.
"""

# add the param docs, so we only have one copy of them!
extend_docstring(replace_params_doc,
                 [Replacer.__call__, Replacer.replace, replace, Replace])
diff --git a/testfixtures/resolve.py b/testfixtures/resolve.py
new file mode 100644
index 0000000..ed48385
--- /dev/null
+++ b/testfixtures/resolve.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from testfixtures import not_there
+
+
def resolve(dotted_name):
    """
    Resolve a dotted path to ``(container, method, name, found)``, where
    ``method`` is 'a' for attribute access, 'i' for item access and
    ``None`` when the path contained no dots, and ``found`` is the
    resolved object or ``not_there``.
    """
    names = dotted_name.split('.')
    used = names.pop(0)
    found = __import__(used)
    container = found
    method = None
    n = None
    for n in names:
        container = found
        used += '.' + n
        try:
            # Look in __dict__ first so descriptors such as classmethod /
            # staticmethod are seen unwrapped.
            found = found.__dict__[n]
            method = 'a'
        except (AttributeError, KeyError):
            try:
                found = getattr(found, n)
                method = 'a'  # pragma: no branch
            except AttributeError:
                # Not an attribute: try importing it as a submodule.
                try:
                    __import__(used)
                except ImportError:
                    # Not importable either: fall back to item access.
                    method = 'i'
                    try:
                        found = found[n]  # pragma: no branch
                    except KeyError:
                        found = not_there  # pragma: no branch
                    except TypeError:
                        # Sequence containers need an integer index.
                        try:
                            n = int(n)
                        except ValueError:
                            method = 'a'
                            found = not_there
                        else:
                            found = found[n]  # pragma: no branch
                else:
                    found = getattr(found, n)
                    method = 'a'  # pragma: no branch
    return container, method, n, found
diff --git a/testfixtures/rmtree.py b/testfixtures/rmtree.py
new file mode 100644
index 0000000..321ff6e
--- /dev/null
+++ b/testfixtures/rmtree.py
@@ -0,0 +1,66 @@
+# lamosity needed to make things reliable on Windows :-(
+# (borrowed from Python's test_support.py)
+import errno
+import os
+import shutil
+import sys
+import time
+import warnings
+
if sys.platform.startswith("win"):  # pragma: no cover
    def _waitfor(func, pathname, waitall=False):
        # Run func(pathname), then wait for the deletion to actually hit
        # the filesystem: on Windows, deletes of open files are deferred.
        # Perform the operation
        func(pathname)
        # Now setup the wait loop
        if waitall:
            dirname = pathname
        else:
            dirname, name = os.path.split(pathname)
            dirname = dirname or '.'
        # Check for `pathname` to be removed from the filesystem.
        # The exponential backoff of the timeout amounts to a total
        # of ~1 second after which the deletion is probably an error
        # anyway.
        # Testing on a i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
        timeout = 0.001
        while timeout < 1.0:  # pragma: no branch
            # Note we are only testing for the existence of the file(s) in
            # the contents of the directory regardless of any security or
            # access rights. If we have made it this far, we have sufficient
            # permissions to do that much using Python's equivalent of the
            # Windows API FindFirstFile.
            # Other Windows APIs can fail or give incorrect results when
            # dealing with files that are pending deletion.
            L = os.listdir(dirname)
            if not (L if waitall else name in L):  # pragma: no branch
                return
            # Increase the timeout and try again
            time.sleep(timeout)  # pragma: no cover
            timeout *= 2  # pragma: no cover
        warnings.warn('tests may fail, delete still pending for '
                      + pathname,  # pragma: no cover
                      RuntimeWarning, stacklevel=4)

    def _rmtree(path):
        # Depth-first removal where every deletion is confirmed via
        # _waitfor before moving on.
        def _rmtree_inner(path):
            for name in os.listdir(path):
                fullname = os.path.join(path, name)
                if os.path.isdir(fullname):
                    _waitfor(_rmtree_inner, fullname, waitall=True)
                    os.rmdir(fullname)
                else:
                    os.unlink(fullname)
        _waitfor(_rmtree_inner, path, waitall=True)
        _waitfor(os.rmdir, path)
else:
    # On other platforms the stdlib implementation is reliable.
    _rmtree = shutil.rmtree
+
+
def rmtree(path):
    """Remove the tree at ``path``, tolerating it already being gone."""
    try:
        _rmtree(path)
    except OSError as e:  # pragma: no cover
        # Unix returns ENOENT, Windows returns ESRCH.
        if e.errno not in (errno.ENOENT, errno.ESRCH):  # pragma: no branch
            raise
diff --git a/testfixtures/shouldraise.py b/testfixtures/shouldraise.py
new file mode 100644
index 0000000..3baefe1
--- /dev/null
+++ b/testfixtures/shouldraise.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+from functools import wraps
+from testfixtures import Comparison
+
param_docs = """

  :param exception: This can be one of the following:

                    * `None`, indicating that an exception must be
                      raised, but the type is unimportant.

                    * An exception class, indicating that the type
                      of the exception is important but not the
                      parameters it is created with.

                    * An exception instance, indicating that an
                      exception exactly matching the one supplied
                      should be raised.

  :param unless: Can be passed a boolean that, when ``True`` indicates that
                 no exception is expected. This is useful when checking
                 that exceptions are only raised on certain versions of
                 Python.
"""


class ShouldRaise(object):
    __doc__ = """
    This context manager is used to assert that an exception is raised
    within the context it is managing.
    """ + param_docs

    #: The exception captured by the context manager.
    #: Can be used to inspect specific attributes of the exception.
    raised = None

    def __init__(self, exception=None, unless=False):
        self.exception = exception
        self.expected = not unless

    def __enter__(self):
        return self

    def __exit__(self, type, actual, traceback):
        # bug in python :-(
        if type is not None and not isinstance(actual, type):
            # fixed in 2.7 onwards!
            actual = type(actual)  # pragma: no cover

        self.raised = actual

        if self.expected:
            if self.exception:
                comparison = Comparison(self.exception)
                if comparison != actual:
                    repr_actual = repr(actual)
                    repr_expected = repr(self.exception)
                    message = '%s raised, %s expected' % (
                        repr_actual, repr_expected
                    )
                    if repr_actual == repr_expected:
                        # The reprs match but the exceptions still differ,
                        # so include the differing attributes in the
                        # failure message.
                        extra = [', attributes differ:']
                        extra.extend(str(comparison).split('\n')[2:-1])
                        message += '\n'.join(extra)
                    raise AssertionError(message)

            elif not actual:
                raise AssertionError('No exception raised!')
        elif actual:
            raise AssertionError('%r raised, no exception expected' % actual)

        # Swallow the exception: it was either expected, or has been
        # turned into an AssertionError above.
        return True
+
+
class should_raise:
    __doc__ = """
    A decorator to assert that the decorated function will raised
    an exception. An exception class or exception instance may be
    passed to check more specifically exactly what exception will be
    raised.
    """ + param_docs

    def __init__(self, exception=None, unless=None):
        self.exception = exception
        self.unless = unless

    def __call__(self, target):

        @wraps(target)
        def _should_raise_wrapper(*args, **kw):
            # Delegate the actual assertion to the ShouldRaise context
            # manager.
            with ShouldRaise(self.exception, self.unless):
                target(*args, **kw)

        return _should_raise_wrapper
diff --git a/testfixtures/shouldwarn.py b/testfixtures/shouldwarn.py
new file mode 100644
index 0000000..d50366f
--- /dev/null
+++ b/testfixtures/shouldwarn.py
@@ -0,0 +1,55 @@
+import warnings
+
+from testfixtures import Comparison as C, compare
+
+
class ShouldWarn(warnings.catch_warnings):
    """
    This context manager is used to assert that warnings are issued
    within the context it is managing.

    :param expected: This should be a sequence made up of one or more elements,
                     each of one of the following types:

                     * A warning class, indicating that the type
                       of the warnings is important but not the
                       parameters it is created with.

                     * A warning instance, indicating that a
                       warning exactly matching the one supplied
                       should have been issued.

    If no expected warnings are passed, you will need to inspect
    the contents of the list returned by the context manager.
    """

    # Subclasses set this to True to make "no warnings recorded" a success.
    _empty_okay = False

    def __init__(self, *expected):
        super(ShouldWarn, self).__init__(record=True)
        self.expected = [C(e) for e in expected]

    def __enter__(self):
        self.recorded = super(ShouldWarn, self).__enter__()
        # Record every warning, bypassing filtering and de-duplication.
        warnings.simplefilter("always")
        return self.recorded

    def __exit__(self, exc_type, exc_val, exc_tb):
        super(ShouldWarn, self).__exit__(exc_type, exc_val, exc_tb)
        if not self.recorded and self._empty_okay:
            return
        if not self.expected and self.recorded and not self._empty_okay:
            # No explicit expectations: any recorded warning satisfies us.
            return
        compare(self.expected, actual=[wm.message for wm in self.recorded])
+
+
class ShouldNotWarn(ShouldWarn):
    """
    This context manager is used to assert that no warnings are issued
    within the context it is managing.
    """

    # Exiting with nothing recorded is the success case here.
    _empty_okay = True

    def __init__(self):
        # No expected warnings: anything recorded will fail the compare.
        super(ShouldNotWarn, self).__init__()
diff --git a/testfixtures/tdatetime.py b/testfixtures/tdatetime.py
new file mode 100644
index 0000000..7ec145f
--- /dev/null
+++ b/testfixtures/tdatetime.py
@@ -0,0 +1,223 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from calendar import timegm
+from datetime import datetime, timedelta, date
+from testfixtures.compat import new_class
+
+
@classmethod
def add(cls, *args, **kw):
    """Queue a value to be returned by the test class, in order."""
    if 'tzinfo' in kw or len(args) > 7:
        raise TypeError('Cannot add tzinfo to %s' % cls.__name__)
    if args and isinstance(args[0], cls.__bases__[0]):
        # An existing date/datetime instance was passed in directly.
        inst = args[0]
        if getattr(inst, 'tzinfo', None):
            raise ValueError(
                'Cannot add %s with tzinfo set' % inst.__class__.__name__
            )
        if cls._ct:
            # Coerce to the correct concrete type before queueing.
            inst = cls._ct(inst)
        cls._q.append(inst)
    else:
        cls._q.append(cls(*args, **kw))
+
+
@classmethod
def set_(cls, *args, **kw):
    """Clear any queued values and queue only the one supplied."""
    if 'tzinfo' in kw or len(args) > 7:
        raise TypeError('Cannot set tzinfo on %s' % cls.__name__)
    if args and isinstance(args[0], cls.__bases__[0]):
        inst = args[0]
        if getattr(inst, 'tzinfo', None):
            raise ValueError(
                'Cannot set %s with tzinfo set' % inst.__class__.__name__
            )
    if cls._q:
        cls._q = []
    cls.add(*args, **kw)
+
+
def __add__(self, other):
    # Coerce the result of arithmetic back to the correct concrete type.
    r = super(self.__class__, self).__add__(other)
    if self._ct:
        r = self._ct(r)
    return r
+
+
def __new__(cls, *args, **kw):
    # In strict mode instances are of the test class itself; otherwise
    # plain instances of the wrapped type are returned.
    if cls is cls._cls:
        return super(cls, cls).__new__(cls, *args, **kw)
    else:
        return cls._cls(*args, **kw)
+
+
@classmethod
def instantiate(cls):
    # Pop the next queued value; when the queue empties, schedule the
    # following value one (growing) gap further on.
    r = cls._q.pop(0)
    if not cls._q:
        cls._gap += cls._gap_d
        n = r + timedelta(**{cls._gap_t: cls._gap})
        if cls._ct:
            n = cls._ct(n)
        cls._q.append(n)
    return r
+
+
@classmethod
def now(cls, tz=None):
    """Replacement for :meth:`datetime.datetime.now`."""
    r = cls._instantiate()
    if tz is not None:
        # Convert the configured "local" value to the requested timezone.
        if cls._tzta:
            r = r - cls._tzta.utcoffset(r)
        r = tz.fromutc(r.replace(tzinfo=tz))
    return cls._ct(r)
+
+
@classmethod
def utcnow(cls):
    """Replacement for :meth:`datetime.datetime.utcnow`."""
    r = cls._instantiate()
    if cls._tzta is not None:
        # Strip the configured local offset to yield a UTC value.
        r = r - cls._tzta.utcoffset(r)
    return r
+
+
def test_factory(n, type, default, args, kw, tz=None, **to_patch):
    """Build a subclass of ``type`` patched with the queueing behaviour."""
    q = []
    to_patch['_q'] = q
    to_patch['_tzta'] = tz
    to_patch['add'] = add
    to_patch['set'] = set_
    to_patch['__add__'] = __add__
    if '__new__' not in to_patch:
        to_patch['__new__'] = __new__
    class_ = new_class(n, (type, ), to_patch)
    strict = kw.pop('strict', False)
    if strict:
        class_._cls = class_
    else:
        class_._cls = type
    if args == (None, ):
        # An explicit None means start with an empty queue.
        pass
    elif args or kw:
        q.append(class_(*args, **kw))
    else:
        q.append(class_(*default))
    return class_
+
+
def correct_date_method(self):
    # Used as the date() method of test datetimes so the configured
    # date type is returned.
    return self._date_type(
        self.year,
        self.month,
        self.day
    )
+
+
@classmethod
def correct_datetime(cls, dt):
    # Coerce a plain datetime into the configured concrete class.
    return cls._cls(
        dt.year,
        dt.month,
        dt.day,
        dt.hour,
        dt.minute,
        dt.second,
        dt.microsecond,
        dt.tzinfo,
    )
+
+
def test_datetime(*args, **kw):
    """Return a class to replace :class:`datetime.datetime` in tests."""
    tz = None
    if len(args) > 7:
        # A tzinfo passed positionally is split off from the queued value.
        tz = args[7]
        args = args[:7]
    else:
        tz = kw.pop('tzinfo', None)
    if 'delta' in kw:
        gap = kw.pop('delta')
        gap_delta = 0
    else:
        gap = 0
        gap_delta = 10
    delta_type = kw.pop('delta_type', 'seconds')
    date_type = kw.pop('date_type', date)
    return test_factory(
        'tdatetime', datetime, (2001, 1, 1, 0, 0, 0), args, kw, tz,
        _ct=correct_datetime,
        _instantiate=instantiate,
        now=now,
        utcnow=utcnow,
        _gap=gap,
        _gap_d=gap_delta,
        _gap_t=delta_type,
        date=correct_date_method,
        _date_type=date_type,
    )

# stop test runners from treating this factory as a test
test_datetime.__test__ = False
+
+
@classmethod
def correct_date(cls, d):
    # Coerce a plain date into the configured concrete class.
    return cls._cls(
        d.year,
        d.month,
        d.day,
    )
+
+
def test_date(*args, **kw):
    """Return a class to replace :class:`datetime.date` in tests."""
    if 'delta' in kw:
        gap = kw.pop('delta')
        gap_delta = 0
    else:
        gap = 0
        gap_delta = 1
    delta_type = kw.pop('delta_type', 'days')
    return test_factory(
        'tdate', date, (2001, 1, 1), args, kw,
        _ct=correct_date,
        today=instantiate,
        _gap=gap,
        _gap_d=gap_delta,
        _gap_t=delta_type,
    )
+
# microseconds per second, for converting microseconds to a float fraction
ms = 10**6


def __time_new__(cls, *args, **kw):
    # With arguments, behave like a normal constructor; without, pop the
    # next queued value and return it as a float timestamp, mimicking
    # time.time().
    if args or kw:
        return super(cls, cls).__new__(cls, *args, **kw)
    else:
        val = cls.instantiate()
        t = timegm(val.utctimetuple())
        t += (float(val.microsecond)/ms)
        return t

# stop test runners from treating this factory as a test
test_date.__test__ = False
+
+
def test_time(*args, **kw):
    """Return a callable to replace :func:`time.time` in tests."""
    if 'tzinfo' in kw or len(args) > 7:
        raise TypeError("You don't want to use tzinfo with test_time")
    if 'delta' in kw:
        gap = kw.pop('delta')
        gap_delta = 0
    else:
        gap = 0
        gap_delta = 1
    delta_type = kw.pop('delta_type', 'seconds')
    return test_factory(
        'ttime', datetime, (2001, 1, 1, 0, 0, 0), args, kw,
        _ct=None,
        instantiate=instantiate,
        _gap=gap,
        _gap_d=gap_delta,
        _gap_t=delta_type,
        __new__=__time_new__,
    )

# stop test runners from treating this factory as a test
test_time.__test__ = False
diff --git a/testfixtures/tempdirectory.py b/testfixtures/tempdirectory.py
new file mode 100644
index 0000000..7b7602a
--- /dev/null
+++ b/testfixtures/tempdirectory.py
@@ -0,0 +1,401 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+import atexit
+import os
+import warnings
+
+from re import compile
+from tempfile import mkdtemp
+from testfixtures.comparison import compare
+from testfixtures.compat import basestring
+from testfixtures.utils import wrap
+
+from .rmtree import rmtree
+
+
+class TempDirectory:
+ """
+ A class representing a temporary directory on disk.
+
+ :param ignore: A sequence of strings containing regular expression
+ patterns that match filenames that should be
+ ignored by the :class:`TempDirectory` listing and
+ checking methods.
+
+ :param create: If `True`, the temporary directory will be created
+ as part of class instantiation.
+
+ :param path: If passed, this should be a string containing a
+ physical path to use as the temporary directory. When
+ passed, :class:`TempDirectory` will not create a new
+ directory to use.
+
+ :param encoding: A default encoding to use for :meth:`read` and
+ :meth:`write` operations when the ``encoding`` parameter
+ is not passed to those methods.
+ """
+
+ instances = set()
+ atexit_setup = False
+
+ #: The physical path of the :class:`TempDirectory` on disk
+ path = None
+
    def __init__(self, ignore=(), create=True, path=None, encoding=None):
        # Pre-compile the ignore patterns once for repeated use by actual().
        self.ignore = []
        for regex in ignore:
            self.ignore.append(compile(regex))
        self.path = path
        self.encoding = encoding
        # A user-supplied path is never deleted by cleanup().
        self.dont_remove = bool(path)
        if create:
            self.create()
+
    @classmethod
    def atexit(cls):
        # Runs at interpreter shutdown to flag directories that were
        # created but never cleaned up.
        if cls.instances:
            warnings.warn(
                'TempDirectory instances not cleaned up by shutdown:\n'
                '%s' % ('\n'.join(i.path for i in cls.instances))
            )
+
    def create(self):
        """
        Create a temporary directory for this instance to use if one
        has not already been created.
        """
        if self.path:
            return self
        self.path = mkdtemp()
        self.instances.add(self)
        # Register the shutdown warning hook only once per process.
        if not self.__class__.atexit_setup:
            atexit.register(self.atexit)
            self.__class__.atexit_setup = True
        return self
+
    def cleanup(self):
        """
        Delete the temporary directory and anything in it.
        This :class:`TempDirectory` cannot be used again unless
        :meth:`create` is called.
        """
        if self.path and os.path.exists(self.path) and not self.dont_remove:
            rmtree(self.path)
            # Deleting the instance attribute exposes the class-level
            # default of None again.
            del self.path
        if self in self.instances:
            self.instances.remove(self)
+
+ @classmethod
+ def cleanup_all(cls):
+ """
+ Delete all temporary directories associated with all
+ :class:`TempDirectory` objects.
+ """
+ for i in tuple(cls.instances):
+ i.cleanup()
+
    def actual(self,
               path=None, recursive=False, files_only=False, followlinks=False):
        # Return a sorted, ignore-filtered list of the directory contents,
        # using '/' as the separator regardless of platform.
        path = self._join(path) if path else self.path

        result = []
        if recursive:
            for dirpath, dirnames, filenames in os.walk(
                path, followlinks=followlinks
            ):
                # Rewrite the absolute walk path as a relative,
                # forward-slash separated one.
                dirpath = '/'.join(dirpath[len(path)+1:].split(os.sep))
                if dirpath:
                    dirpath += '/'

                for dirname in dirnames:
                    if not files_only:
                        # Directories are listed with a trailing slash.
                        result.append(dirpath+dirname+'/')

                for name in sorted(filenames):
                    result.append(dirpath+name)
        else:
            for n in os.listdir(path):
                result.append(n)

        filtered = []
        for path in sorted(result):
            ignore = False
            for regex in self.ignore:
                if regex.search(path):
                    ignore = True
                    break
            if ignore:
                continue
            filtered.append(path)
        return filtered
+
+ def listdir(self, path=None, recursive=False):
+ """
+ Print the contents of the specified directory.
+
+ :param path: The path to list, which can be:
+
+ * `None`, indicating the root of the temporary
+ directory should be listed.
+
+ * A tuple of strings, indicating that the
+ elements of the tuple should be used as directory
+ names to traverse from the root of the
+ temporary directory to find the directory to be
+ listed.
+
+ * A forward-slash separated string, indicating
+ the directory or subdirectory that should be
+ traversed to from the temporary directory and
+ listed.
+
+ :param recursive: If `True`, the directory specified will have
+ its subdirectories recursively listed too.
+ """
+ actual = self.actual(path, recursive)
+ if not actual:
+ print('No files or directories found.')
+ for n in actual:
+ print(n)
+
+ def compare(self, expected, path=None, files_only=False, recursive=True,
+ followlinks=False):
+ """
+ Compare the expected contents with the actual contents of the temporary
+ directory. An :class:`AssertionError` will be raised if they are not the
+ same.
+
+ :param expected: A sequence of strings containing the paths
+ expected in the directory. These paths should
+ be forward-slash separated and relative to
+ the root of the temporary directory.
+
+ :param path: The path to use as the root for the comparison,
+ relative to the root of the temporary directory.
+ This can either be:
+
+ * A tuple of strings, making up the relative path.
+
+ * A forward-slash separated string.
+
+ If it is not provided, the root of the temporary
+ directory will be used.
+
+ :param files_only: If specified, directories will be excluded from
+ the list of actual paths used in the comparison.
+
+ :param recursive: If passed as ``False``, only the direct contents of
+ the directory specified by ``path`` will be included
+ in the actual contents used for comparison.
+
+ :param followlinks: If passed as ``True``, symlinks and hard links
+ will be followed when recursively building up
+ the actual list of directory contents.
+ """
+ compare(expected=sorted(expected),
+ actual=tuple(self.actual(
+ path, recursive, files_only, followlinks
+ )),
+ recursive=False)
+
+ def check(self, *expected):
+ """
+ .. deprecated:: 4.3.0
+
+ Compare the contents of the temporary directory with the
+ expected contents supplied.
+
+ This method only checks the root of the temporary directory.
+
+ :param expected: A sequence of strings containing the names
+ expected in the directory.
+ """
+ compare(expected, tuple(self.actual()), recursive=False)
+
+ def check_dir(self, dir, *expected):
+ """
+ .. deprecated:: 4.3.0
+
+ Compare the contents of the specified subdirectory of the
+ temporary directory with the expected contents supplied.
+
+ This method will only check the contents of the subdirectory
+ specified and will not recursively check subdirectories.
+
+ :param dir: The subdirectory to check, which can be:
+
+ * A tuple of strings, indicating that the
+ elements of the tuple should be used as directory
+ names to traverse from the root of the
+ temporary directory to find the directory to be
+ checked.
+
+ * A forward-slash separated string, indicating
+ the directory or subdirectory that should be
+ traversed to from the temporary directory and
+ checked.
+
+ :param expected: A sequence of strings containing the names
+ expected in the directory.
+ """
+ compare(expected, tuple(self.actual(dir)), recursive=False)
+
+ def check_all(self, dir, *expected):
+ """
+ .. deprecated:: 4.3.0
+
+ Recursively compare the contents of the specified directory
+ with the expected contents supplied.
+
+ :param dir: The directory to check, which can be:
+
+ * A tuple of strings, indicating that the
+ elements of the tuple should be used as directory
+ names to traverse from the root of the
+ temporary directory to find the directory to be
+ checked.
+
+ * A forward-slash separated string, indicating
+ the directory or subdirectory that should be
+ traversed to from the temporary directory and
+ checked.
+
+ * An empty string, indicating that the whole
+ temporary directory should be checked.
+
+ :param expected: A sequence of strings containing the paths
+ expected in the directory. These paths should
+ be forward-slash separated and relative to
+ the root of the temporary directory.
+ """
+ compare(expected, tuple(self.actual(dir, recursive=True)),
+ recursive=False)
+
    def _join(self, name):
        # make things platform independent
        if isinstance(name, basestring):
            name = name.split('/')
        relative = os.sep.join(name).rstrip(os.sep)
        if relative.startswith(os.sep):
            # An absolute path is only allowed when it already lies inside
            # the temporary directory.
            if relative.startswith(self.path):
                return relative
            raise ValueError(
                'Attempt to read or write outside the temporary Directory'
            )
        return os.path.join(self.path, relative)
+
+ def makedir(self, dirpath):
+ """
+ Make an empty directory at the specified path within the
+ temporary directory. Any intermediate subdirectories that do
+ not exist will also be created.
+
+ :param dirpath: The directory to create, which can be:
+
+ * A tuple of strings.
+
+ * A forward-slash separated string.
+
+ :returns: The full path of the created directory.
+ """
+ thepath = self._join(dirpath)
+ os.makedirs(thepath)
+ return thepath
+
+ def write(self, filepath, data, encoding=None):
+ """
+ Write the supplied data to a file at the specified path within
+ the temporary directory. Any subdirectories specified that do
+ not exist will also be created.
+
+ The file will always be written in binary mode. The data supplied must
+ either be bytes or an encoding must be supplied to convert the string
+ into bytes.
+
+ :param filepath: The path to the file to create, which can be:
+
+ * A tuple of strings.
+
+ * A forward-slash separated string.
+
+ :param data: A string containing the data to be written.
+
+ :param encoding: The encoding to be used if data is not bytes. Should
+ not be passed if data is already bytes.
+
+ :returns: The full path of the file written.
+ """
+ if isinstance(filepath, basestring):
+ filepath = filepath.split('/')
+ if len(filepath) > 1:
+ dirpath = self._join(filepath[:-1])
+ if not os.path.exists(dirpath):
+ os.makedirs(dirpath)
+ thepath = self._join(filepath)
+ encoding = encoding or self.encoding
+ if encoding is not None:
+ data = data.encode(encoding)
+ with open(thepath, 'wb') as f:
+ f.write(data)
+ return thepath
+
+ def getpath(self, path):
+ """
+ Return the full path on disk that corresponds to the path
+ relative to the temporary directory that is passed in.
+
+ :param path: The path to the file to create, which can be:
+
+ * A tuple of strings.
+
+ * A forward-slash separated string.
+
+ :returns: A string containing the full path.
+ """
+ return self._join(path)
+
+ def read(self, filepath, encoding=None):
+ """
+ Reads the file at the specified path within the temporary
+ directory.
+
+ The file is always read in binary mode. Bytes will be returned unless
+ an encoding is supplied, in which case a unicode string of the decoded
+ data will be returned.
+
+ :param filepath: The path to the file to read, which can be:
+
+ * A tuple of strings.
+
+ * A forward-slash separated string.
+
+ :param encoding: The encoding used to decode the data in the file.
+
+ :returns: A string containing the data read.
+ """
+ with open(self._join(filepath), 'rb') as f:
+ data = f.read()
+ encoding = encoding or self.encoding
+ if encoding is not None:
+ return data.decode(encoding)
+ return data
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.cleanup()
+
+
def tempdir(*args, **kw):
    """
    A decorator for making a :class:`TempDirectory` available for the
    duration of a test function.

    All arguments and parameters are passed through to the
    :class:`TempDirectory` constructor.
    """
    # Creation is deferred so the directory only exists while the wrapped
    # test is running.
    kw['create'] = False
    l = TempDirectory(*args, **kw)
    return wrap(l.create, l.cleanup)
diff --git a/testfixtures/tests/__init__.py b/testfixtures/tests/__init__.py
new file mode 100644
index 0000000..dd18dc0
--- /dev/null
+++ b/testfixtures/tests/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2008 Simplistix Ltd
+# See license.txt for license details.
+import warnings
+# ImportWarning is ignored by default; switch it to the 'default' action
+# so import problems become visible when the test package is loaded.
+warnings.simplefilter('default', ImportWarning)
diff --git a/testfixtures/tests/compat.py b/testfixtures/tests/compat.py
new file mode 100644
index 0000000..112785f
--- /dev/null
+++ b/testfixtures/tests/compat.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2011-2013 Simplistix Ltd, 2015 Chris Withers
+# See license.txt for license details.
+
+# This module contains bits and pieces to achieve compatibility across all the
+# versions of python supported.
+
+import doctest
+import manuel
+import re
+import sys
+import textwrap
+
+from manuel.codeblock import (
+ CODEBLOCK_START,
+ CODEBLOCK_END,
+ CodeBlock,
+ execute_code_block,
+ )
+
+# version markers
+
+from ..compat import PY2 as py_2
+py_33_plus = sys.version_info[:2] >= (3, 3)
+py_34_plus = sys.version_info[:2] >= (3, 4)
+py_35_plus = sys.version_info[:2] >= (3, 5)
+
+# Python 2.7 compatibility stuff
+
+BYTE_LITERALS = re.compile("^b('.*')$", re.MULTILINE)
+
+
+def find_code_blocks(document):
+    """
+    Manuel parser: locate code-block regions in *document* and parse
+    each one into a :class:`CodeBlock` ready for execution.
+    """
+    for region in document.find_regions(CODEBLOCK_START, CODEBLOCK_END):
+        start_end = CODEBLOCK_START.search(region.source).end()
+        source = textwrap.dedent(region.source[start_end:])
+        if py_2:
+            # On Python 2, drop ``b`` prefixes from byte literals and turn
+            # print into a function so examples written for Python 3 also
+            # compile and run here.
+            source = BYTE_LITERALS.sub('\\1', source)
+            source = 'from __future__ import print_function\n' + source
+        source_location = '%s:%d' % (document.location, region.lineno)
+        code = compile(source, source_location, 'exec', 0, True)
+        document.claim_region(region)
+        region.parsed = CodeBlock(code, source)
+
+
+class Manuel(manuel.Manuel):
+    # A Manuel configured to parse code blocks with the version-compatible
+    # parser above, while executing them with the stock evaluator.
+    def __init__(self):
+        manuel.Manuel.__init__(self, [find_code_blocks], [execute_code_block])
+
+
+if py_2:
+    class DocTestChecker(doctest.OutputChecker):
+        # On Python 2, strip ``b`` prefixes from the expected output so
+        # doctests written against Python 3 reprs still pass.
+        def check_output(self, want, got, optionflags):
+            want = BYTE_LITERALS.sub('\\1', want)
+            return doctest.OutputChecker.check_output(
+                self, want, got, optionflags
+            )
+else:
+    # Python 3 output matches the examples as written; no munging needed.
+    DocTestChecker = doctest.OutputChecker
diff --git a/testfixtures/tests/configparser-read.txt b/testfixtures/tests/configparser-read.txt
new file mode 100644
index 0000000..d55378a
--- /dev/null
+++ b/testfixtures/tests/configparser-read.txt
@@ -0,0 +1,45 @@
+Here's an example configuration file:
+
+.. topic:: example.cfg
+ :class: write-file
+
+ ::
+
+ [A Section]
+ dir=frob
+ long: this value continues
+ on the next line
+
+.. invisible-code-block: python
+
+ from testfixtures.compat import PY3
+ # change to the temp directory
+ import os
+ original_dir = os.getcwd()
+ os.chdir(tempdir.path)
+
+To parse this file using the :mod:`ConfigParser` module, you would
+do the following:
+
+.. code-block:: python
+
+ if PY3:
+ from configparser import ConfigParser
+ else:
+ from ConfigParser import ConfigParser
+ config = ConfigParser()
+ config.read('example.cfg')
+
+The items in the section are now available as follows:
+
+>>> for name, value in sorted(config.items('A Section')):
+... print('{0!r}:{1!r}'.format(name, value))
+'dir':'frob'
+'long':'this value continues\non the next line'
+
+.. invisible-code-block: python
+
+ # change out again
+ import os
+ os.chdir(original_dir)
+
diff --git a/testfixtures/tests/configparser-write.txt b/testfixtures/tests/configparser-write.txt
new file mode 100644
index 0000000..9f871ff
--- /dev/null
+++ b/testfixtures/tests/configparser-write.txt
@@ -0,0 +1,43 @@
+.. invisible-code-block: python
+
+ from testfixtures.compat import PY3
+ # change to the temp directory
+ import os
+ original_dir = os.getcwd()
+ os.chdir(tempdir.path)
+
+To construct a configuration file using the :mod:`ConfigParser`
+module, you would do the following:
+
+.. code-block:: python
+
+ if PY3:
+ from configparser import ConfigParser
+ else:
+ from ConfigParser import ConfigParser
+ config = ConfigParser()
+ config.add_section('A Section')
+ config.set('A Section', 'dir', 'frob')
+ f = open('example.cfg','w')
+ config.write(f)
+ f.close()
+
+The generated configuration file will be as follows:
+
+.. topic:: example.cfg
+ :class: read-file
+
+ ::
+
+ [A Section]
+ dir = frob
+
+
+.. ConfigParser writes trailing whitespace at the end of the file; be careful when testing!
+
+.. invisible-code-block: python
+
+ # change out again
+ import os
+ os.chdir(original_dir)
+
diff --git a/testfixtures/tests/directory-contents.txt b/testfixtures/tests/directory-contents.txt
new file mode 100644
index 0000000..73599fd
--- /dev/null
+++ b/testfixtures/tests/directory-contents.txt
@@ -0,0 +1,32 @@
+Here's an example piece of code that creates some files and
+directories:
+
+.. code-block:: python
+
+ import os
+
+ def spew(path):
+ with open(os.path.join(path, 'root.txt'), 'wb') as f:
+ f.write(b'root output')
+ os.mkdir(os.path.join(path, 'subdir'))
+ with open(os.path.join(path, 'subdir', 'file.txt'), 'wb') as f:
+ f.write(b'subdir output')
+ os.mkdir(os.path.join(path, 'subdir', 'logs'))
+
+This function is used as follows:
+
+>>> spew(tempdir.path)
+
+This will create the following files and directories::
+
+ root.txt
+ subdir/
+ subdir/file.txt
+ subdir/logs/
+
+.. -> expected_listing
+
+.. invisible-code-block: python
+
+ # check the listing was as expected
+ tempdir.compare(expected_listing.strip().split('\n'))
diff --git a/testfixtures/tests/sample1.py b/testfixtures/tests/sample1.py
new file mode 100644
index 0000000..e0d95cc
--- /dev/null
+++ b/testfixtures/tests/sample1.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2008 Simplistix Ltd
+# See license.txt for license details.
+"""
+A sample module containing the kind of code that
+TestFixtures helps with testing
+"""
+
+from datetime import datetime, date
+
+
+def str_now_1():
+    # Calls datetime.now directly via the imported class: can only be
+    # mocked by replacing ``datetime`` in this module.
+    return str(datetime.now())
+
+# Module-level alias, so tests can replace ``now`` itself.
+now = datetime.now
+
+
+def str_now_2():
+    # Calls through the module-level ``now`` reference instead.
+    return str(now())
+
+
+def str_today_1():
+    # Direct call on the imported ``date`` class.
+    return str(date.today())
+
+# Module-level alias, so tests can replace ``today`` itself.
+today = date.today
+
+
+def str_today_2():
+    # Calls through the module-level ``today`` reference instead.
+    return str(today())
+
+from time import time
+
+
+def str_time():
+    # Uses the imported ``time`` function; replaceable via this module.
+    return str(time())
+
+
+class X:
+    # Sample class carrying an instance method, a classmethod and a
+    # staticmethod for the replacement tests to target.
+
+    def y(self):
+        return "original y"
+
+    @classmethod
+    def aMethod(cls):
+        # Returns the class itself, so tests can check which class
+        # the classmethod was bound to.
+        return cls
+
+    @staticmethod
+    def bMethod():
+        return 2
+
+
+def z():
+    # Module-level function for replacement tests.
+    return "original z"
+
+
+class TestClassA:
+    # Records its constructor arguments for later inspection.
+    def __init__(self, *args):
+        self.args = args
+
+
+class TestClassB(TestClassA):
+    pass
+
+
+def a_function():
+    # Returns a mix of the two sample classes.
+    return (TestClassA(1), TestClassB(2), TestClassA(3))
+
+# Sample data structure with simple and compound values.
+someDict = dict(
+    key='value',
+    complex_key=[1, 2, 3],
+    )
diff --git a/testfixtures/tests/sample2.py b/testfixtures/tests/sample2.py
new file mode 100644
index 0000000..6713a62
--- /dev/null
+++ b/testfixtures/tests/sample2.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+"""
+A sample module containing the kind of code that
+TestFixtures helps with testing
+"""
+
+from testfixtures.tests.sample1 import X, z
+
+try:
+    from guppy import hpy
+    guppy = True
+except ImportError:
+    # guppy is optional; without it ``dump`` silently does nothing.
+    guppy = False
+
+
+def dump(path):
+    # Write a heap profile to ``path`` when guppy is available.
+    if guppy:
+        hpy().heap().stat.dump(path)
diff --git a/testfixtures/tests/test_compare.py b/testfixtures/tests/test_compare.py
new file mode 100644
index 0000000..0ea7e42
--- /dev/null
+++ b/testfixtures/tests/test_compare.py
@@ -0,0 +1,1372 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+from datetime import date
+
+from functools import partial
+
+from collections import namedtuple
+from mock import Mock, call
+from re import compile
+from testfixtures import (
+ Comparison as C,
+ Replacer,
+ ShouldRaise,
+ compare,
+ generator,
+ singleton,
+ )
+from testfixtures.compat import (
+ class_type_name, exception_module, PY3, xrange,
+ BytesLiteral, UnicodeLiteral
+ )
+from testfixtures.comparison import compare_sequence
+from unittest import TestCase
+from .compat import py_33_plus, py_2
+
+hexaddr = compile('0x[0-9A-Fa-f]+')
+
+
+def hexsub(raw):
+ return hexaddr.sub('...', raw)
+
+call_list_repr = repr(Mock().mock_calls.__class__)
+
+marker = object()
+
+
+class TestCompare(TestCase):
+
+    def checkRaises(self, x=marker, y=marker, message=None, regex=None, **kw):
+        """
+        Assert that ``compare(x, y, **kw)`` raises an :class:`AssertionError`
+        whose message — with hex addresses normalised to ``...`` — equals
+        *message*, or matches *regex* when *message* is None.
+
+        ``marker`` is a sentinel: arguments left at it are not passed on,
+        so ``compare`` can also be driven purely by keyword arguments.
+        """
+        args = []
+        for value in x, y:
+            if value is not marker:
+                args.append(value)
+        try:
+            compare(*args, **kw)
+        except Exception as e:
+            # Any non-AssertionError is a test failure in its own right.
+            if not isinstance(e, AssertionError):
+                self.fail('Expected AssertionError, got %r' % e)
+            actual = hexsub(e.args[0])
+            if message is not None:
+                # handy for debugging, but can't be relied on for tests!
+                # compare(actual, message, show_whitespace=True)
+                self.assertEqual(actual, message)
+            else:
+                if not regex.match(actual):  # pragma: no cover
+                    self.fail('%r did not match %r' % (actual, regex.pattern))
+        else:
+            self.fail('No exception raised!')
+
+ def test_object_same(self):
+ o = object()
+ compare(o, o)
+
+ def test_object_diff(self):
+ o1 = object()
+ o2 = object()
+ self.checkRaises(
+ o1, o2,
+ '<object object at ...> != <object object at ...>'
+ )
+
+ def test_different_types(self):
+ self.checkRaises('x', 1, "'x' != 1")
+
+ def test_number_same(self):
+ compare(1, 1)
+
+ def test_number_different(self):
+ self.checkRaises(1, 2, '1 != 2')
+
+ def test_different_with_labels(self):
+ self.checkRaises(1, 2, '1 (expected) != 2 (actual)',
+ x_label='expected', y_label='actual')
+
+ def test_string_same(self):
+ compare('x', 'x')
+
+ def test_unicode_string_different(self):
+ if py_2:
+ expected = "u'a' != 'b'"
+ else:
+ expected = "'a' != b'b'"
+ self.checkRaises(
+ UnicodeLiteral('a'), BytesLiteral('b'),
+ expected
+ )
+
+ def test_string_diff_short(self):
+ self.checkRaises(
+ '\n'+('x'*9), '\n'+('y'*9),
+ "'\\nxxxxxxxxx' != '\\nyyyyyyyyy'"
+ )
+
+ def test_string_diff_long(self):
+ self.checkRaises(
+ 'x'*11, 'y'*11,
+ "\n'xxxxxxxxxxx'\n!=\n'yyyyyyyyyyy'"
+ )
+
+ def test_string_diff_long_newlines(self):
+ self.checkRaises(
+ 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
+ "\n--- first\n+++ second\n@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz"
+ )
+
+ def test_string_diff_short_labels(self):
+ self.checkRaises(
+ '\n'+('x'*9), '\n'+('y'*9),
+ "'\\nxxxxxxxxx' (expected) != '\\nyyyyyyyyy' (actual)",
+ x_label='expected',
+ y_label='actual'
+ )
+
+ def test_string_diff_long_labels(self):
+ self.checkRaises(
+ 'x'*11, 'y'*11,
+ "\n'xxxxxxxxxxx' (expected)\n!=\n'yyyyyyyyyyy' (actual)",
+ x_label='expected',
+ y_label='actual'
+ )
+
+ def test_string_diff_long_newlines_labels(self):
+ self.checkRaises(
+ 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
+ "\n--- expected\n+++ actual\n"
+ "@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz",
+ x_label='expected',
+ y_label='actual'
+ )
+
+ def test_exception_same_object(self):
+ e = ValueError('some message')
+ compare(e, e)
+
+ def test_exception_same_c_wrapper(self):
+ e1 = ValueError('some message')
+ e2 = ValueError('some message')
+ compare(C(e1), e2)
+
+ def test_exception_different_object(self):
+ e1 = ValueError('some message')
+ e2 = ValueError('some message')
+ self.checkRaises(
+ e1, e2,
+ "ValueError('some message',) != ValueError('some message',)"
+ )
+
+ def test_exception_different_object_c_wrapper(self):
+ e1 = ValueError('some message')
+ e2 = ValueError('some message')
+ compare(C(e1), e2)
+
+ def test_exception_diff(self):
+ e1 = ValueError('some message')
+ e2 = ValueError('some other message')
+ self.checkRaises(
+ e1, e2,
+ "ValueError('some message',) != ValueError('some other message',)"
+ )
+
+ def test_exception_diff_c_wrapper(self):
+ e1 = ValueError('some message')
+ e2 = ValueError('some other message')
+ self.checkRaises(
+ C(e1), e2,
+ ("\n"
+ " <C(failed):{0}.ValueError>\n"
+ " args:('some message',) != ('some other message',)\n"
+ " </C>"
+ " != "
+ "ValueError('some other message',)"
+ ).format(exception_module))
+
+ def test_sequence_long(self):
+ self.checkRaises(
+ ['quite a long string 1', 'quite a long string 2',
+ 'quite a long string 3', 'quite a long string 4',
+ 'quite a long string 5', 'quite a long string 6',
+ 'quite a long string 7', 'quite a long string 8'],
+ ['quite a long string 1', 'quite a long string 2',
+ 'quite a long string 3', 'quite a long string 4',
+ 'quite a long string 9', 'quite a long string 10',
+ 'quite a long string 11', 'quite a long string 12'],
+ "sequence not as expected:\n\n"
+ "same:\n"
+ "['quite a long string 1',\n"
+ " 'quite a long string 2',\n"
+ " 'quite a long string 3',\n"
+ " 'quite a long string 4']\n\n"
+ "first:\n"
+ "['quite a long string 5',\n"
+ " 'quite a long string 6',\n"
+ " 'quite a long string 7',\n"
+ " 'quite a long string 8']\n\n"
+ "second:\n"
+ "['quite a long string 9',\n"
+ " 'quite a long string 10',\n"
+ " 'quite a long string 11',\n"
+ " 'quite a long string 12']\n"
+ "\n"
+ "While comparing [4]: \n"
+ "'quite a long string 5'\n"
+ "!=\n"
+ "'quite a long string 9'"
+ )
+
+ def test_sequence_different_labels_supplied(self):
+ self.checkRaises(
+ [1, 2, 3], [1, 2, 4],
+ "sequence not as expected:\n\n"
+ "same:\n"
+ "[1, 2]\n\n"
+ "expected:\n"
+ "[3]\n\n"
+ "actual:\n"
+ "[4]",
+ x_label='expected',
+ y_label='actual',
+ )
+
+ def test_list_same(self):
+ compare([1, 2, 3], [1, 2, 3])
+
+ def test_list_different(self):
+ self.checkRaises(
+ [1, 2, 3], [1, 2, 4],
+ "sequence not as expected:\n\n"
+ "same:\n"
+ "[1, 2]\n\n"
+ "first:\n"
+ "[3]\n\n"
+ "second:\n"
+ "[4]"
+ )
+
+ def test_list_totally_different(self):
+ self.checkRaises(
+ [1], [2],
+ "sequence not as expected:\n\n"
+ "same:\n"
+ "[]\n\n"
+ "first:\n"
+ "[1]\n\n"
+ "second:\n"
+ "[2]"
+ )
+
+ def test_list_first_shorter(self):
+ self.checkRaises(
+ [1, 2], [1, 2, 3],
+ "sequence not as expected:\n\n"
+ "same:\n[1, 2]\n\n"
+ "first:\n[]\n\n"
+ "second:\n[3]"
+ )
+
+ def test_list_second_shorter(self):
+ self.checkRaises(
+ [1, 2, 3], [1, 2],
+ "sequence not as expected:\n\n"
+ "same:\n[1, 2]\n\n"
+ "first:\n[3]\n\n"
+ "second:\n[]"
+ )
+
+ def test_dict_same(self):
+ compare(dict(x=1), dict(x=1))
+
+ def test_dict_first_missing_keys(self):
+ self.checkRaises(
+ dict(), dict(z=3),
+ "dict not as expected:\n"
+ "\n"
+ "in second but not first:\n"
+ "'z': 3"
+ )
+
+ def test_dict_second_missing_keys(self):
+ self.checkRaises(
+ dict(z=3), dict(),
+ "dict not as expected:\n"
+ "\n"
+ "in first but not second:\n"
+ "'z': 3"
+ )
+
+ def test_dict_values_different(self):
+ self.checkRaises(
+ dict(x=1), dict(x=2),
+ "dict not as expected:\n"
+ "\n"
+ "values differ:\n"
+ "'x': 1 != 2"
+ )
+
+ def test_dict_labels_specified(self):
+ self.checkRaises(
+ dict(x=1, y=2), dict(x=2, z=3),
+ "dict not as expected:\n"
+ "\n"
+ "in expected but not actual:\n"
+ "'y': 2\n"
+ "\n"
+ "in actual but not expected:\n"
+ "'z': 3\n"
+ "\n"
+ "values differ:\n"
+ "'x': 1 (expected) != 2 (actual)",
+ x_label='expected',
+ y_label='actual'
+ )
+
+ def test_dict_tuple_keys_same_value(self):
+ compare({(1, 2): None}, {(1, 2): None})
+
+ def test_dict_tuple_keys_different_value(self):
+ self.checkRaises(
+ {(1, 2): 3},
+ {(1, 2): 42},
+ "dict not as expected:\n"
+ "\n"
+ "values differ:\n"
+ "(1, 2): 3 != 42"
+ )
+
+ def test_dict_full_diff(self):
+ self.checkRaises(
+ dict(x=1, y=2, a=4), dict(x=1, z=3, a=5),
+ "dict not as expected:\n"
+ "\n"
+ 'same:\n'
+ "['x']\n"
+ "\n"
+ "in first but not second:\n"
+ "'y': 2\n"
+ '\n'
+ "in second but not first:\n"
+ "'z': 3\n"
+ '\n'
+ "values differ:\n"
+ "'a': 4 != 5"
+ )
+
+ def test_dict_consistent_ordering(self):
+ self.checkRaises(
+ dict(xa=1, xb=2, ya=1, yb=2, aa=3, ab=4),
+ dict(xa=1, xb=2, za=3, zb=4, aa=5, ab=5),
+ "dict not as expected:\n"
+ "\n"
+ 'same:\n'
+ "['xa', 'xb']\n"
+ "\n"
+ "in first but not second:\n"
+ "'ya': 1\n"
+ "'yb': 2\n"
+ '\n'
+ "in second but not first:\n"
+ "'za': 3\n"
+ "'zb': 4\n"
+ '\n'
+ "values differ:\n"
+ "'aa': 3 != 5\n"
+ "'ab': 4 != 5"
+ )
+
+ def test_dict_consistent_ordering_types_same(self):
+ self.checkRaises(
+ {None: 1, 6: 2, 1: 3},
+ {None: 1, 6: 2, 1: 4},
+ "dict not as expected:\n"
+ "\n"
+ 'same:\n'
+ "[6, None]\n"
+ "\n"
+ "values differ:\n"
+ "1: 3 != 4"
+ )
+
+ def test_dict_consistent_ordering_types_x_not_y(self):
+ self.checkRaises(
+ {None: 1, 3: 2},
+ {},
+ "dict not as expected:\n"
+ "\n"
+ "in first but not second:\n"
+ "3: 2\n"
+ "None: 1"
+ )
+
+ def test_dict_consistent_ordering_types_y_not_x(self):
+ self.checkRaises(
+ {},
+ {None: 1, 3: 2},
+ "dict not as expected:\n"
+ "\n"
+ "in second but not first:\n"
+ "3: 2\n"
+ "None: 1"
+ )
+
+ def test_dict_consistent_ordering_types_value(self):
+ self.checkRaises(
+ {None: 1, 6: 2},
+ {None: 3, 6: 4},
+ "dict not as expected:\n"
+ "\n"
+ "values differ:\n"
+ "6: 2 != 4\n"
+ "None: 1 != 3"
+ )
+
+ def test_set_same(self):
+ compare(set([1]), set([1]))
+
+ def test_set_first_missing_keys(self):
+ self.checkRaises(
+ set(), set([3]),
+ "set not as expected:\n"
+ "\n"
+ "in second but not first:\n"
+ "[3]\n"
+ '\n'
+ )
+
+ def test_set_second_missing_keys(self):
+ self.checkRaises(
+ set([3]), set(),
+ "set not as expected:\n"
+ "\n"
+ "in first but not second:\n"
+ "[3]\n"
+ '\n'
+ )
+
+ def test_set_full_diff(self):
+ self.checkRaises(
+ set([1, 2, 4]), set([1, 3, 5]),
+ "set not as expected:\n"
+ "\n"
+ "in first but not second:\n"
+ "[2, 4]\n"
+ '\n'
+ "in second but not first:\n"
+ "[3, 5]\n"
+ '\n'
+ )
+
+ def test_set_type_ordering(self):
+ self.checkRaises(
+ {None, 1}, {'', 2},
+ "set not as expected:\n"
+ "\n"
+ "in first but not second:\n"
+ "[1, None]\n"
+ '\n'
+ "in second but not first:\n"
+ "['', 2]\n"
+ '\n'
+ )
+
+ def test_set_labels(self):
+ self.checkRaises(
+ set([1, 2, 4]), set([1, 3, 5]),
+ "set not as expected:\n"
+ "\n"
+ "in expected but not actual:\n"
+ "[2, 4]\n"
+ '\n'
+ "in actual but not expected:\n"
+ "[3, 5]\n"
+ '\n',
+ x_label='expected',
+ y_label='actual',
+ )
+
+ def test_tuple_same(self):
+ compare((1, 2, 3), (1, 2, 3))
+
+ def test_tuple_different(self):
+ self.checkRaises(
+ (1, 2, 3), (1, 2, 4),
+ "sequence not as expected:\n\n"
+ "same:\n(1, 2)\n\n"
+ "first:\n(3,)\n\n"
+ "second:\n(4,)"
+ )
+
+ def test_tuple_totally_different(self):
+ self.checkRaises(
+ (1, ), (2, ),
+ "sequence not as expected:\n\n"
+ "same:\n()\n\n"
+ "first:\n(1,)\n\n"
+ "second:\n(2,)"
+ )
+
+ def test_tuple_first_shorter(self):
+ self.checkRaises(
+ (1, 2), (1, 2, 3),
+ "sequence not as expected:\n\n"
+ "same:\n(1, 2)\n\n"
+ "first:\n()\n\n"
+ "second:\n(3,)"
+ )
+
+ def test_tuple_second_shorter(self):
+ self.checkRaises(
+ (1, 2, 3), (1, 2),
+ "sequence not as expected:\n\n"
+ "same:\n(1, 2)\n\n"
+ "first:\n(3,)\n\n"
+ "second:\n()"
+ )
+
+ def test_generator_same(self):
+ compare(generator(1, 2, 3), generator(1, 2, 3))
+
+ def test_generator_different(self):
+ self.checkRaises(
+ generator(1, 2, 3), generator(1, 2, 4),
+ "sequence not as expected:\n\n"
+ "same:\n(1, 2)\n\n"
+ "first:\n(3,)\n\n"
+ "second:\n(4,)"
+ )
+
+ def test_generator_totally_different(self):
+ self.checkRaises(
+ generator(1, ), generator(2, ),
+ "sequence not as expected:\n\n"
+ "same:\n()\n\n"
+ "first:\n(1,)\n\n"
+ "second:\n(2,)"
+ )
+
+ def test_generator_first_shorter(self):
+ self.checkRaises(
+ generator(1, 2), generator(1, 2, 3),
+ "sequence not as expected:\n\n"
+ "same:\n(1, 2)\n\n"
+ "first:\n()\n\n"
+ "second:\n(3,)"
+ )
+
+    def test_generator_second_shorted(self):
+        # NOTE(review): method name has a typo — should read
+        # ``test_generator_second_shorter`` (cf. test_list_second_shorter);
+        # left unchanged here as renaming would alter the discovered test id.
+        self.checkRaises(
+            generator(1, 2, 3), generator(1, 2),
+            "sequence not as expected:\n\n"
+            "same:\n(1, 2)\n\n"
+            "first:\n(3,)\n\n"
+            "second:\n()"
+            )
+
+ def test_nested_generator_different(self):
+ self.checkRaises(
+ generator(1, 2, generator(3), 4),
+ generator(1, 2, generator(3), 5),
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(1, 2, <generator object generator at ...>)\n"
+ "\n"
+ "first:\n"
+ "(4,)\n"
+ "\n"
+ "second:\n"
+ "(5,)"
+ )
+
+ def test_nested_generator_tuple_left(self):
+ compare(
+ generator(1, 2, (3, ), 4),
+ generator(1, 2, generator(3), 4),
+ )
+
+ def test_nested_generator_tuple_right(self):
+ compare(
+ generator(1, 2, generator(3), 4),
+ generator(1, 2, (3, ), 4),
+ )
+
+ def test_sequence_and_generator(self):
+ compare((1, 2, 3), generator(1, 2, 3))
+
+ def test_sequence_and_generator_strict(self):
+ expected = compile(
+ "\(1, 2, 3\) \(<(class|type) 'tuple'>\) \(expected\) != "
+ "<generator object (generator )?at... "
+ "\(<(class|type) 'generator'>\) \(actual\)"
+ )
+ self.checkRaises(
+ (1, 2, 3), generator(1, 2, 3),
+ regex=expected,
+ strict=True,
+ x_label='expected',
+ y_label='actual',
+ )
+
+ def test_generator_and_sequence(self):
+ compare(generator(1, 2, 3), (1, 2, 3))
+
+ def test_iterable_with_iterable_same(self):
+ compare(xrange(1, 4), xrange(1, 4))
+
+ def test_iterable_with_iterable_different(self):
+ self.checkRaises(
+ xrange(1, 4), xrange(1, 3),
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(1, 2)\n"
+ "\n"
+ "first:\n"
+ "(3,)\n"
+ "\n"
+ "second:\n"
+ "()"
+ )
+
+ def test_iterable_and_generator(self):
+ compare(xrange(1, 4), generator(1, 2, 3))
+
+ def test_iterable_and_generator_strict(self):
+ expected = compile(
+ "x?range\(1, 4\) \(<(class|type) 'x?range'>\) != "
+ "<generator object (generator )?at... "
+ "\(<(class|type) 'generator'>\)"
+ )
+ self.checkRaises(
+ xrange(1, 4), generator(1, 2, 3),
+ regex=expected,
+ strict=True,
+ )
+
+ def test_generator_and_iterable(self):
+ compare(generator(1, 2, 3), xrange(1, 4))
+
+ def test_tuple_and_list(self):
+ compare((1, 2, 3), [1, 2, 3])
+
+ def test_tuple_and_list_strict(self):
+ if py_2:
+ expected = ("(1, 2, 3) (<type 'tuple'>) != "
+ "[1, 2, 3] (<type 'list'>)")
+ else:
+ expected = ("(1, 2, 3) (<class 'tuple'>) != "
+ "[1, 2, 3] (<class 'list'>)")
+
+ self.checkRaises(
+ (1, 2, 3), [1, 2, 3],
+ expected,
+ strict=True
+ )
+
+ def test_float_subclass_strict(self):
+ class TestFloat(float):
+ pass
+ compare(TestFloat(0.75), TestFloat(0.75), strict=True)
+
+ def test_old_style_classes_same(self):
+ class X:
+ pass
+ compare(X, X)
+
+ def test_old_style_classes_different(self):
+ if PY3:
+ expected = (
+ "<class 'testfixtures.tests.test_compare.TestCompare."
+ "test_old_style_classes_different.<locals>.X'>"
+ " != "
+ "<class 'testfixtures.tests.test_compare.TestCompare."
+ "test_old_style_classes_different.<locals>.Y'>"
+ )
+ else:
+ expected = (
+ "<class testfixtures.tests.test_compare.X at ...>"
+ " != "
+ "<class testfixtures.tests.test_compare.Y at ...>"
+ )
+
+ class X:
+ pass
+
+ class Y:
+ pass
+ self.checkRaises(X, Y, expected)
+
+ def test_show_whitespace(self):
+ # does nothing! ;-)
+ self.checkRaises(
+ ' x \n\r', ' x \n \t',
+ "' x \\n\\r' != ' x \\n \\t'",
+ show_whitespace=True
+ )
+
+ def test_show_whitespace_long(self):
+ self.checkRaises(
+ "\t \n '", '\r \n ',
+ '\n--- first\n'
+ '+++ second\n'
+ '@@ -1,2 +1,2 @@\n'
+ '-\'\\t \\n\'\n'
+ '-" \'"\n'
+ '+\'\\r \\n\'\n'
+ '+\' \'',
+ show_whitespace=True
+ )
+
+ def test_show_whitespace_equal(self):
+ compare('x', 'x', show_whitespace=True)
+
+ def test_show_whitespace_not_used_because_of_other_difference(self):
+ self.checkRaises(
+ (1, 'a'),
+ (2, 'b'),
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "()\n"
+ "\n"
+ "first:\n"
+ "(1, 'a')\n"
+ "\n"
+ "second:\n"
+ "(2, 'b')",
+ show_whitespace=False
+ )
+
+ def test_include_trailing_whitespace(self):
+ self.checkRaises(
+ ' x \n', ' x \n',
+ "' x \\n' != ' x \\n'"
+ )
+
+ def test_ignore_trailing_whitespace(self):
+ compare(' x \t\n', ' x\t \n', trailing_whitespace=False)
+
+ def test_ignore_trailing_whitespace_non_string(self):
+ self.checkRaises(
+ 1, '',
+ "1 != ''",
+ trailing_whitespace=False
+ )
+
+ def test_ignore_trailing_whitespace_but_respect_leading_whitespace(self):
+ # NB: careful: this strips off the last newline too
+ # DON'T use if you care about that!
+ self.checkRaises(
+ 'a\n b\n c\n',
+ 'a\nb\nc\n',
+ "'a\\n b\\n c' != 'a\\nb\\nc'",
+ trailing_whitespace=False
+ )
+
+ def test_include_blank_lines(self):
+ self.checkRaises(
+ '\n \n', '\n ',
+ "'\\n \\n' != '\\n '"
+ )
+
+ def test_ignore_blank_lines(self):
+ compare("""
+ a
+
+\t
+b
+ """,
+ ' a\nb', blanklines=False)
+
+ def test_ignore_blank_lines_non_string(self):
+ self.checkRaises(
+ 1, '',
+ "1 != ''",
+ blanklines=False
+ )
+
+ def test_supply_comparer(self):
+ def compare_dict(x, y, context):
+ self.assertEqual(x, {1: 1})
+ self.assertEqual(y, {2: 2})
+ self.assertEqual(context.get_option('foo'), 'bar')
+ return 'not equal'
+ with ShouldRaise(AssertionError('not equal')):
+ compare({1: 1}, {2: 2},
+ foo='bar',
+ comparers={dict: compare_dict})
+
+ def test_register_more_specific(self):
+ class_ = namedtuple('Test', 'x')
+ with ShouldRaise(AssertionError('compare class_')):
+ compare(class_(1), class_(2),
+ comparers={
+ tuple: Mock(return_value='compare tuple'),
+ class_: Mock(return_value='compare class_')
+ })
+
+ def test_extra_comparers_leave_existing(self):
+ class MyObject(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self):
+ return 'MyObject instance'
+
+ def compare_my_object(x, y, context):
+ return '%s != %s' % (x.name, y.name)
+ with Replacer() as r:
+ r.replace('testfixtures.comparison._registry', {
+ list: compare_sequence,
+ })
+ self.checkRaises(
+ [1, MyObject('foo')], [1, MyObject('bar')],
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "[1]\n"
+ "\n"
+ "first:\n"
+ "[MyObject instance]\n"
+ "\n"
+ "second:\n"
+ "[MyObject instance]\n"
+ "\n"
+ "While comparing [1]: foo != bar",
+ comparers={MyObject: compare_my_object}
+ )
+
+ def test_list_subclass(self):
+ m = Mock()
+ m.aCall()
+ # Mock().method_calls is a list subclass
+ self.checkRaises(
+ [call.bCall()], m.method_calls,
+ "sequence not as expected:\n\n"
+ "same:\n[]\n\n"
+ "first:\n[call.bCall()]\n\n"
+ "second:\n[call.aCall()]"
+ )
+
+ def test_strict_okay(self):
+ m = object()
+ compare(m, m, strict=True)
+
+ def test_strict_comparer_supplied(self):
+
+ compare_obj = Mock()
+ compare_obj.return_value = 'not equal'
+
+ self.checkRaises(
+ object(), object(),
+ "not equal",
+ strict=True,
+ comparers={object: compare_obj},
+ )
+
+ def test_strict_default_comparer(self):
+ class MyList(list):
+ pass
+ # default comparer used!
+ self.checkRaises(
+ MyList((1, 2, 3)), MyList((1, 2, 4)),
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "[1, 2]\n"
+ "\n"
+ "first:\n"
+ "[3]\n"
+ "\n"
+ "second:\n"
+ "[4]",
+ strict=True,
+ )
+
+ def test_list_subclass_strict(self):
+ m = Mock()
+ m.aCall()
+ self.checkRaises(
+ [call.aCall()], m.method_calls,
+ ("[call.aCall()] (<{0} 'list'>) != [call.aCall()] "
+ "({1})").format(class_type_name, call_list_repr),
+ strict=True,
+ )
+
+ def test_list_subclass_long_strict(self):
+ m = Mock()
+ m.call('X'*20)
+ self.checkRaises(
+ [call.call('Y'*20)], m.method_calls,
+ ("[call.call('YYYYYYYYYYYYYYYYYY... "
+ "(<{0} 'list'>) != "
+ "[call.call('XXXXXXXXXXXXXXXXXX... "
+ "({1})").format(class_type_name, call_list_repr),
+ strict=True,
+ )
+
+ def test_prefix(self):
+ self.checkRaises(1, 2, 'wrong number of orders: 1 != 2',
+ prefix='wrong number of orders')
+
+ def test_prefix_multiline(self):
+ self.checkRaises(
+ 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
+ "file content: \n--- first\n+++ second\n"
+ "@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz",
+ prefix='file content'
+ )
+
+ def test_suffix(self):
+ self.checkRaises(
+ 1, 2,
+ '1 != 2\n'
+ 'additional context',
+ suffix='additional context',
+ )
+
+ def test_labels_multiline(self):
+ self.checkRaises(
+ 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
+ "\n--- expected\n+++ actual\n"
+ "@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz",
+ x_label='expected',
+ y_label='actual'
+ )
+
+ def test_generator_with_non_generator(self):
+ self.checkRaises(
+ generator(1, 2, 3), None,
+ '<generator object generator at ...> != None',
+ )
+
+ def test_generator_with_buggy_generator(self):
+ def bad_gen():
+ yield 1
+ # raising a TypeError here is important :-/
+ raise TypeError('foo')
+
+ with ShouldRaise(TypeError('foo')):
+ compare(generator(1, 2, 3), bad_gen())
+
+ def test_nested_dict_tuple_values_different(self):
+ self.checkRaises(
+ dict(x=(1, 2, 3)), dict(x=(1, 2, 4)),
+ "dict not as expected:\n"
+ "\n"
+ "values differ:\n"
+ "'x': (1, 2, 3) != (1, 2, 4)\n"
+ '\n'
+ "While comparing ['x']: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(1, 2)\n"
+ "\n"
+ "first:\n"
+ "(3,)\n"
+ "\n"
+ "second:\n"
+ "(4,)"
+ )
+
+ def test_nested_dict_different(self):
+ self.checkRaises(
+ dict(x=dict(y=1)), dict(x=dict(y=2)),
+ "dict not as expected:\n"
+ "\n"
+ "values differ:\n"
+ "'x': {'y': 1} != {'y': 2}\n"
+ '\n'
+ "While comparing ['x']: dict not as expected:\n"
+ "\n"
+ "values differ:\n"
+ "'y': 1 != 2"
+ )
+
+ def test_tuple_list_different(self):
+ self.checkRaises(
+ (1, [2, 3, 5]), (1, [2, 4, 5]),
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(1,)\n"
+ "\n"
+ "first:\n"
+ "([2, 3, 5],)\n"
+ "\n"
+ "second:\n"
+ "([2, 4, 5],)\n"
+ "\n"
+ "While comparing [1]: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "[2]\n"
+ "\n"
+ "first:\n"
+ "[3, 5]\n"
+ "\n"
+ "second:\n"
+ "[4, 5]"
+ )
+
+ def test_tuple_long_strings_different(self):
+ self.checkRaises(
+ (1, 2, "foo\nbar\nbaz\n", 4),
+ (1, 2, "foo\nbob\nbaz\n", 4),
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(1, 2)\n"
+ "\n"
+ "first:\n"
+ "('foo\\nbar\\nbaz\\n', 4)\n"
+ "\n"
+ "second:\n"
+ "('foo\\nbob\\nbaz\\n', 4)\n"
+ "\n"
+ "While comparing [2]: \n"
+ "--- first\n"
+ "+++ second\n"
+ "@@ -1,4 +1,4 @@\n"
+ # check that show_whitespace bubbles down
+ " 'foo\\n'\n"
+ "-'bar\\n'\n"
+ "+'bob\\n'\n"
+ " 'baz\\n'\n"
+ " ''",
+ show_whitespace=True
+ )
+
+ def test_dict_multiple_differences(self):
+ self.checkRaises(
+ dict(x=(1, 2, 3), y=(4, 5, 6, )),
+ dict(x=(1, 2, 4), y=(4, 5, 7, )),
+ "dict not as expected:\n"
+ "\n"
+ "values differ:\n"
+ "'x': (1, 2, 3) != (1, 2, 4)\n"
+ "'y': (4, 5, 6) != (4, 5, 7)\n"
+ "\n"
+ "While comparing ['x']: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(1, 2)\n"
+ "\n"
+ "first:\n"
+ "(3,)\n"
+ "\n"
+ "second:\n"
+ "(4,)\n"
+ "\n"
+ "While comparing ['y']: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(4, 5)\n"
+ "\n"
+ "first:\n"
+ "(6,)\n"
+ "\n"
+ "second:\n"
+ "(7,)"
+ )
+
+ def test_deep_breadcrumbs(self):
+ obj1 = singleton('obj1')
+ obj2 = singleton('obj2')
+ gen1 = generator(obj1, obj2)
+ gen2 = generator(obj1, )
+ # dict -> list -> tuple -> generator
+ self.checkRaises(
+ dict(x=[1, ('a', 'b', gen1), 3], y=[3, 4]),
+ dict(x=[1, ('a', 'b', gen2), 3], y=[3, 4]), (
+ "dict not as expected:\n"
+ "\n"
+ "same:\n"
+ "['y']\n"
+ "\n"
+ "values differ:\n"
+ "'x': [1, ('a', 'b', {gen1}), 3] != [1, ('a', 'b', {gen2}), 3]"
+ "\n\n"
+ "While comparing ['x']: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "[1]\n"
+ "\n"
+ "first:\n"
+ "[('a', 'b', {gen1}), 3]\n"
+ "\n"
+ "second:\n"
+ "[('a', 'b', {gen2}), 3]\n"
+ "\n"
+ "While comparing ['x'][1]: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "('a', 'b')\n"
+ "\n"
+ "first:\n"
+ "({gen1},)\n"
+ "\n"
+ "second:\n"
+ "({gen2},)\n"
+ "\n"
+ "While comparing ['x'][1][2]: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(<obj1>,)\n"
+ "\n"
+ "first:\n"
+ "(<obj2>,)\n"
+ "\n"
+ "second:\n"
+ "()"
+ ).format(gen1=hexsub(repr(gen1)),
+ gen2=hexsub(repr(gen2)))
+ )
+
+ def test_nested_labels(self):
+ obj1 = singleton('obj1')
+ obj2 = singleton('obj2')
+ gen1 = generator(obj1, obj2)
+ gen2 = generator(obj1, )
+ # dict -> list -> tuple -> generator
+ self.checkRaises(
+ dict(x=[1, ('a', 'b', gen1), 3], y=[3, 4]),
+ dict(x=[1, ('a', 'b', gen2), 3], y=[3, 4]), (
+ "dict not as expected:\n"
+ "\n"
+ "same:\n"
+ "['y']\n"
+ "\n"
+ "values differ:\n"
+ "'x': [1, ('a', 'b', {gen1}), 3] (expected) != "
+ "[1, ('a', 'b', {gen2}), 3] (actual)\n"
+ "\n"
+ "While comparing ['x']: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "[1]\n"
+ "\n"
+ "expected:\n"
+ "[('a', 'b', {gen1}), 3]\n"
+ "\n"
+ "actual:\n"
+ "[('a', 'b', {gen2}), 3]\n"
+ "\n"
+ "While comparing ['x'][1]: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "('a', 'b')\n"
+ "\n"
+ "expected:\n"
+ "({gen1},)\n"
+ "\n"
+ "actual:\n"
+ "({gen2},)\n"
+ "\n"
+ "While comparing ['x'][1][2]: sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(<obj1>,)\n"
+ "\n"
+ "expected:\n"
+ "(<obj2>,)\n"
+ "\n"
+ "actual:\n"
+ "()"
+ ).format(gen1=hexsub(repr(gen1)),
+ gen2=hexsub(repr(gen2))),
+ x_label='expected',
+ y_label='actual',
+ )
+
+ def test_nested_strict_only_type_difference(self):
+ MyTuple = namedtuple('MyTuple', 'x y z')
+ type_repr = repr(MyTuple)
+ tuple_repr = repr(tuple)
+ self.checkRaises(
+ [MyTuple(1, 2, 3)],
+ [(1, 2, 3)],
+ ("sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "[]\n"
+ "\n"
+ "first:\n"
+ "[MyTuple(x=1, y=2, z=3)]\n"
+ "\n"
+ "second:\n"
+ "[(1, 2, 3)]\n"
+ "\n"
+ "While comparing [0]: MyTuple(x=1, y=2, z=3) "
+ "(%s) "
+ "!= (1, 2, 3) "
+ "(%s)") % (type_repr, tuple_repr),
+ strict=True
+ )
+
+ def test_strict_nested_different(self):
+ if py_2:
+ expected = "[1, 2] (<type 'list'>) != (1, 3) (<type 'tuple'>)"
+ else:
+ expected = "[1, 2] (<class 'list'>) != (1, 3) (<class 'tuple'>)"
+
+ self.checkRaises(
+ (1, 2, [1, 2]), (1, 2, (1, 3)),
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "(1, 2)\n"
+ "\n"
+ "first:\n"
+ "([1, 2],)\n"
+ "\n"
+ "second:\n"
+ "((1, 3),)"
+ "\n\n"
+ "While comparing [2]: " + expected,
+ strict=True,
+ )
+
+ def test_namedtuple_equal(self):
+ class_ = namedtuple('Foo', 'x')
+ compare(class_(1), class_(1))
+
+ def test_namedtuple_same_type(self):
+ class_ = namedtuple('Foo', 'x y')
+ self.checkRaises(
+ class_(1, 2), class_(1, 3),
+ "Foo not as expected:\n\n"
+ "same:\n"
+ "['x']\n\n"
+ "values differ:\n"
+ "'y': 2 != 3"
+ )
+
+ def test_namedtuple_different_type(self):
+ class_a = namedtuple('Foo', 'x y')
+ class_b = namedtuple('Bar', 'x y z')
+ self.checkRaises(
+ class_a(1, 2), class_b(1, 2, 3),
+ "Foo(x=1, y=2) (<class 'testfixtures.tests.test_compare.Foo'>) != "
+ "Bar(x=1, y=2, z=3) "
+ "(<class 'testfixtures.tests.test_compare.Bar'>)"
+ )
+
+ def test_dict_with_list(self):
+ self.checkRaises(
+ {1: 'one', 2: 'two'}, [1, 2],
+ "{1: 'one', 2: 'two'} != [1, 2]"
+ )
+
+ def test_explicit_expected(self):
+ self.checkRaises('x', expected='y',
+ message="'y' (expected) != 'x' (actual)")
+
+ def test_explicit_actual(self):
+ self.checkRaises('x', actual='y',
+ message="'x' (expected) != 'y' (actual)")
+
+ def test_explicit_both(self):
+ self.checkRaises(message="'x' (expected) != 'y' (actual)",
+ expected='x', actual='y')
+
+ def test_explicit_and_labels(self):
+ self.checkRaises(message="'x' (x_label) != 'y' (y_label)",
+ expected='x', actual='y',
+ x_label='x_label', y_label='y_label')
+
+ def test_invalid_two_args_expected(self):
+ with ShouldRaise(TypeError(
+ "Exactly two objects needed, you supplied: ['z', 'x', 'y']"
+ )):
+ compare('x', 'y', expected='z')
+
+ def test_invalid_two_args_actual(self):
+ with ShouldRaise(TypeError(
+ "Exactly two objects needed, you supplied: ['x', 'y', 'z']"
+ )):
+ compare('x', 'y', actual='z')
+
+ def test_invalid_zero_args(self):
+ with ShouldRaise(TypeError(
+ 'Exactly two objects needed, you supplied: []'
+ )):
+ compare()
+
+ def test_invalid_one_args(self):
+ with ShouldRaise(TypeError(
+ "Exactly two objects needed, you supplied: ['x']"
+ )):
+ compare('x')
+
+ def test_invalid_three_args(self):
+ with ShouldRaise(TypeError(
+ "Exactly two objects needed, you supplied: ['x', 'y', 'z']"
+ )):
+ compare('x', 'y', 'z')
+
+ def test_dont_raise(self):
+ self.assertEqual(compare('x', 'y', raises=False), "'x' != 'y'")
+
+ class OrmObj(object):
+ def __init__(self, a):
+ self.a = a
+ def __eq__(self, other):
+ return True
+ def __repr__(self):
+ return 'OrmObj: '+str(self.a)
+
+ def test_django_orm_is_horrible(self):
+ def query_set():
+ yield self.OrmObj(1)
+ yield self.OrmObj(2)
+
+ def compare_orm_obj(x, y, context):
+ if x.a != y.a:
+ return 'OrmObj: %s != %s' % (x.a, y.a)
+
+ self.checkRaises(
+ message=(
+ "sequence not as expected:\n\n"
+ "same:\n(OrmObj: 1,)\n\n"
+ "expected:\n(OrmObj: 3,)\n\n"
+ "actual:\n(OrmObj: 2,)\n\n"
+ "While comparing [1]: OrmObj: 3 != 2"
+ ),
+ expected=[self.OrmObj(1), self.OrmObj(3)],
+ actual=query_set(),
+ comparers={self.OrmObj: compare_orm_obj},
+ ignore_eq=True
+ )
+
+ def test_django_orm_is_horrible_part_2(self):
+
+ def compare_orm_obj(x, y, context):
+ return context.different(x.a, y.a, '.a')
+
+ t_compare = partial(compare,
+ comparers={self.OrmObj: compare_orm_obj},
+ ignore_eq=True)
+
+ t_compare(self.OrmObj(1), self.OrmObj(1))
+ t_compare(self.OrmObj('some longish string'),
+ self.OrmObj('some longish string'))
+ t_compare(self.OrmObj(date(2016, 1, 1)),
+ self.OrmObj(date(2016, 1, 1)))
+
+ def test_django_orm_is_horrible_part_3(self):
+ self.checkRaises(
+ message=(
+ "OrmObj: 1 (expected) != OrmObj: 1 (actual)"
+ ),
+ expected=self.OrmObj(1),
+ actual=self.OrmObj(1),
+ ignore_eq=True
+ )
+
+ def test_django_orm_is_horrible_part_4(self):
+ self.checkRaises(
+ message='[1] (expected) != 2 (actual)',
+ expected=[1],
+ actual=2,
+ ignore_eq=True
+ )
diff --git a/testfixtures/tests/test_comparison.py b/testfixtures/tests/test_comparison.py
new file mode 100644
index 0000000..d01a81d
--- /dev/null
+++ b/testfixtures/tests/test_comparison.py
@@ -0,0 +1,742 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from testfixtures import Comparison as C, TempDirectory, compare
+from testfixtures.compat import PY2, PY3, exception_module
+from testfixtures.tests.sample1 import TestClassA, a_function
+from unittest import TestCase
+import sys
+
+from .compat import py_33_plus, py_34_plus
+
+
+class AClass:
+
+ def __init__(self, x, y=None):
+ self.x = x
+ if y:
+ self.y = y
+
+ def __repr__(self):
+ return '<'+self.__class__.__name__+'>'
+
+
+class BClass(AClass):
+ pass
+
+
+class WeirdException(Exception):
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+
+
+class X(object):
+ __slots__ = ['x']
+
+ def __repr__(self):
+ return '<X>'
+
+
+class FussyDefineComparison(object):
+
+ def __init__(self, attr):
+ self.attr = attr
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__): # pragma: no cover
+ raise TypeError()
+ return False # pragma: no cover
+
+ def __ne__(self, other):
+ return not self == other # pragma: no cover
+
+
+class TestC(TestCase):
+
+ def test_example(self):
+ # In this pattern, we want to check a sequence is
+ # of the correct type and order.
+ r = a_function()
+ self.assertEqual(r, (
+ C('testfixtures.tests.sample1.TestClassA'),
+ C('testfixtures.tests.sample1.TestClassB'),
+ C('testfixtures.tests.sample1.TestClassA'),
+ ))
+ # We also want to check specific parts of some
+ # of the returned objects' attributes
+ self.assertEqual(r[0].args[0], 1)
+ self.assertEqual(r[1].args[0], 2)
+ self.assertEqual(r[2].args[0], 3)
+
+ def test_example_with_object(self):
+ # Here we see compare an object with a Comparison
+ # based on an object of the same type and with the
+ # same attributes:
+ self.assertEqual(
+ C(AClass(1, 2)),
+ AClass(1, 2),
+ )
+ # ...even though the original class doesn't support
+ # meaningful comparison:
+ self.assertNotEqual(
+ AClass(1, 2),
+ AClass(1, 2),
+ )
+
+ def test_example_with_vars(self):
+ # Here we use a Comparison to make sure both the
+ # type and attributes of an object are correct.
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass',
+ x=1, y=2),
+ AClass(1, 2),
+ )
+
+ def test_example_with_odd_vars(self):
+        # If the variable names clash with parameters to the
+ # Comparison constructor, they can be specified in a
+ # dict:
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass',
+ {'x': 1, 'y': 2}),
+ AClass(1, 2),
+ )
+
+ def test_example_not_strict(self):
+ # Here, we only care about the 'x' attribute of
+ # the AClass object, so we turn strict mode off.
+ # With strict mode off, only attributes specified
+ # in the Comparison object will be checked, and
+ # any others will be ignored.
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass',
+ x=1,
+ strict=False),
+ AClass(1, 2),
+ )
+
+ def test_example_dont_use_c_wrappers_on_both_sides(self):
+ # NB: don't use C wrappers on both sides!
+ e = ValueError('some message')
+ try:
+ self.assertEqual(C(e), C(e))
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(
+ e.args,
+ (("<C(failed):{mod}.ValueError>wrong type</C> != \n"
+ " <C:{mod}.ValueError>\n"
+ " args:('some message',)\n"
+ " </C>"
+ ).format(mod=exception_module), ))
+ else:
+ self.fail('No exception raised!')
+
+ def test_repr_module(self):
+ self.assertEqual(repr(C('datetime')), '<C:datetime>')
+
+ def test_repr_class(self):
+ self.assertEqual(
+ repr(C('testfixtures.tests.sample1.TestClassA')),
+ '<C:testfixtures.tests.sample1.TestClassA>'
+ )
+
+ def test_repr_function(self):
+ self.assertEqual(
+ repr(C('testfixtures.tests.sample1.z')),
+ '<C:testfixtures.tests.sample1.z>'
+ )
+
+ def test_repr_instance(self):
+ self.assertEqual(
+ repr(C(TestClassA('something'))),
+ "\n"
+ " <C:testfixtures.tests.sample1.TestClassA>\n"
+ " args:('something',)\n"
+ " </C>"
+ )
+
+ def test_repr_exception(self):
+ self.assertEqual(
+ repr(C(ValueError('something'))),
+ ("\n"
+ " <C:{0}.ValueError>\n"
+ " args:('something',)\n"
+ " </C>"
+ ).format(exception_module))
+
+ def test_repr_exception_not_args(self):
+ r = repr(C(WeirdException(1, 2)))
+
+ if sys.version_info >= (3, 2, 4):
+ # in PY3, even args that aren't set still appear to be there
+ args = " args:(1, 2)\n"
+ else:
+ args = " args:()\n"
+
+ self.assertEqual(
+ r,
+ "\n"
+ " <C:testfixtures.tests.test_comparison.WeirdException>\n"
+ + args +
+ " x:1\n"
+ " y:2\n"
+ " </C>"
+ )
+
+ def test_repr_class_and_vars(self):
+ self.assertEqual(
+ repr(C(TestClassA, {'args': (1, )})),
+ "\n"
+ " <C:testfixtures.tests.sample1.TestClassA>\n"
+ " args:(1,)\n"
+ " </C>"
+ )
+
+ def test_repr_nested(self):
+ self.assertEqual(
+ repr(C(TestClassA, y=C(AClass), z=C(BClass(1, 2)))),
+ "\n"
+ " <C:testfixtures.tests.sample1.TestClassA>\n"
+ " y:<C:testfixtures.tests.test_comparison.AClass>\n"
+ " z:\n"
+ " <C:testfixtures.tests.test_comparison.BClass>\n"
+ " x:1\n"
+ " y:2\n"
+ " </C>\n"
+ " </C>"
+ )
+
+ def test_repr_failed_wrong_class(self):
+ try:
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass',
+ x=1, y=2),
+ BClass(1, 2)
+ )
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(e.args, ((
+ "<C(failed):testfixtures.tests.test_comparison.AClass>"
+ "wrong type</C> != <BClass>"
+ ), ))
+ else:
+ self.fail('No exception raised!')
+
+ def test_repr_failed_all_reasons_in_one(self):
+ if py_34_plus:
+ expected = (
+ "\n <C(failed):testfixtures.tests.test_com[79 chars] </C>"
+ " != <AClass>",
+ )
+ else:
+ expected = (
+ "\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " x:1 not in Comparison\n"
+ " y:5 != 2\n"
+ " z:'missing' not in other\n"
+ " </C> != <AClass>",
+ )
+ try:
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass',
+ y=5, z='missing'),
+ AClass(1, 2)
+ )
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(e.args, expected)
+ else:
+ self.fail('No exception raised!')
+
+ def test_repr_failed_not_in_other(self):
+ if py_34_plus:
+ expected = (
+ "\n <C(failed):testfixtures.tests.test_com[39 chars] </C>"
+ " != <AClass>",
+ )
+ else:
+ expected = (
+ "\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " z:(3,) not in other\n"
+ " </C> != <AClass>",
+ )
+ # use single element tuple to check %
+ try:
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass',
+ x=1, y=2, z=(3, )),
+ AClass(1, 2)
+ )
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(e.args, expected)
+ else:
+ self.fail('No exception raised!')
+
+ def test_repr_failed_not_in_self_strict(self):
+ # use single element tuple to check %
+ if py_34_plus:
+ expected = (
+ "\n <C(failed):testfixtures.tests.test_com[44 chars] </C>"
+ " != <AClass>",
+ )
+ else:
+ expected = (
+ "\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " x:(1,) not in Comparison\n"
+ " </C> != <AClass>",
+ )
+ try:
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass', y=2),
+ AClass((1, ), 2)
+ )
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(e.args, expected)
+ else:
+ self.fail('No exception raised!')
+
+ def test_repr_failed_not_in_self_not_strict(self):
+ if py_34_plus:
+ expected = (
+ "\n <C(failed):testfixtures.tests.test_com[39 chars] </C>"
+ " != <AClass>",
+ )
+ else:
+ expected = (
+ "\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " z:(3,) not in other\n"
+ " </C> != <AClass>",
+ )
+ try:
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass',
+ x=1, y=2, z=(3, )),
+ AClass(1, 2)
+ )
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(e.args, expected)
+ else:
+ self.fail('No exception raised!')
+
+ def test_repr_failed_one_attribute_not_equal(self):
+ # use single element tuple to check %
+ try:
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.AClass', x=1, y=(2, )),
+ AClass(1, (3, ))
+ )
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(
+ e.args,
+ ("\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " y:(2,) != (3,)\n"
+ " </C> != <AClass>",
+ ))
+ else:
+ self.fail('No exception raised!')
+
+ def test_repr_failed_nested(self):
+ left_side = [C(AClass, x=1, y=2),
+ C(BClass, x=C(AClass, x=1), y=C(AClass))]
+ right_side = [AClass(1, 3), AClass(1, 3)]
+
+ # do the comparison
+ left_side == right_side
+
+ self.assertEqual(
+ "[\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " y:2 != 3\n"
+ " </C>, \n"
+ " <C:testfixtures.tests.test_comparison.BClass>\n"
+ " x:\n"
+ " <C:testfixtures.tests.test_comparison.AClass>\n"
+ " x:1\n"
+ " </C>\n"
+ " y:<C:testfixtures.tests.test_comparison.AClass>\n"
+ " </C>]",
+ repr(left_side)
+ )
+
+ self.assertEqual(
+ "[<AClass>, <AClass>]",
+ repr(right_side)
+ )
+
+ def test_repr_failed_nested_failed(self):
+ left_side = [C(AClass, x=1, y=2),
+ C(BClass,
+ x=C(AClass, x=1, strict=False),
+ y=C(AClass, z=2))]
+ right_side = [AClass(1, 2),
+ BClass(AClass(1, 2), AClass(1, 2))]
+
+ # do the comparison
+ left_side == right_side
+
+ self.assertEqual(
+ "[\n"
+ " <C:testfixtures.tests.test_comparison.AClass>\n"
+ " x:1\n"
+ " y:2\n"
+ " </C>, \n"
+ " <C(failed):testfixtures.tests.test_comparison.BClass>\n"
+ " y:\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " x:1 not in Comparison\n"
+ " y:2 not in Comparison\n"
+ " z:2 not in other\n"
+ " </C> != <AClass>\n"
+ " </C>]",
+ repr(left_side)
+ )
+
+ self.assertEqual(
+ '[<AClass>, <BClass>]',
+ repr(right_side)
+ )
+
+ def test_repr_failed_passed_failed(self):
+ c = C('testfixtures.tests.test_comparison.AClass', x=1, y=2)
+
+ try:
+ self.assertEqual(c, AClass(1, 3))
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(
+ e.args,
+ ("\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " y:2 != 3\n"
+ " </C> != <AClass>",
+ ))
+ else:
+ self.fail('No exception raised!')
+
+ self.assertEqual(c, AClass(1, 2))
+
+ try:
+ self.assertEqual(c, AClass(3, 2))
+ except Exception as e:
+ self.failUnless(isinstance(e, AssertionError))
+ self.assertEqual(
+ e.args,
+ ("\n"
+ " <C(failed):testfixtures.tests.test_comparison.AClass>\n"
+ " x:1 != 3\n"
+ " </C> != <AClass>",
+ ))
+ else:
+ self.fail('No exception raised!')
+
+ def test_first(self):
+ self.assertEqual(
+ C('testfixtures.tests.sample1.TestClassA'),
+ TestClassA()
+ )
+
+ def test_second(self):
+ self.assertEqual(
+ TestClassA(),
+ C('testfixtures.tests.sample1.TestClassA'),
+ )
+
+ def test_not_same_first(self):
+ self.assertNotEqual(
+ C('datetime'),
+ TestClassA()
+ )
+
+ def test_not_same_second(self):
+ self.assertNotEqual(
+ TestClassA(),
+ C('datetime')
+ )
+
+ def test_object_supplied(self):
+ self.assertEqual(
+ TestClassA(1),
+ C(TestClassA(1))
+ )
+
+ def test_class_and_vars(self):
+ self.assertEqual(
+ TestClassA(1),
+ C(TestClassA, {'args': (1, )})
+ )
+
+ def test_class_and_kw(self):
+ self.assertEqual(
+ TestClassA(1),
+ C(TestClassA, args=(1, ))
+ )
+
+ def test_class_and_vars_and_kw(self):
+ self.assertEqual(
+ AClass(1, 2),
+ C(AClass, {'x': 1}, y=2)
+ )
+
+ def test_object_and_vars(self):
+ # vars passed are used instead of the object's
+ self.assertEqual(
+ TestClassA(1),
+ C(TestClassA(), {'args': (1, )})
+ )
+
+ def test_object_and_kw(self):
+ # kws passed are used instead of the object's
+ self.assertEqual(
+ TestClassA(1),
+ C(TestClassA(), args=(1, ))
+ )
+
+ def test_object_not_strict(self):
+ # only attributes on comparison object
+ # are used
+ self.assertEqual(
+ C(AClass(1), strict=False),
+ AClass(1, 2),
+ )
+
+ def test_exception(self):
+ self.assertEqual(
+ ValueError('foo'),
+ C(ValueError('foo'))
+ )
+
+ def test_exception_class_and_args(self):
+ self.assertEqual(
+ ValueError('foo'),
+ C(ValueError, args=('foo', ))
+ )
+
+ def test_exception_instance_and_args(self):
+ self.assertEqual(
+ ValueError('foo'),
+ C(ValueError('bar'), args=('foo', ))
+ )
+
+ def test_exception_not_same(self):
+ self.assertNotEqual(
+ ValueError('foo'),
+ C(ValueError('bar'))
+ )
+
+ def test_exception_no_args_different(self):
+ self.assertNotEqual(
+ WeirdException(1, 2),
+ C(WeirdException(1, 3))
+ )
+
+ def test_exception_no_args_same(self):
+ self.assertEqual(
+ C(WeirdException(1, 2)),
+ WeirdException(1, 2)
+ )
+
+ def test_repr_file_different(self):
+ with TempDirectory() as d:
+ path = d.write('file', b'stuff')
+ f = open(path)
+ f.close()
+ if PY3:
+ c = C('io.TextIOWrapper', name=path, mode='r', closed=False,
+ strict=False)
+ self.assertNotEqual(f, c)
+ compare(repr(c),
+ "\n"
+ " <C(failed):_io.TextIOWrapper>\n"
+ " closed:False != True\n"
+ " </C>",
+ )
+ else:
+ c = C(file, name=path, mode='r', closed=False, strict=False)
+ self.assertNotEqual(f, c)
+ compare(repr(c),
+ "\n"
+ " <C(failed):__builtin__.file>\n"
+ " closed:False != True\n"
+ " </C>",
+ )
+
+ def test_file_same(self):
+ with TempDirectory() as d:
+ path = d.write('file', b'stuff')
+ f = open(path)
+ f.close()
+ if PY3:
+ self.assertEqual(
+ f,
+ C('io.TextIOWrapper', name=path, mode='r', closed=True,
+ strict=False)
+ )
+ else:
+ self.assertEqual(
+ f,
+ C(file, name=path, mode='r', closed=True, strict=False)
+ )
+
+ def test_no___dict___strict(self):
+ x = X()
+ try:
+ self.assertEqual(
+ C(X, x=1),
+ x
+ )
+ except TypeError as e:
+ self.assertEqual(e.args, (
+ '<X> does not support vars() so cannot do strict comparison',
+ ))
+ else:
+ self.fail('No exception raised!')
+
+ def test_no___dict___not_strict_same(self):
+ x = X()
+ x.x = 1
+ self.assertEqual(C(X, x=1, strict=False), x)
+
+ def test_no___dict___not_strict_different(self):
+ if py_34_plus:
+ expected = (
+ "\n <C(failed):testfixtures.tests.test_com[42 chars] </C>"
+ " != <X>",
+ )
+ else:
+ expected = (
+ "\n"
+ " <C(failed):testfixtures.tests.test_comparison.X>\n"
+ " x:1 != 2\n"
+ " y:2 not in other\n"
+ " </C> != <X>",
+ )
+ x = X()
+ x.x = 2
+ try:
+ self.assertEqual(
+ C(X, x=1, y=2, strict=False),
+ x
+ )
+ except AssertionError as e:
+ compare(e.args, expected)
+ else:
+ self.fail('No exception raised!')
+
+ def test_compared_object_defines_eq(self):
+ # If an object defines eq, such as Django instances,
+ # things become tricky
+
+ class Annoying:
+ def __init__(self):
+ self.eq_called = 0
+
+ def __eq__(self, other):
+ self.eq_called += 1
+ if isinstance(other, Annoying):
+ return True
+ return False
+
+ self.assertEqual(Annoying(), Annoying())
+
+ # Suddenly, order matters.
+
+ # This order is wrong, as it uses the class's __eq__:
+ self.assertFalse(Annoying() == C(Annoying))
+ if PY2:
+ # although this, which is subtly different, does not:
+ self.assertFalse(Annoying() != C(Annoying))
+ else:
+ # but on PY3 __eq__ is used as a fallback:
+ self.assertTrue(Annoying() != C(Annoying))
+
+ # This is the right ordering:
+ self.assertTrue(C(Annoying) == Annoying())
+ self.assertFalse(C(Annoying) != Annoying())
+
+ # When the ordering is right, you still get the useful
+ # comparison representation afterwards
+ c = C(Annoying, eq_called=1)
+ c == Annoying()
+ self.assertEqual(
+ repr(c),
+ '\n <C(failed):testfixtures.tests.test_comparison.Annoying>\n'
+ ' eq_called:1 != 0\n'
+ ' </C>')
+
+ def test_compared_object_class_attributes(self):
+
+ class Classy(object):
+ x = 1
+ y = 2
+
+ self.assertEqual(C(Classy, x=1, y=2), Classy())
+
+ c = C(Classy, x=1, y=1)
+ self.assertNotEqual(c, Classy())
+ self.assertEqual(
+ repr(c),
+ '\n <C(failed):testfixtures.tests.test_comparison.Classy>\n'
+ ' y:1 != 2\n'
+ ' </C>')
+
+ ce = C(Classy, x=1, y=1)
+ ca = Classy()
+ ca.y = 1
+ self.assertEqual(ce, ca)
+
+ ce = C(Classy, x=1, y=2)
+ ca = Classy()
+ ca.y = 1
+ self.assertNotEqual(ce, ca)
+ self.assertEqual(
+ repr(ce),
+ '\n <C(failed):testfixtures.tests.test_comparison.Classy>\n'
+ ' y:2 != 1\n'
+ ' </C>')
+
+ def test_importerror(self):
+ self.failIf(
+ C(ImportError('x')) != ImportError('x')
+ )
+
+ def test_class_defines_comparison_strictly(self):
+ self.assertEqual(
+ C('testfixtures.tests.test_comparison.FussyDefineComparison',
+ attr=1),
+ FussyDefineComparison(1)
+ )
+
+ def test_cant_resolve(self):
+ try:
+ C('testfixtures.bonkers')
+ except Exception as e:
+ self.failUnless(isinstance(e, AttributeError))
+ self.assertEqual(
+ e.args,
+ ("'testfixtures.bonkers' could not be resolved", )
+ )
+ else:
+ self.fail('No exception raised!')
+
+ def test_no_name(self):
+ class NoName(object):
+ pass
+ NoName.__name__ = ''
+ NoName.__module__ = ''
+ c = C(NoName)
+ if py_33_plus:
+ expected = "<C:<class '.TestC.test_no_name.<locals>.NoName'>>"
+ else:
+ expected = "<C:<class '.'>>"
+ self.assertEqual(repr(c), expected)
diff --git a/testfixtures/tests/test_components.py b/testfixtures/tests/test_components.py
new file mode 100644
index 0000000..3bdf0b5
--- /dev/null
+++ b/testfixtures/tests/test_components.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2011-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from nose.plugins.skip import SkipTest
+
+try:
+ from testfixtures.components import TestComponents
+except ImportError: # pragma: no cover
+ raise SkipTest('zope.component is not available')
+
+from mock import Mock, call
+from testfixtures import Replacer, compare
+from unittest import TestCase
+
+from warnings import catch_warnings
+
+
+class ComponentsTests(TestCase):
+
+ def test_atexit(self):
+ m = Mock()
+ with Replacer() as r:
+ r.replace('atexit.register', m.register)
+
+ c = TestComponents()
+
+ expected = [call.register(c.atexit)]
+
+ compare(expected, m.mock_calls)
+
+ with catch_warnings(record=True) as w:
+ c.atexit()
+ self.assertTrue(len(w), 1)
+ compare(str(w[0].message), ( # pragma: no branch
+ "TestComponents instances not uninstalled by shutdown!"
+ ))
+
+ c.uninstall()
+
+ compare(expected, m.mock_calls)
+
+ # check re-running has no ill effects
+ c.atexit()
+
+ compare(expected, m.mock_calls)
diff --git a/testfixtures/tests/test_date.py b/testfixtures/tests/test_date.py
new file mode 100644
index 0000000..078f45c
--- /dev/null
+++ b/testfixtures/tests/test_date.py
@@ -0,0 +1,246 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+from datetime import date as d
+from time import strptime
+from testfixtures import ShouldRaise, test_date, replace, compare
+from testfixtures.tests import sample1, sample2
+from unittest import TestCase
+
+
+class TestDate(TestCase):
+
+ # NB: Only the today method is currently stubbed out,
+ # if you need other methods, tests and patches
+    # gratefully received!
+
+ @replace('datetime.date', test_date())
+ def test_today(self):
+ from datetime import date
+ compare(date.today(), d(2001, 1, 1))
+ compare(date.today(), d(2001, 1, 2))
+ compare(date.today(), d(2001, 1, 4))
+
+ @replace('datetime.date', test_date(2001, 2, 3))
+ def test_today_supplied(self):
+ from datetime import date
+ compare(date.today(), d(2001, 2, 3))
+
+ @replace('datetime.date', test_date(year=2001, month=2, day=3))
+ def test_today_all_kw(self):
+ from datetime import date
+ compare(date.today(), d(2001, 2, 3))
+
+ @replace('datetime.date', test_date(None))
+ def test_today_sequence(self, t):
+ t.add(2002, 1, 1)
+ t.add(2002, 1, 2)
+ t.add(2002, 1, 3)
+ from datetime import date
+ compare(date.today(), d(2002, 1, 1))
+ compare(date.today(), d(2002, 1, 2))
+ compare(date.today(), d(2002, 1, 3))
+
+ @replace('datetime.date', test_date(None))
+ def test_today_requested_longer_than_supplied(self, t):
+ t.add(2002, 1, 1)
+ t.add(2002, 1, 2)
+ from datetime import date
+ compare(date.today(), d(2002, 1, 1))
+ compare(date.today(), d(2002, 1, 2))
+ compare(date.today(), d(2002, 1, 3))
+ compare(date.today(), d(2002, 1, 5))
+
+ @replace('datetime.date', test_date(None))
+ def test_add_date_supplied(self):
+ from datetime import date
+ date.add(d(2001, 1, 2))
+ date.add(date(2001, 1, 3))
+ compare(date.today(), d(2001, 1, 2))
+ compare(date.today(), d(2001, 1, 3))
+
+ @replace('datetime.date', test_date(strict=True))
+ def test_call(self, t):
+ compare(t(2002, 1, 2), d(2002, 1, 2))
+ from datetime import date
+ dt = date(2003, 2, 1)
+ self.failIf(dt.__class__ is d)
+ compare(dt, d(2003, 2, 1))
+
+ def test_gotcha_import(self):
+ # standard `replace` caveat, make sure you
+        # patch all relevant places where date
+ # has been imported:
+
+ @replace('datetime.date', test_date())
+ def test_something():
+ from datetime import date
+ compare(date.today(), d(2001, 1, 1))
+ compare(sample1.str_today_1(), '2001-01-02')
+
+ with ShouldRaise(AssertionError) as s:
+ test_something()
+ # This convoluted check is because we can't stub
+ # out the date, since we're testing stubbing out
+ # the date ;-)
+ j, dt1, j, dt2, j = s.raised.args[0].split("'")
+ # check we can parse the date
+ dt1 = strptime(dt1, '%Y-%m-%d')
+ # check the dt2 bit was as it should be
+ compare(dt2, '2001-01-02')
+
+ # What you need to do is replace the imported type:
+ @replace('testfixtures.tests.sample1.date', test_date())
+ def test_something():
+ compare(sample1.str_today_1(), '2001-01-01')
+
+ test_something()
+
+ def test_gotcha_import_and_obtain(self):
+ # Another gotcha is where people have locally obtained
+        # a class attribute, where the normal patching doesn't
+ # work:
+
+ @replace('testfixtures.tests.sample1.date', test_date())
+ def test_something():
+ compare(sample1.str_today_2(), '2001-01-01')
+
+ with ShouldRaise(AssertionError) as s:
+ test_something()
+ # This convoluted check is because we can't stub
+ # out the date, since we're testing stubbing out
+ # the date ;-)
+ j, dt1, j, dt2, j = s.raised.args[0].split("'")
+ # check we can parse the date
+ dt1 = strptime(dt1, '%Y-%m-%d')
+ # check the dt2 bit was as it should be
+ compare(dt2, '2001-01-01')
+
+ # What you need to do is replace the imported name:
+ @replace('testfixtures.tests.sample1.today', test_date().today)
+ def test_something():
+ compare(sample1.str_today_2(), '2001-01-01')
+
+ test_something()
+
+ # if you have an embedded `today` as above, *and* you need to supply
+ # a list of required dates, then it's often simplest just to
+ # do a manual try-finally with a replacer:
+ def test_import_and_obtain_with_lists(self):
+
+ t = test_date(None)
+ t.add(2002, 1, 1)
+ t.add(2002, 1, 2)
+
+ from testfixtures import Replacer
+ r = Replacer()
+ r.replace('testfixtures.tests.sample1.today', t.today)
+ try:
+ compare(sample1.str_today_2(), '2002-01-01')
+ compare(sample1.str_today_2(), '2002-01-02')
+ finally:
+ r.restore()
+
+ @replace('datetime.date', test_date())
+ def test_repr(self):
+ from datetime import date
+ compare(repr(date), "<class 'testfixtures.tdatetime.tdate'>")
+
+ @replace('datetime.date', test_date(delta=2))
+ def test_delta(self):
+ from datetime import date
+ compare(date.today(), d(2001, 1, 1))
+ compare(date.today(), d(2001, 1, 3))
+ compare(date.today(), d(2001, 1, 5))
+
+ @replace('datetime.date', test_date(delta_type='weeks'))
+ def test_delta_type(self):
+ from datetime import date
+ compare(date.today(), d(2001, 1, 1))
+ compare(date.today(), d(2001, 1, 8))
+ compare(date.today(), d(2001, 1, 22))
+
+ @replace('datetime.date', test_date(None))
+ def test_set(self):
+ from datetime import date
+ date.set(2001, 1, 2)
+ compare(date.today(), d(2001, 1, 2))
+ date.set(2002, 1, 1)
+ compare(date.today(), d(2002, 1, 1))
+ compare(date.today(), d(2002, 1, 3))
+
+ @replace('datetime.date', test_date(None))
+ def test_set_date_supplied(self):
+ from datetime import date
+ date.set(d(2001, 1, 2))
+ compare(date.today(), d(2001, 1, 2))
+ date.set(date(2001, 1, 3))
+ compare(date.today(), d(2001, 1, 3))
+
+ @replace('datetime.date', test_date(None))
+ def test_set_kw(self):
+ from datetime import date
+ date.set(year=2001, month=1, day=2)
+ compare(date.today(), d(2001, 1, 2))
+
+ @replace('datetime.date', test_date(None))
+ def test_add_kw(self, t):
+ t.add(year=2002, month=1, day=1)
+ from datetime import date
+ compare(date.today(), d(2002, 1, 1))
+
+ @replace('datetime.date', test_date(strict=True))
+ def test_isinstance_strict_true(self):
+ from datetime import date
+ to_check = []
+ to_check.append(date(1999, 1, 1))
+ to_check.append(date.today())
+ date.set(2001, 1, 2)
+ to_check.append(date.today())
+ date.add(2001, 1, 3)
+ to_check.append(date.today())
+ to_check.append(date.today())
+ date.set(date(2001, 1, 4))
+ to_check.append(date.today())
+ date.add(date(2001, 1, 5))
+ to_check.append(date.today())
+ to_check.append(date.today())
+ date.set(d(2001, 1, 4))
+ to_check.append(date.today())
+ date.add(d(2001, 1, 5))
+ to_check.append(date.today())
+ to_check.append(date.today())
+
+ for inst in to_check:
+ self.failUnless(isinstance(inst, date), inst)
+ self.failUnless(inst.__class__ is date, inst)
+ self.failUnless(isinstance(inst, d), inst)
+ self.failIf(inst.__class__ is d, inst)
+
+ @replace('datetime.date', test_date())
+ def test_isinstance_default(self):
+ from datetime import date
+ to_check = []
+ to_check.append(date(1999, 1, 1))
+ to_check.append(date.today())
+ date.set(2001, 1, 2)
+ to_check.append(date.today())
+ date.add(2001, 1, 3)
+ to_check.append(date.today())
+ to_check.append(date.today())
+ date.set(date(2001, 1, 4))
+ to_check.append(date.today())
+ date.add(date(2001, 1, 5))
+ to_check.append(date.today())
+ to_check.append(date.today())
+ date.set(d(2001, 1, 4))
+ to_check.append(date.today())
+ date.add(d(2001, 1, 5))
+ to_check.append(date.today())
+ to_check.append(date.today())
+
+ for inst in to_check:
+ self.failIf(isinstance(inst, date), inst)
+ self.failIf(inst.__class__ is date, inst)
+ self.failUnless(isinstance(inst, d), inst)
+ self.failUnless(inst.__class__ is d, inst)
diff --git a/testfixtures/tests/test_datetime.py b/testfixtures/tests/test_datetime.py
new file mode 100644
index 0000000..7f2340a
--- /dev/null
+++ b/testfixtures/tests/test_datetime.py
@@ -0,0 +1,356 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from datetime import date
+from datetime import datetime as d
+from datetime import timedelta
+from datetime import tzinfo
+from testfixtures import test_datetime, test_date
+from testfixtures import replace, Replacer, compare, ShouldRaise
+from testfixtures.tests import sample1
+from unittest import TestCase
+
+
class TestTZInfo(tzinfo):
    """A tzinfo stub: fixed 3-minute base offset plus a 1-minute DST shift."""

    # stop test collectors from treating this as a test case
    __test__ = False

    def utcoffset(self, dt):
        # total offset is the base offset plus the current DST adjustment
        base = timedelta(minutes=3)
        return base + self.dst(dt)

    def dst(self, dt):
        # constant one-minute DST shift
        return timedelta(minutes=1)
+
+
class TestTZ2Info(tzinfo):
    """A tzinfo stub: flat 5-minute offset with no DST at all."""

    # stop test collectors from treating this as a test case
    __test__ = False

    def utcoffset(self, dt):
        # DST is always zero here, so the total offset is just 5 minutes
        return self.dst(dt) + timedelta(minutes=5)

    def dst(self, dt):
        return timedelta(minutes=0)
+
+
class TestDateTime(TestCase):
    """Tests for test_datetime, the replacement for datetime.datetime.

    Changes from the original: the deprecated failUnless/failIf aliases
    (removed in Python 3.12) are replaced by assertTrue/assertFalse, and
    two locals named `d` are renamed so they no longer shadow the
    module-level `d` alias for datetime.datetime.
    """

    @replace('datetime.datetime', test_datetime())
    def test_now(self):
        from datetime import datetime
        # successive now() calls advance by the default 10-second delta
        compare(datetime.now(), d(2001, 1, 1, 0, 0, 0))
        compare(datetime.now(), d(2001, 1, 1, 0, 0, 10))
        compare(datetime.now(), d(2001, 1, 1, 0, 0, 30))

    @replace('datetime.datetime', test_datetime())
    def test_now_with_tz_supplied(self):
        from datetime import datetime
        info = TestTZInfo()
        compare(datetime.now(info), d(2001, 1, 1, 0, 4, tzinfo=TestTZInfo()))

    @replace('datetime.datetime', test_datetime(tzinfo=TestTZInfo()))
    def test_now_with_tz_setup(self):
        from datetime import datetime
        compare(datetime.now(), d(2001, 1, 1))

    @replace('datetime.datetime', test_datetime(tzinfo=TestTZInfo()))
    def test_now_with_tz_setup_and_supplied(self):
        from datetime import datetime
        info = TestTZ2Info()
        compare(datetime.now(info), d(2001, 1, 1, 0, 1, tzinfo=info))

    @replace('datetime.datetime', test_datetime(tzinfo=TestTZInfo()))
    def test_now_with_tz_setup_and_same_supplied(self):
        from datetime import datetime
        info = TestTZInfo()
        compare(datetime.now(info), d(2001, 1, 1, tzinfo=info))

    @replace('datetime.datetime', test_datetime(2002, 1, 1, 1, 2, 3))
    def test_now_supplied(self):
        from datetime import datetime
        compare(datetime.now(), d(2002, 1, 1, 1, 2, 3))

    @replace('datetime.datetime', test_datetime(None))
    def test_now_sequence(self, t):
        t.add(2002, 1, 1, 1, 0, 0)
        t.add(2002, 1, 1, 2, 0, 0)
        t.add(2002, 1, 1, 3, 0, 0)
        from datetime import datetime
        compare(datetime.now(), d(2002, 1, 1, 1, 0, 0))
        compare(datetime.now(), d(2002, 1, 1, 2, 0, 0))
        compare(datetime.now(), d(2002, 1, 1, 3, 0, 0))

    @replace('datetime.datetime', test_datetime())
    def test_add_and_set(self, t):
        t.add(2002, 1, 1, 1, 0, 0)
        t.add(2002, 1, 1, 2, 0, 0)
        t.set(2002, 1, 1, 3, 0, 0)
        from datetime import datetime
        # set() discards the queue and restarts the delta sequence
        compare(datetime.now(), d(2002, 1, 1, 3, 0, 0))
        compare(datetime.now(), d(2002, 1, 1, 3, 0, 10))
        compare(datetime.now(), d(2002, 1, 1, 3, 0, 30))

    @replace('datetime.datetime', test_datetime(None))
    def test_add_datetime_supplied(self, t):
        from datetime import datetime
        t.add(d(2002, 1, 1, 1))
        t.add(datetime(2002, 1, 1, 2))
        compare(datetime.now(), d(2002, 1, 1, 1, 0, 0))
        compare(datetime.now(), d(2002, 1, 1, 2, 0, 0))
        with ShouldRaise(ValueError(
            'Cannot add datetime with tzinfo set'
        )):
            t.add(d(2001, 1, 1, tzinfo=TestTZInfo()))

    @replace('datetime.datetime', test_datetime(None))
    def test_now_requested_longer_than_supplied(self, t):
        t.add(2002, 1, 1, 1, 0, 0)
        t.add(2002, 1, 1, 2, 0, 0)
        from datetime import datetime
        # once the queue is exhausted, the delta sequence takes over
        compare(datetime.now(), d(2002, 1, 1, 1, 0, 0))
        compare(datetime.now(), d(2002, 1, 1, 2, 0, 0))
        compare(datetime.now(), d(2002, 1, 1, 2, 0, 10))
        compare(datetime.now(), d(2002, 1, 1, 2, 0, 30))

    @replace('datetime.datetime', test_datetime(strict=True))
    def test_call(self, t):
        compare(t(2002, 1, 2, 3, 4, 5), d(2002, 1, 2, 3, 4, 5))
        from datetime import datetime
        dt = datetime(2001, 1, 1, 1, 0, 0)
        self.assertFalse(dt.__class__ is d)
        compare(dt, d(2001, 1, 1, 1, 0, 0))

    def test_date_return_type(self):
        with Replacer() as r:
            r.replace('datetime.datetime', test_datetime())
            from datetime import datetime
            dt = datetime(2001, 1, 1, 1, 0, 0)
            # renamed from `d` to avoid shadowing the module-level alias
            result = dt.date()
            compare(result, date(2001, 1, 1))
            self.assertTrue(result.__class__ is date)

    def test_date_return_type_picky(self):
        # type checking is a bitch :-/
        date_type = test_date(strict=True)
        with Replacer() as r:
            r.replace('datetime.datetime', test_datetime(date_type=date_type,
                                                         strict=True,
                                                         ))
            from datetime import datetime
            dt = datetime(2010, 8, 26, 14, 33, 13)
            # renamed from `d` to avoid shadowing the module-level alias
            result = dt.date()
            compare(result, date_type(2010, 8, 26))
            self.assertTrue(result.__class__ is date_type)

    # if you have an embedded `now` as above, *and* you need to supply
    # a list of required datetimes, then it's often simplest just to
    # do a manual try-finally with a replacer:
    def test_import_and_obtain_with_lists(self):

        t = test_datetime(None)
        t.add(2002, 1, 1, 1, 0, 0)
        t.add(2002, 1, 1, 2, 0, 0)

        from testfixtures import Replacer
        r = Replacer()
        r.replace('testfixtures.tests.sample1.now', t.now)
        try:
            compare(sample1.str_now_2(), '2002-01-01 01:00:00')
            compare(sample1.str_now_2(), '2002-01-01 02:00:00')
        finally:
            r.restore()

    @replace('datetime.datetime', test_datetime())
    def test_repr(self):
        from datetime import datetime
        compare(repr(datetime), "<class 'testfixtures.tdatetime.tdatetime'>")

    @replace('datetime.datetime', test_datetime(delta=1))
    def test_delta(self):
        from datetime import datetime
        compare(datetime.now(), d(2001, 1, 1, 0, 0, 0))
        compare(datetime.now(), d(2001, 1, 1, 0, 0, 1))
        compare(datetime.now(), d(2001, 1, 1, 0, 0, 2))

    @replace('datetime.datetime', test_datetime(delta_type='minutes'))
    def test_delta_type(self):
        from datetime import datetime
        compare(datetime.now(), d(2001, 1, 1, 0, 0, 0))
        compare(datetime.now(), d(2001, 1, 1, 0, 10, 0))
        compare(datetime.now(), d(2001, 1, 1, 0, 30, 0))

    @replace('datetime.datetime', test_datetime(None))
    def test_set(self):
        from datetime import datetime
        datetime.set(2001, 1, 1, 1, 0, 1)
        compare(datetime.now(), d(2001, 1, 1, 1, 0, 1))
        datetime.set(2002, 1, 1, 1, 0, 0)
        compare(datetime.now(), d(2002, 1, 1, 1, 0, 0))
        compare(datetime.now(), d(2002, 1, 1, 1, 0, 20))

    @replace('datetime.datetime', test_datetime(None))
    def test_set_datetime_supplied(self, t):
        from datetime import datetime
        t.set(d(2002, 1, 1, 1))
        compare(datetime.now(), d(2002, 1, 1, 1, 0, 0))
        t.set(datetime(2002, 1, 1, 2))
        compare(datetime.now(), d(2002, 1, 1, 2, 0, 0))
        with ShouldRaise(ValueError(
            'Cannot set datetime with tzinfo set'
        )):
            t.set(d(2001, 1, 1, tzinfo=TestTZInfo()))

    @replace('datetime.datetime', test_datetime(None, tzinfo=TestTZInfo()))
    def test_set_tz_setup(self):
        from datetime import datetime
        datetime.set(year=2002, month=1, day=1)
        compare(datetime.now(), d(2002, 1, 1))

    @replace('datetime.datetime', test_datetime(None))
    def test_set_kw(self):
        from datetime import datetime
        datetime.set(year=2002, month=1, day=1)
        compare(datetime.now(), d(2002, 1, 1))

    @replace('datetime.datetime', test_datetime(None))
    def test_set_tzinfo_kw(self):
        from datetime import datetime
        with ShouldRaise(TypeError('Cannot set tzinfo on tdatetime')):
            datetime.set(year=2002, month=1, day=1, tzinfo=TestTZInfo())

    @replace('datetime.datetime', test_datetime(None))
    def test_set_tzinfo_args(self):
        from datetime import datetime
        with ShouldRaise(TypeError('Cannot set tzinfo on tdatetime')):
            datetime.set(2002, 1, 2, 3, 4, 5, 6, TestTZInfo())

    @replace('datetime.datetime', test_datetime(None))
    def test_add_kw(self, t):
        from datetime import datetime
        t.add(year=2002, day=1, month=1)
        compare(datetime.now(), d(2002, 1, 1))

    @replace('datetime.datetime', test_datetime(None))
    def test_add_tzinfo_kw(self, t):
        from datetime import datetime
        with ShouldRaise(TypeError('Cannot add tzinfo to tdatetime')):
            datetime.add(year=2002, month=1, day=1, tzinfo=TestTZInfo())

    @replace('datetime.datetime', test_datetime(None))
    def test_add_tzinfo_args(self, t):
        from datetime import datetime
        with ShouldRaise(TypeError('Cannot add tzinfo to tdatetime')):
            datetime.add(2002, 1, 2, 3, 4, 5, 6, TestTZInfo())

    @replace('datetime.datetime',
             test_datetime(2001, 1, 2, 3, 4, 5, 6, TestTZInfo()))
    def test_max_number_args(self):
        from datetime import datetime
        compare(datetime.now(), d(2001, 1, 2, 3, 4, 5, 6))

    @replace('datetime.datetime', test_datetime(2001, 1, 2))
    def test_min_number_args(self):
        from datetime import datetime
        compare(datetime.now(), d(2001, 1, 2))

    @replace('datetime.datetime', test_datetime(
        year=2001,
        month=1,
        day=2,
        hour=3,
        minute=4,
        second=5,
        microsecond=6,
        tzinfo=TestTZInfo()
    ))
    def test_all_kw(self):
        from datetime import datetime
        compare(datetime.now(), d(2001, 1, 2, 3, 4, 5, 6))

    @replace('datetime.datetime', test_datetime(2001, 1, 2))
    def test_utc_now(self):
        from datetime import datetime
        compare(datetime.utcnow(), d(2001, 1, 2))

    @replace('datetime.datetime',
             test_datetime(2001, 1, 2, tzinfo=TestTZInfo()))
    def test_utc_now_with_tz(self):
        from datetime import datetime
        # utcnow() subtracts the configured offset (4 minutes here)
        compare(datetime.utcnow(), d(2001, 1, 1, 23, 56))

    @replace('datetime.datetime', test_datetime(strict=True))
    def test_isinstance_strict(self):
        from datetime import datetime
        to_check = []
        to_check.append(datetime(1999, 1, 1))
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))
        to_check.append(datetime.utcnow())
        datetime.set(2001, 1, 1, 20)
        to_check.append(datetime.now())
        datetime.add(2001, 1, 1, 21)
        to_check.append(datetime.now())
        to_check.append(datetime.now())
        datetime.set(datetime(2001, 1, 1, 22))
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))
        datetime.add(datetime(2001, 1, 1, 23))
        to_check.append(datetime.now())
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))
        datetime.set(d(2001, 1, 1, 22))
        to_check.append(datetime.now())
        datetime.add(d(2001, 1, 1, 23))
        to_check.append(datetime.now())
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))

        for inst in to_check:
            self.assertTrue(isinstance(inst, datetime), inst)
            self.assertTrue(inst.__class__ is datetime, inst)
            self.assertTrue(isinstance(inst, d), inst)
            self.assertFalse(inst.__class__ is d, inst)

    @replace('datetime.datetime', test_datetime())
    def test_isinstance_default(self):
        from datetime import datetime
        to_check = []
        to_check.append(datetime(1999, 1, 1))
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))
        to_check.append(datetime.utcnow())
        datetime.set(2001, 1, 1, 20)
        to_check.append(datetime.now())
        datetime.add(2001, 1, 1, 21)
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))
        datetime.set(datetime(2001, 1, 1, 22))
        to_check.append(datetime.now())
        datetime.add(datetime(2001, 1, 1, 23))
        to_check.append(datetime.now())
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))
        datetime.set(d(2001, 1, 1, 22))
        to_check.append(datetime.now())
        datetime.add(d(2001, 1, 1, 23))
        to_check.append(datetime.now())
        to_check.append(datetime.now())
        to_check.append(datetime.now(TestTZInfo()))

        for inst in to_check:
            self.assertFalse(isinstance(inst, datetime), inst)
            self.assertFalse(inst.__class__ is datetime, inst)
            self.assertTrue(isinstance(inst, d), inst)
            self.assertTrue(inst.__class__ is d, inst)

    def test_subsecond_deltas(self):
        datetime = test_datetime(delta=0.5)
        compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 0))
        compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 500000))
        compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 1, 0))

    def test_ms_delta(self):
        datetime = test_datetime(delta=100, delta_type='microseconds')
        compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 0))
        compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 100))
        compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 200))
diff --git a/testfixtures/tests/test_diff.py b/testfixtures/tests/test_diff.py
new file mode 100644
index 0000000..251fff4
--- /dev/null
+++ b/testfixtures/tests/test_diff.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2008-2012 Simplistix Ltd
+# See license.txt for license details.
+
+from unittest import TestCase, TestSuite, makeSuite
+
+from testfixtures import diff
+
+
class TestDiff(TestCase):
    # tests for the testfixtures.diff helper, a thin wrapper over difflib

    def test_example(self):
        actual = diff('''
        line1
        line2
        line3
        ''',
                      '''
        line1
        line changed
        line3
        ''')
        expected = '''\
--- first
+++ second
@@ -1,5 +1,5 @@
 
         line1
-        line2
+        line changed
         line3
         '''
        # both sides are stripped line by line below, so the exact
        # indentation inside the literals above is not significant
        self.assertEqual(
            [line.strip() for line in expected.split("\n")],
            [line.strip() for line in actual.split("\n")],
            '\n%r\n!=\n%r' % (expected, actual)
        )

    def test_no_newlines(self):
        actual = diff('x', 'y')
        # no rhyme or reason as to which of these comes back :-/
        try:
            expected = '@@ -1 +1 @@\n-x\n+y'
            self.assertEqual(
                expected,
                actual,
                '\n%r\n!=\n%r' % (expected, actual)
            )
        except AssertionError: # pragma: no cover
            expected = '--- first\n+++ second\n@@ -1 +1 @@\n-x\n+y'
            self.assertEqual(
                expected,
                actual,
                '\n%r\n!=\n%r' % (expected, actual)
            )
diff --git a/testfixtures/tests/test_docs.py b/testfixtures/tests/test_docs.py
new file mode 100644
index 0000000..d3226ce
--- /dev/null
+++ b/testfixtures/tests/test_docs.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2009-2013 Simplistix Ltd
+#
+# See license.txt for more details.
+
+from doctest import REPORT_NDIFF, ELLIPSIS
+from glob import glob
+from manuel import doctest, capture
+from manuel.testing import TestSuite
+from nose.plugins.skip import SkipTest
+from os.path import dirname, join, pardir
+
+from . import compat
+
# collect the .txt documentation files from the docs/ directory two
# levels above this module
tests = glob(join(join(dirname(__file__), pardir, pardir), 'docs', '*.txt'))

if not tests:
    # tox can't find docs and installing an sdist doesn't install the docs
    raise SkipTest('No docs found to test') # pragma: no cover


def test_suite():
    # run the docs as doctests via manuel, using the compatibility
    # checker from tests.compat to normalise Python 2/3 output
    m = doctest.Manuel(optionflags=REPORT_NDIFF | ELLIPSIS,
                       checker=compat.DocTestChecker())
    m += compat.Manuel()
    m += capture.Manuel()
    return TestSuite(m, *tests)
diff --git a/testfixtures/tests/test_generator.py b/testfixtures/tests/test_generator.py
new file mode 100644
index 0000000..3fe07cb
--- /dev/null
+++ b/testfixtures/tests/test_generator.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2008 Simplistix Ltd
+# See license.txt for license details.
+
+from unittest import TestCase, TestSuite, makeSuite
+
+from testfixtures import generator
+from types import GeneratorType
+
+
class TestG(TestCase):
    """Tests for testfixtures.generator."""

    def test_example(self):
        g = generator(1, 2, 3)
        # assertTrue replaces the deprecated failUnless alias
        # (removed in Python 3.12)
        self.assertTrue(isinstance(g, GeneratorType))
        self.assertEqual(tuple(g), (1, 2, 3))

    def test_from_sequence(self):
        s = (1, 2, 3)
        g = generator(*s)
        self.assertTrue(isinstance(g, GeneratorType))
        self.assertEqual(tuple(g), (1, 2, 3))
diff --git a/testfixtures/tests/test_log_capture.py b/testfixtures/tests/test_log_capture.py
new file mode 100644
index 0000000..28b4535
--- /dev/null
+++ b/testfixtures/tests/test_log_capture.py
@@ -0,0 +1,233 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from testfixtures import (
+ log_capture, compare, Comparison as C, LogCapture, ShouldRaise
+)
+from unittest import TestCase
+
+from logging import getLogger
+
+root = getLogger()
+one = getLogger('one')
+two = getLogger('two')
+child = getLogger('one.child')
+
+
class TestLog_Capture(TestCase):
    """Tests for the @log_capture decorator and LogCapture behaviour.

    Changed from the original: the deprecated failUnless alias (removed
    in Python 3.12) is replaced by assertTrue.
    """

    @log_capture('two', 'one.child')
    @log_capture('one')
    @log_capture()
    def test_logging(self, l1, l2, l3):
        # decorators apply bottom-up: l1 is the bare @log_capture(),
        # l3 captures 'two' and 'one.child'
        # we can now log as normal
        root.info('1')
        one.info('2')
        two.info('3')
        child.info('4')
        # and later check what was logged
        l1.check(
            ('root', 'INFO', '1'),
            ('one', 'INFO', '2'),
            ('two', 'INFO', '3'),
            ('one.child', 'INFO', '4'),
        )
        l2.check(
            ('one', 'INFO', '2'),
            ('one.child', 'INFO', '4')
        )
        l3.check(
            ('two', 'INFO', '3'),
            ('one.child', 'INFO', '4')
        )
        # each logger also exposes the real
        # log records should anything else be needed
        compare(l3.records, [
            C('logging.LogRecord'),
            C('logging.LogRecord'),
        ])

    def test_uninstall_properly(self):
        root = getLogger()
        child = getLogger('child')
        before_root = root.handlers[:]
        before_child = child.handlers[:]
        try:
            old_root_level = root.level
            root.setLevel(49)
            old_child_level = child.level
            child.setLevel(69)

            @log_capture('child')
            @log_capture()
            def test_method(l1, l2):
                root = getLogger()
                root.info('1')
                self.assertEqual(root.level, 1)
                child = getLogger('child')
                self.assertEqual(child.level, 1)
                child.info('2')
                l1.check(
                    ('root', 'INFO', '1'),
                    ('child', 'INFO', '2'),
                )
                l2.check(
                    ('child', 'INFO', '2'),
                )

            test_method()

            # levels and handlers must be restored once the decorated
            # function has returned
            self.assertEqual(root.level, 49)
            self.assertEqual(child.level, 69)

            self.assertEqual(root.handlers, before_root)
            self.assertEqual(child.handlers, before_child)

        finally:
            root.setLevel(old_root_level)
            child.setLevel(old_child_level)

    @log_capture()
    def test_decorator_returns_logcapture(self, l):
        # check for what we get, so we only have to write
        # tests in test_logcapture.py
        # assertTrue replaces the deprecated failUnless alias
        # (removed in Python 3.12)
        self.assertTrue(isinstance(l, LogCapture))

    def test_remove_existing_handlers(self):
        logger = getLogger()
        # get original handlers
        original = logger.handlers
        try:
            # put in a stub which will blow up if used
            logger.handlers = start = [object()]

            @log_capture()
            def test_method(l):
                logger.info('during')
                l.check(('root', 'INFO', 'during'))

            test_method()

            compare(logger.handlers, start)

        finally:
            logger.handlers = original

    def test_clear_global_state(self):
        from logging import _handlers, _handlerList
        capture = LogCapture()
        capture.uninstall()
        # uninstall() must not leave the handler registered in logging's
        # module-level bookkeeping
        self.assertFalse(capture in _handlers)
        self.assertFalse(capture in _handlerList)

    def test_no_propogate(self):
        logger = getLogger('child')
        # paranoid check
        compare(logger.propagate, True)

        @log_capture('child', propagate=False)
        def test_method(l):
            logger.info('a log message')
            l.check(('child', 'INFO', 'a log message'))

        with LogCapture() as global_log:
            test_method()

        # nothing should have reached the root logger
        global_log.check()
        compare(logger.propagate, True)

    def test_different_attributes(self):
        with LogCapture(attributes=('funcName', 'processName')) as log:
            getLogger().info('oh hai')
        log.check(
            ('test_different_attributes', 'MainProcess')
        )

    def test_missing_attribute(self):
        # unknown attribute names yield None rather than raising
        with LogCapture(attributes=('msg', 'lolwut')) as log:
            getLogger().info('oh %s', 'hai')
        log.check(
            ('oh %s', None)
        )

    def test_single_attribute(self):
        # one which isn't a string, to boot!
        with LogCapture(attributes=['msg']) as log:
            getLogger().info(dict(foo='bar', baz='bob'))
        log.check(
            dict(foo='bar', baz='bob'),
        )

    def test_callable_instead_of_attribute(self):
        def extract_msg(record):
            return {k: v for (k, v) in record.msg.items()
                    if k != 'baz'}
        with LogCapture(attributes=extract_msg) as log:
            getLogger().info(dict(foo='bar', baz='bob'))
        log.check(
            dict(foo='bar'),
        )

    def test_msg_is_none(self):
        with LogCapture(attributes=('msg', 'foo')) as log:
            getLogger().info(None, extra=dict(foo='bar'))
        log.check(
            (None, 'bar')
        )

    def test_normal_check(self):
        with LogCapture() as log:
            getLogger().info('oh hai')

        with ShouldRaise(AssertionError) as s:
            log.check(('root', 'INFO', 'oh noez'))

        compare(str(s.raised), expected=(
            "sequence not as expected:\n\n"
            "same:\n"
            "()\n\n"
            "expected:\n"
            "(('root', 'INFO', 'oh noez'),)\n\n"
            "actual:\n"
            "(('root', 'INFO', 'oh hai'),)"
        ))

    def test_recursive_check(self):

        with LogCapture(recursive_check=True) as log:
            getLogger().info('oh hai')

        with ShouldRaise(AssertionError) as s:
            log.check(('root', 'INFO', 'oh noez'))

        compare(str(s.raised), expected=(
            "sequence not as expected:\n\n"
            "same:\n()\n\n"
            "expected:\n(('root', 'INFO', 'oh noez'),)\n\n"
            "actual:\n(('root', 'INFO', 'oh hai'),)\n\n"
            "While comparing [0]: sequence not as expected:\n\n"
            "same:\n('root', 'INFO')\n\n"
            "expected:\n"
            "('oh noez',)\n\n"
            "actual:\n"
            "('oh hai',)\n\n"
            "While comparing [0][2]: 'oh noez' (expected) != 'oh hai' (actual)"
        ))
+
+
class BaseCaptureTest(TestCase):
    # subclasses override `a`; the inherited decorated test then runs
    # against the subclass value
    a = 33

    @log_capture()
    def test_logs_if_a_smaller_than_44(self, logs):
        logger = getLogger()
        if self.a < 44:
            logger.info('{} is smaller than 44'.format(self.a))

        logs.check(
            ('root', 'INFO', '{} is smaller than 44'.format(self.a)),
        )
+
+
class SubclassCaptureTest(BaseCaptureTest):
    # exercises @log_capture on an inherited test method with a
    # different value of `a`
    a = 2
diff --git a/testfixtures/tests/test_logcapture.py b/testfixtures/tests/test_logcapture.py
new file mode 100644
index 0000000..104d89f
--- /dev/null
+++ b/testfixtures/tests/test_logcapture.py
@@ -0,0 +1,384 @@
+from __future__ import print_function
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from doctest import DocTestSuite
+from mock import Mock
+from testfixtures import Replacer, LogCapture, compare
+from unittest import TestSuite, TestCase, makeSuite
+
+from logging import getLogger
+
+from warnings import catch_warnings
+
+root = getLogger()
+one = getLogger('one')
+two = getLogger('two')
+child = getLogger('one.child')
+
+
class DemoLogCapture:
    # doctest demo of the module-level `log_capture` fixture installed
    # by setUp() at the bottom of this module

    def test_simple(self): # pragma: no branch
        """
        >>> root.info('some logging')
        >>> print(log_capture)
        root INFO
          some logging
        >>> log_capture.clear()
        >>> print(log_capture)
        No logging captured
        >>> root.info('some more logging')
        >>> print(log_capture)
        root INFO
          some more logging
        """
+
+
class TestLogCapture:
    # doctest-based tests for LogCapture's install/uninstall lifecycle;
    # `log_capture`, `root`, `one`, `two` and `child` come from this
    # module's globals via the doctest setUp below

    def test_simple(self): # pragma: no branch
        """
        >>> root.info('before')
        >>> l = LogCapture()
        >>> root.info('during')
        >>> l.uninstall()
        >>> root.info('after')
        >>> print(l)
        root INFO
          during
        """

    def test_specific_logger(self): # pragma: no branch
        """
        >>> l = LogCapture('one')
        >>> root.info('1')
        >>> one.info('2')
        >>> two.info('3')
        >>> child.info('4')
        >>> l.uninstall()
        >>> print(l)
        one INFO
          2
        one.child INFO
          4
        """

    def test_multiple_loggers(self): # pragma: no branch
        """
        >>> l = LogCapture(('one.child','two'))
        >>> root.info('1')
        >>> one.info('2')
        >>> two.info('3')
        >>> child.info('4')
        >>> l.uninstall()
        >>> print(l)
        two INFO
          3
        one.child INFO
          4
        """

    def test_simple_manual_install(self): # pragma: no branch
        """
        >>> l = LogCapture(install=False)
        >>> root.info('before')
        >>> l.install()
        >>> root.info('during')
        >>> l.uninstall()
        >>> root.info('after')
        >>> print(l)
        root INFO
          during
        """

    def test_uninstall(self): # pragma: no branch
        """
        Lets start off with a couple of loggers:

        >>> root = getLogger()
        >>> child = getLogger('child')

        Lets also record the handlers for these loggers before
        we start the test:

        >>> before_root = root.handlers[:]
        >>> before_child = child.handlers[:]

        Lets also record the levels for the loggers:

        >>> old_root_level=root.level
        >>> old_child_level=child.level

        Now the test:

        >>> try:
        ...     root.setLevel(49)
        ...     child.setLevel(69)
        ...
        ...     l1 = LogCapture()
        ...     l2 = LogCapture('child')
        ...
        ...     root = getLogger()
        ...     root.info('1')
        ...     print('root level during test:',root.level)
        ...     child = getLogger('child')
        ...     print('child level during test:',child.level)
        ...     child.info('2')
        ...     print('l1 contents:')
        ...     print(l1)
        ...     print('l2 contents:')
        ...     print(l2)
        ...
        ...     l2.uninstall()
        ...     l1.uninstall()
        ...
        ...     print('root level after test:',root.level)
        ...     print('child level after test:',child.level)
        ...
        ... finally:
        ...     root.setLevel(old_root_level)
        ...     child.setLevel(old_child_level)
        root level during test: 1
        child level during test: 1
        l1 contents:
        root INFO
          1
        child INFO
          2
        l2 contents:
        child INFO
          2
        root level after test: 49
        child level after test: 69

        Now we check the handlers are as they were before
        the test:

        >>> root.handlers == before_root
        True
        >>> child.handlers == before_child
        True
        """

    def test_uninstall_all(self): # pragma: no branch
        """
        For this test, it's better if we don't have any
        LogCaptures around when we start:

        >>> log_capture.uninstall()

        If you create several LogCaptures during a doctest,
        it can create clutter to uninstall them all.
        If this is the case, use the classmethod
        LogCapture.uninstall_all() as a tearDown function
        to remove them all:

        >>> before_handlers_root = root.handlers[:]
        >>> before_handlers_child = child.handlers[:]

        >>> l1 = LogCapture()
        >>> l2 = LogCapture('one.child')

        We can see that the LogCaptures have changed the
        handlers, removing existing ones and installing
        their own:

        >>> len(root.handlers)
        1
        >>> root.handlers==before_handlers_root
        False
        >>> len(child.handlers)
        1
        >>> child.handlers==before_handlers_child
        False

        Now we show the function in action:

        >>> LogCapture.uninstall_all()

        ...and we can see the handlers are back as
        they were before:

        >>> before_handlers_root == root.handlers
        True
        >>> before_handlers_child == child.handlers
        True
        """

    def test_two_logcaptures_on_same_logger(self): # pragma: no branch
        """
        If you create more than one LogCaptures on a single
        logger, the 2nd one installed will stop the first
        one working!

        >>> l1 = LogCapture()
        >>> root.info('1st message')
        >>> print(l1)
        root INFO
          1st message
        >>> l2 = LogCapture()
        >>> root.info('2nd message')

        So, l1 missed this message:

        >>> print(l1)
        root INFO
          1st message

        ...because l2 kicked it out and recorded the message:

        >>> print(l2)
        root INFO
          2nd message
        """

    def test_uninstall_more_than_once(self): # pragma: no branch
        """
        For this test, it's better if we don't have any
        LogCaptures around when we start:

        >>> log_capture.uninstall()

        There's no problem with uninstalling a LogCapture
        more than once:

        >>> old_level = root.level
        >>> try:
        ...     root.setLevel(49)
        ...
        ...     l = LogCapture()
        ...
        ...     print('root level during test:',root.level)
        ...
        ...     l.uninstall()
        ...
        ...     print('root level after uninstall:',root.level)
        ...
        ...     root.setLevel(69)
        ...
        ...     l.uninstall()
        ...
        ...     print('root level after another uninstall:',root.level)
        ...
        ... finally:
        ...     root.setLevel(old_level)
        root level during test: 1
        root level after uninstall: 49
        root level after another uninstall: 69

        And even when loggers have been uninstalled, there's
        no problem having uninstall_all as a backstop:

        >>> log_capture.uninstall_all()
        """

    def test_with_statement(self): # pragma: no branch
        """
        >>> root.info('before')
        >>> with LogCapture() as l:
        ...    root.info('during')
        >>> root.info('after')
        >>> print(l)
        root INFO
          during
        """
+
+
class LogCaptureTests(TestCase):
    # non-doctest tests for LogCapture edge cases

    def test_remove_existing_handlers(self):
        logger = getLogger()
        # get original handlers
        original_handlers = logger.handlers
        # put in a stub which will blow up if used
        try:
            logger.handlers = start = [object()]

            with LogCapture() as l:
                logger.info('during') # pragma: no branch

            l.check(('root', 'INFO', 'during'))

            # uninstall must put the stub back exactly as it was
            compare(logger.handlers, start)

        finally:
            # only executed if the test fails
            logger.handlers = original_handlers

    def test_atexit(self):
        # http://bugs.python.org/issue25532
        from mock import call

        m = Mock()
        with Replacer() as r:
            # make sure the marker is false, other tests will
            # probably have set it
            r.replace('testfixtures.LogCapture.atexit_setup', False)
            r.replace('atexit.register', m.register)

            l = LogCapture()

            # the first LogCapture created registers an atexit hook
            expected = [call.register(l.atexit)]

            compare(expected, m.mock_calls)

        with catch_warnings(record=True) as w:
            # the hook warns about captures left installed at shutdown
            l.atexit()
            self.assertTrue(len(w), 1)
            compare(str(w[0].message), ( # pragma: no branch
                "LogCapture instances not uninstalled by shutdown, "
                "loggers captured:\n"
                "(None,)"
            ))

        l.uninstall()

        compare(set(), LogCapture.instances)

        # check re-running has no ill effects
        l.atexit()

    def test_numeric_log_level(self):
        # levels without a name are rendered as 'Level <n>'
        with LogCapture() as log:
            getLogger().log(42, 'running in the family')

        log.check(('root', 'Level 42', 'running in the family'))

    def test_enable_disabled_logger(self):
        # LogCapture re-enables a disabled logger while installed and
        # restores the disabled flag on uninstall
        logger = getLogger('disabled')
        logger.disabled = True
        with LogCapture('disabled') as log:
            logger.info('a log message')
        log.check(('disabled', 'INFO', 'a log message'))
        compare(logger.disabled, True)

    def test_no_propogate(self):
        logger = getLogger('child')
        # paranoid check
        compare(logger.propagate, True)
        with LogCapture() as global_log:
            with LogCapture('child', propagate=False) as child_log:
                logger.info('a log message')
        child_log.check(('child', 'INFO', 'a log message'))
        # nothing should have propagated up to the root capture
        global_log.check()
        compare(logger.propagate, True)
+
+# using a set up and teardown function
+# gets rid of the need for the imports in
+# doc tests
+
+
def setUp(test):
    # install a shared LogCapture as `log_capture` for the doctests above
    test.globs['log_capture'] = LogCapture()
+
+
def tearDown(test):
    # some doctests uninstall the capture themselves; uninstall_all is
    # safe to call regardless
    test.globs['log_capture'].uninstall_all()
+
+
def test_suite():
    """Build the suite: the doctests above plus LogCaptureTests."""
    # makeSuite() was deprecated and has been removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase is the documented equivalent.
    from unittest import TestLoader
    return TestSuite((
        DocTestSuite(setUp=setUp, tearDown=tearDown),
        TestLoader().loadTestsFromTestCase(LogCaptureTests),
    ))
diff --git a/testfixtures/tests/test_manuel.py b/testfixtures/tests/test_manuel.py
new file mode 100644
index 0000000..0dad9cb
--- /dev/null
+++ b/testfixtures/tests/test_manuel.py
@@ -0,0 +1,235 @@
+# Copyright (c) 2010-2013 Simplistix Ltd
+#
+# See license.txt for more details.
+
+import re
+
+from manuel import Document, Region, RegionContainer, Manuel
+from mock import Mock
+from testfixtures import compare, Comparison as C, TempDirectory
+from testfixtures.manuel import Files, FileBlock, FileResult
+from unittest import TestCase
+
+
class TestContainer(RegionContainer):
    """Test helper: wrap each given block in a Region, storing it under
    the attribute named by `attr` (e.g. 'parsed' or 'evaluated')."""
    def __init__(self, attr, *blocks):
        regions = []
        for item in blocks:
            wrapper = Region(0, ' ')
            setattr(wrapper, attr, item)
            regions.append(wrapper)
        self.regions = regions
+
+
class TestManuel(TestCase):
    # tests for testfixtures.manuel.Files: parsing `.. topic::` blocks
    # with write-file/read-file classes, evaluating and formatting them
    # NOTE(review): directive bodies below use one-space indentation —
    # the Files parser appears to dedent exactly one space (see the
    # expected `source=' :class:\n'` in test_unclaimed_works); confirm
    # against the shipped test file before reformatting.

    def tearDown(self):
        TempDirectory.cleanup_all()

    def test_multiple_files(self):
        d = Document("""

.. topic:: file.txt
 :class: write-file

 line 1

 line 2
 line 3

.. topic:: file2.txt
 :class: read-file


 line 4

 line 5
 line 6

""")
        d.parse_with(Files('td'))
        compare([
            None,
            C(FileBlock,
              path='file.txt',
              content="line 1\n\nline 2\nline 3\n",
              action='write'),
            C(FileBlock,
              path='file2.txt',
              content='line 4\n\nline 5\nline 6\n',
              action='read'),
        ], [r.parsed for r in d])

    def test_ignore_literal_blocking(self):
        # a bare '::' line inside the block must not end up in the content
        d = Document("""
.. topic:: file.txt
 :class: write-file

 ::

 line 1

 line 2
 line 3
""")
        d.parse_with(Files('td'))
        compare([
            None,
            C(FileBlock,
              path='file.txt',
              content="line 1\n\nline 2\nline 3\n",
              action='write'),
        ], [r.parsed for r in d])

    def test_file_followed_by_text(self):
        d = Document("""

.. topic:: file.txt
 :class: write-file

 .. code-block:: python

 print "hello"
 out = 'there'

 foo = 'bar'

This is just some normal text!
""")
        d.parse_with(Files('td'))
        compare([
            None,
            C(FileBlock,
              path='file.txt',
              content='.. code-block:: python\n\nprint "hello"'
                      '\nout = \'there\'\n\nfoo = \'bar\'\n',
              action='write'),
            None,
        ], [r.parsed for r in d])

    def test_red_herring(self):
        # a topic without a write-file/read-file class is ignored
        d = Document("""
.. topic:: file.txt
 :class: not-a-file

 print "hello"
 out = 'there'

""")
        d.parse_with(Files('td'))
        compare([r.parsed for r in d], [None])

    def test_no_class(self):
        # a topic with no :class: option at all is also ignored
        d = Document("""
.. topic:: file.txt

 print "hello"
 out = 'there'

""")
        d.parse_with(Files('td'))
        compare([r.parsed for r in d], [None])

    def test_unclaimed_works(self):
        # a test manuel
        CLASS = re.compile(r'^\s+:class:', re.MULTILINE)

        class Block(object):
            def __init__(self, source):
                self.source = source

        def find_class_blocks(document):
            for region in document.find_regions(CLASS):
                region.parsed = Block(region.source)
                document.claim_region(region)

        def Test():
            return Manuel(parsers=[find_class_blocks])

        # now our test
        d = Document("""

.. topic:: something-else
 :class: not-a-file
 line 1
 line 2
 line 3

""")
        d.parse_with(Files('td')+Test())
        # now check FileBlock didn't mask class block
        compare([
            None,
            C(Block,
              source=' :class:\n'),
            None,
        ], [r.parsed for r in d])

    def test_evaluate_non_fileblock(self):
        # regions parsed by other manuels must pass through untouched
        m = Mock()
        d = TestContainer('parsed', m)
        d.evaluate_with(Files('td'), globs={})
        compare([None], [r.evaluated for r in d])
        compare(m.call_args_list, [])
        compare(m.method_calls, [])

    def test_evaluate_read_same(self):
        dir = TempDirectory()
        dir.write('foo', b'content')
        d = TestContainer('parsed', FileBlock('foo', 'content', 'read'))
        d.evaluate_with(Files('td'), globs={'td': dir})
        compare([C(FileResult,
                   passed=True,
                   expected=None,
                   actual=None)],
                [r.evaluated for r in d])

    def test_evaluate_read_difference(self):
        dir = TempDirectory()
        dir.write('foo', b'actual')
        d = TestContainer('parsed', FileBlock('foo', 'expected', 'read'))
        d.evaluate_with(Files('td'), globs={'td': dir})
        compare([C(FileResult,
                   passed=False,
                   path='foo',
                   expected='expected',
                   actual='actual')],
                [r.evaluated for r in d])

    def test_evaulate_write(self):
        # (method name typo preserved: renaming would change the test id)
        dir = TempDirectory()
        d = TestContainer('parsed', FileBlock('foo', 'content', 'write'))
        d.evaluate_with(Files('td'), globs={'td': dir})
        compare([C(FileResult,
                   passed=True,
                   expected=None,
                   actual=None)],
                [r.evaluated for r in d])
        dir.compare(['foo'])
        compare(dir.read('foo', 'ascii'), 'content')

    def test_formatter_non_fileblock(self):
        d = TestContainer('evaluated', object)
        d.format_with(Files('td'))
        compare(d.formatted(), '')

    def test_formatter_passed(self):
        # passing results produce no output
        d = TestContainer('evaluated', FileResult())
        d.format_with(Files('td'))
        compare(d.formatted(), '')

    def test_formatter_failed(self):
        # failing results are rendered as a unified diff
        r = FileResult()
        r.passed = False
        r.path = '/foo/bar'
        r.expected = 'same\nexpected\n'
        r.actual = 'same\nactual\n'
        d = TestContainer('evaluated', r)
        d.format_with(Files('td'))
        compare('--- File "<memory>", line 0:\n'
                '+++ Reading from "/foo/bar":\n'
                '@@ -1,3 +1,3 @@\n'
                ' same\n'
                '-expected\n'
                '+actual\n ',
                d.formatted()
                )
diff --git a/testfixtures/tests/test_manuel_examples.py b/testfixtures/tests/test_manuel_examples.py
new file mode 100644
index 0000000..8164c18
--- /dev/null
+++ b/testfixtures/tests/test_manuel_examples.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2010-2012 Simplistix Ltd
+#
+# See license.txt for more details.
+from os.path import dirname
+path_to_your_docs = dirname(__file__)
+
+from glob import glob
+from manuel import doctest, capture
+from manuel.testing import TestSuite
+from os.path import join
+from testfixtures import TempDirectory
+from testfixtures.manuel import Files
+
+from . import compat
+
+
+def setUp(test):
+ test.globs['tempdir'] = TempDirectory()
+
+
+def tearDown(test):
+ test.globs['tempdir'].cleanup()
+
+
+def test_suite():
+ m = doctest.Manuel()
+ m += compat.Manuel()
+ m += capture.Manuel()
+ m += Files('tempdir')
+ return TestSuite(
+ m,
+ setUp=setUp,
+ tearDown=tearDown,
+ *glob(join(path_to_your_docs, '*.txt'))
+ )
diff --git a/testfixtures/tests/test_outputcapture.py b/testfixtures/tests/test_outputcapture.py
new file mode 100644
index 0000000..3683f64
--- /dev/null
+++ b/testfixtures/tests/test_outputcapture.py
@@ -0,0 +1,76 @@
+from __future__ import print_function
+# Copyright (c) 2010-2011 Simplistix Ltd
+# Copyright (c) 2015 Chris Withers
+# See license.txt for license details.
+
+from testfixtures import OutputCapture, compare
+from unittest import TestCase, TestSuite, makeSuite
+
+import sys
+
+
+class TestOutputCapture(TestCase):
+
+ def test_compare_strips(self):
+ with OutputCapture() as o:
+ print(' Bar! ')
+ o.compare('Bar!')
+
+ def test_stdout_and_stderr(self):
+ with OutputCapture() as o:
+ print('hello', file=sys.stdout)
+ print('out', file=sys.stderr)
+ print('there', file=sys.stdout)
+ print('now', file=sys.stderr)
+ o.compare("hello\nout\nthere\nnow\n")
+
+ def test_unicode(self):
+ with OutputCapture() as o:
+ print(u'\u65e5', file=sys.stdout)
+ o.compare(u'\u65e5\n')
+
+ def test_separate_capture(self):
+ with OutputCapture(separate=True) as o:
+ print('hello', file=sys.stdout)
+ print('out', file=sys.stderr)
+ print('there', file=sys.stdout)
+ print('now', file=sys.stderr)
+ o.compare(stdout="hello\nthere\n",
+ stderr="out\nnow\n")
+
+ def test_original_restore(self):
+ o_out, o_err = sys.stdout, sys.stderr
+ with OutputCapture() as o:
+ self.assertFalse(sys.stdout is o_out)
+ self.assertFalse(sys.stderr is o_err)
+ self.assertTrue(sys.stdout is o_out)
+ self.assertTrue(sys.stderr is o_err)
+
+ def test_double_disable(self):
+ o_out, o_err = sys.stdout, sys.stderr
+ with OutputCapture() as o:
+ self.assertFalse(sys.stdout is o_out)
+ self.assertFalse(sys.stderr is o_err)
+ o.disable()
+ self.assertTrue(sys.stdout is o_out)
+ self.assertTrue(sys.stderr is o_err)
+ o.disable()
+ self.assertTrue(sys.stdout is o_out)
+ self.assertTrue(sys.stderr is o_err)
+ self.assertTrue(sys.stdout is o_out)
+ self.assertTrue(sys.stderr is o_err)
+
+ def test_double_enable(self):
+ o_out, o_err = sys.stdout, sys.stderr
+ with OutputCapture() as o:
+ o.disable()
+ self.assertTrue(sys.stdout is o_out)
+ self.assertTrue(sys.stderr is o_err)
+ o.enable()
+ self.assertFalse(sys.stdout is o_out)
+ self.assertFalse(sys.stderr is o_err)
+ o.enable()
+ self.assertFalse(sys.stdout is o_out)
+ self.assertFalse(sys.stderr is o_err)
+ self.assertTrue(sys.stdout is o_out)
+ self.assertTrue(sys.stderr is o_err)
diff --git a/testfixtures/tests/test_popen.py b/testfixtures/tests/test_popen.py
new file mode 100644
index 0000000..9f24006
--- /dev/null
+++ b/testfixtures/tests/test_popen.py
@@ -0,0 +1,395 @@
+from subprocess import PIPE
+from unittest import TestCase
+
+from mock import call
+from testfixtures import ShouldRaise, compare
+
+from testfixtures.popen import MockPopen
+from testfixtures.compat import PY2
+
+import signal
+
+
+class Tests(TestCase):
+
+ def test_command_min_args(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE)
+ # process started, no return code
+ compare(process.pid, 1234)
+ compare(None, process.returncode)
+
+ out, err = process.communicate()
+
+ # test the rest
+ compare(out, b'')
+ compare(err, b'')
+ compare(process.returncode, 0)
+ # test call list
+ compare([
+ call.Popen('a command', stderr=-1, stdout=-1),
+ call.Popen_instance.communicate(),
+ ], Popen.mock.method_calls)
+
+ def test_command_max_args(self):
+
+ Popen = MockPopen()
+ Popen.set_command('a command', b'out', b'err', 1, 345)
+
+ process = Popen('a command', stdout=PIPE, stderr=PIPE)
+ compare(process.pid, 345)
+ compare(None, process.returncode)
+
+ out, err = process.communicate()
+
+ # test the rest
+ compare(out, b'out')
+ compare(err, b'err')
+ compare(process.returncode, 1)
+ # test call list
+ compare([
+ call.Popen('a command', stderr=-1, stdout=-1),
+ call.Popen_instance.communicate(),
+ ], Popen.mock.method_calls)
+
+ def test_command_is_sequence(self):
+ Popen = MockPopen()
+ Popen.set_command('a command')
+
+ process = Popen(['a', 'command'], stdout=PIPE, stderr=PIPE)
+
+ compare(process.wait(), 0)
+ compare([
+ call.Popen(['a', 'command'], stderr=-1, stdout=-1),
+ call.Popen_instance.wait(),
+ ], Popen.mock.method_calls)
+
+ def test_communicate_with_input(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ out, err = process.communicate('foo')
+ # test call list
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.communicate('foo'),
+ ], Popen.mock.method_calls)
+
+ def test_read_from_stdout(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', stdout=b'foo')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ self.assertTrue(isinstance(process.stdout.fileno(), int))
+ compare(process.stdout.read(), b'foo')
+ # test call list
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ ], Popen.mock.method_calls)
+
+ def test_read_from_stderr(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', stderr=b'foo')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ self.assertTrue(isinstance(process.stdout.fileno(), int))
+ compare(process.stderr.read(), b'foo')
+ # test call list
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ ], Popen.mock.method_calls)
+
+ def test_read_from_stdout_and_stderr(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ compare(process.stdout.read(), b'foo')
+ compare(process.stderr.read(), b'bar')
+ # test call list
+ compare([
+ call.Popen('a command', shell=True, stderr=PIPE, stdout=PIPE),
+ ], Popen.mock.method_calls)
+
+ def test_wait_and_return_code(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', returncode=3)
+ # usage
+ process = Popen('a command')
+ compare(process.returncode, None)
+ # result checking
+ compare(process.wait(), 3)
+ compare(process.returncode, 3)
+ # test call list
+ compare([
+ call.Popen('a command'),
+ call.Popen_instance.wait(),
+ ], Popen.mock.method_calls)
+
+ def test_multiple_uses(self):
+ Popen = MockPopen()
+ Popen.set_command('a command', b'a')
+ Popen.set_command('b command', b'b')
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ out, err = process.communicate('foo')
+ compare(out, b'a')
+ process = Popen(['b', 'command'], stdout=PIPE, stderr=PIPE, shell=True)
+ out, err = process.communicate('foo')
+ compare(out, b'b')
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.communicate('foo'),
+ call.Popen(['b', 'command'], shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.communicate('foo'),
+ ], Popen.mock.method_calls)
+
+ def test_send_signal(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ process.send_signal(0)
+ # result checking
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.send_signal(0),
+ ], Popen.mock.method_calls)
+
+ def test_terminate(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ process.terminate()
+ # result checking
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.terminate(),
+ ], Popen.mock.method_calls)
+
+ def test_kill(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ process.kill()
+ # result checking
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.kill(),
+ ], Popen.mock.method_calls)
+
+ def test_all_signals(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command')
+ process.send_signal(signal.SIGINT)
+ process.terminate()
+ process.kill()
+ # test call list
+ compare([
+ call.Popen('a command'),
+ call.Popen_instance.send_signal(signal.SIGINT),
+ call.Popen_instance.terminate(),
+ call.Popen_instance.kill(),
+ ], Popen.mock.method_calls)
+
+ def test_poll_no_setup(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ compare(process.poll(), None)
+ compare(process.poll(), None)
+ compare(process.wait(), 0)
+ compare(process.poll(), 0)
+ # result checking
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.poll(),
+ call.Popen_instance.poll(),
+ call.Popen_instance.wait(),
+ call.Popen_instance.poll(),
+ ], Popen.mock.method_calls)
+
+ def test_poll_setup(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', poll_count=1)
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ compare(process.poll(), None)
+ compare(process.poll(), 0)
+ compare(process.wait(), 0)
+ compare(process.poll(), 0)
+ # result checking
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.poll(),
+ call.Popen_instance.poll(),
+ call.Popen_instance.wait(),
+ call.Popen_instance.poll(),
+ ], Popen.mock.method_calls)
+
+ def test_poll_until_result(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', returncode=3, poll_count=2)
+ # example usage
+ process = Popen('a command')
+ while process.poll() is None:
+ # you'd probably have a sleep here, or go off and
+ # do some other work.
+ pass
+ # result checking
+ compare(process.returncode, 3)
+ compare([
+ call.Popen('a command'),
+ call.Popen_instance.poll(),
+ call.Popen_instance.poll(),
+ call.Popen_instance.poll(),
+ ], Popen.mock.method_calls)
+
+ def test_command_not_specified(self):
+ Popen = MockPopen()
+ with ShouldRaise(KeyError(
+ "Nothing specified for command 'a command'"
+ )):
+ Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+
+ def test_default_command_min_args(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_default()
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE)
+ # process started, no return code
+ compare(process.pid, 1234)
+ compare(None, process.returncode)
+
+ out, err = process.communicate()
+
+ # test the rest
+ compare(out, b'')
+ compare(err, b'')
+ compare(process.returncode, 0)
+ # test call list
+ compare([
+ call.Popen('a command', stderr=-1, stdout=-1),
+ call.Popen_instance.communicate(),
+ ], Popen.mock.method_calls)
+
+ def test_default_command_max_args(self):
+ Popen = MockPopen()
+ Popen.set_default(b'out', b'err', 1, 345)
+
+ process = Popen('a command', stdout=PIPE, stderr=PIPE)
+ compare(process.pid, 345)
+ compare(None, process.returncode)
+
+ out, err = process.communicate()
+
+ # test the rest
+ compare(out, b'out')
+ compare(err, b'err')
+ compare(process.returncode, 1)
+ # test call list
+ compare([
+ call.Popen('a command', stderr=-1, stdout=-1),
+ call.Popen_instance.communicate(),
+ ], Popen.mock.method_calls)
+
+ def test_invalid_parameters(self):
+ Popen = MockPopen()
+ with ShouldRaise(TypeError(
+ "Popen() got an unexpected keyword argument 'foo'"
+ )):
+ Popen(foo='bar')
+
+ def test_invalid_method_or_attr(self):
+ Popen = MockPopen()
+ Popen.set_command('command')
+ process = Popen('command')
+ with ShouldRaise(
+ AttributeError("Mock object has no attribute 'foo'")):
+ process.foo()
+
+ def test_invalid_attribute(self):
+ Popen = MockPopen()
+ Popen.set_command('command')
+ process = Popen('command')
+ with ShouldRaise(AttributeError("Mock object has no attribute 'foo'")):
+ process.foo
+
+ def test_invalid_communicate_call(self):
+ Popen = MockPopen()
+ Popen.set_command('bar')
+ process = Popen('bar')
+ with ShouldRaise(TypeError(
+ "communicate() got an unexpected keyword argument 'foo'"
+ )):
+ process.communicate(foo='bar')
+
+ def test_invalid_wait_call(self):
+ Popen = MockPopen()
+ Popen.set_command('bar')
+ process = Popen('bar')
+ with ShouldRaise(TypeError(
+ "wait() got an unexpected keyword argument 'foo'"
+ )):
+ process.wait(foo='bar')
+
+ def test_invalid_send_signal(self):
+ Popen = MockPopen()
+ Popen.set_command('bar')
+ process = Popen('bar')
+ with ShouldRaise(TypeError(
+ "send_signal() got an unexpected keyword argument 'foo'"
+ )):
+ process.send_signal(foo='bar')
+
+ def test_invalid_terminate(self):
+ Popen = MockPopen()
+ Popen.set_command('bar')
+ process = Popen('bar')
+ with ShouldRaise(TypeError(
+ "terminate() got an unexpected keyword argument 'foo'"
+ )):
+ process.terminate(foo='bar')
+
+ def test_invalid_kill(self):
+ Popen = MockPopen()
+ Popen.set_command('bar')
+ process = Popen('bar')
+ if PY2:
+ text = 'kill() takes exactly 1 argument (2 given)'
+ else:
+ text = 'kill() takes 1 positional argument but 2 were given'
+ with ShouldRaise(TypeError(text)):
+ process.kill('moo')
+
+ def test_invalid_poll(self):
+ Popen = MockPopen()
+ Popen.set_command('bar')
+ process = Popen('bar')
+ if PY2:
+ text = 'poll() takes exactly 1 argument (2 given)'
+ else:
+ text = 'poll() takes 1 positional argument but 2 were given'
+ with ShouldRaise(TypeError(text)):
+ process.poll('moo')
diff --git a/testfixtures/tests/test_popen_docs.py b/testfixtures/tests/test_popen_docs.py
new file mode 100644
index 0000000..e017931
--- /dev/null
+++ b/testfixtures/tests/test_popen_docs.py
@@ -0,0 +1,140 @@
+# NB: This file is used in the documentation, if you make changes, ensure
+# you update the line numbers in popen.txt!
+
+from subprocess import Popen, PIPE
+
+
+def my_func():
+ process = Popen('svn ls -R foo', stdout=PIPE, stderr=PIPE, shell=True)
+ out, err = process.communicate()
+ if process.returncode:
+ raise RuntimeError('something bad happened')
+ return out
+
+dotted_path = 'testfixtures.tests.test_popen_docs.Popen'
+
+from unittest import TestCase
+
+from mock import call
+from testfixtures import Replacer, ShouldRaise, compare
+from testfixtures.popen import MockPopen
+
+
+class TestMyFunc(TestCase):
+
+ def setUp(self):
+ self.Popen = MockPopen()
+ self.r = Replacer()
+ self.r.replace(dotted_path, self.Popen)
+ self.addCleanup(self.r.restore)
+
+ def test_example(self):
+ # set up
+ self.Popen.set_command('svn ls -R foo', stdout=b'o', stderr=b'e')
+
+ # testing of results
+ compare(my_func(), b'o')
+
+ # testing calls were in the right order and with the correct parameters:
+ compare([
+ call.Popen('svn ls -R foo',
+ shell=True, stderr=PIPE, stdout=PIPE),
+ call.Popen_instance.communicate()
+ ], Popen.mock.method_calls)
+
+ def test_example_bad_returncode(self):
+ # set up
+ Popen.set_command('svn ls -R foo', stdout=b'o', stderr=b'e',
+ returncode=1)
+
+ # testing of error
+ with ShouldRaise(RuntimeError('something bad happened')):
+ my_func()
+
+ def test_communicate_with_input(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ out, err = process.communicate('foo')
+ # test call list
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.communicate('foo'),
+ ], Popen.mock.method_calls)
+
+ def test_read_from_stdout_and_stderr(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ compare(process.stdout.read(), b'foo')
+ compare(process.stderr.read(), b'bar')
+ # test call list
+ compare([
+ call.Popen('a command', shell=True, stderr=PIPE, stdout=PIPE),
+ ], Popen.mock.method_calls)
+
+ def test_wait_and_return_code(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', returncode=3)
+ # usage
+ process = Popen('a command')
+ compare(process.returncode, None)
+ # result checking
+ compare(process.wait(), 3)
+ compare(process.returncode, 3)
+ # test call list
+ compare([
+ call.Popen('a command'),
+ call.Popen_instance.wait(),
+ ], Popen.mock.method_calls)
+
+ def test_send_signal(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command')
+ # usage
+ process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
+ process.send_signal(0)
+ # result checking
+ compare([
+ call.Popen('a command', shell=True, stderr=-1, stdout=-1),
+ call.Popen_instance.send_signal(0),
+ ], Popen.mock.method_calls)
+
+ def test_poll_until_result(self):
+ # setup
+ Popen = MockPopen()
+ Popen.set_command('a command', returncode=3, poll_count=2)
+ # example usage
+ process = Popen('a command')
+ while process.poll() is None:
+ # you'd probably have a sleep here, or go off and
+ # do some other work.
+ pass
+ # result checking
+ compare(process.returncode, 3)
+ compare([
+ call.Popen('a command'),
+ call.Popen_instance.poll(),
+ call.Popen_instance.poll(),
+ call.Popen_instance.poll(),
+ ], Popen.mock.method_calls)
+
+ def test_default_behaviour(self):
+ # set up
+ self.Popen.set_default(stdout=b'o', stderr=b'e')
+
+ # testing of results
+ compare(my_func(), b'o')
+
+ # testing calls were in the right order and with the correct parameters:
+ compare([
+ call.Popen('svn ls -R foo',
+ shell=True, stderr=PIPE, stdout=PIPE),
+ call.Popen_instance.communicate()
+ ], Popen.mock.method_calls)
diff --git a/testfixtures/tests/test_rangecomparison.py b/testfixtures/tests/test_rangecomparison.py
new file mode 100644
index 0000000..74137e5
--- /dev/null
+++ b/testfixtures/tests/test_rangecomparison.py
@@ -0,0 +1,189 @@
+from decimal import Decimal
+from testfixtures import RangeComparison as R, ShouldRaise, compare
+from unittest import TestCase
+
+from ..compat import PY2, PY3
+
+
+class Tests(TestCase):
+
+ def test_equal_yes_rhs(self):
+ self.assertTrue(5 == R(2, 5))
+
+ def test_equal_yes_lhs(self):
+ self.assertTrue(R(2, 5) == 2)
+
+ def test_equal_no_rhs(self):
+ self.assertFalse(5 == R(2, 4))
+
+ def test_equal_no_lhs(self):
+ self.assertFalse(R(2, 3) == 5)
+
+ def test_not_equal_yes_rhs(self):
+ self.assertTrue(5 != R(2, 2))
+
+ def test_not_equal_yes_lhs(self):
+ self.assertTrue(R(2, 4) != 1)
+
+ def test_not_equal_no_rhs(self):
+ self.assertFalse(5 != R(-10, 10))
+
+ def test_not_equal_no_lhs(self):
+ self.assertFalse(R(2, 5) != 2)
+
+ def test_equal_in_sequence_rhs(self):
+ self.assertEqual((1, 2, 5),
+ (1, 2, R(2, 5)))
+
+ def test_equal_in_sequence_lhs(self):
+ self.assertEqual((1, 2, R(2, 5)),
+ (1, 2, 5))
+
+ def test_not_equal_in_sequence_rhs(self):
+ self.assertNotEqual((1, 2, 5),
+ (1, 2, R(2, 4)))
+
+ def test_not_equal_in_sequence_lhs(self):
+ self.assertNotEqual((1, 2, R(2, 4)),
+ (1, 2, 5))
+
+ def test_not_numeric_rhs(self):
+ if PY2:
+ self.assertFalse('abc' == R(2, 5))
+ self.assertFalse({} == R(2, 5))
+ self.assertFalse([] == R(2, 5))
+ else:
+ with ShouldRaise(TypeError):
+ 'abc' == R(2, 5)
+ with ShouldRaise(TypeError):
+ {} == R(2, 5)
+ with ShouldRaise(TypeError):
+ [] == R(2, 5)
+
+ def test_not_numeric_lhs(self):
+ if PY2:
+ self.assertFalse(R(2, 5) == 'abc')
+ self.assertFalse(R(2, 5) == {})
+ self.assertFalse(R(2, 5) == [])
+ else:
+ with ShouldRaise(TypeError):
+ R(2, 5) == 'abc'
+ with ShouldRaise(TypeError):
+ R(2, 5) == {}
+ with ShouldRaise(TypeError):
+ R(2, 5) == []
+
+ def test_repr(self):
+ compare('<Range: [2, 5]>',
+ repr(R(2, 5)))
+
+ def test_str(self):
+ compare('<Range: [2, 5]>',
+ str(R(2, 5)))
+
+ def test_str_negative(self):
+ if PY3:
+ expected = '<Range: [2, 5]>'
+ else:
+ expected = '<Range: [2, 5]>'
+ compare(expected, repr(R(2, 5)))
+
+ def test_equal_yes_decimal_lhs(self):
+ self.assertTrue(R(2, 5) == Decimal(3))
+
+ def test_equal_yes_decimal_rhs(self):
+ self.assertTrue(Decimal(3) == R(2, 5))
+
+ def test_equal_no_decimal_lhs(self):
+ self.assertFalse(R(2, 5) == Decimal(1.0))
+
+ def test_equal_no_decimal_rhs(self):
+ self.assertFalse(Decimal(1.0) == R(2, 5))
+
+ def test_equal_yes_float_lhs(self):
+ self.assertTrue(R(2, 5) == 3.0)
+
+ def test_equal_yes_float_rhs(self):
+ self.assertTrue(3.0 == R(2, 5))
+
+ def test_equal_no_float_lhs(self):
+ self.assertFalse(R(2, 5) == 1.0)
+
+ def test_equal_no_float_rhs(self):
+ self.assertFalse(1.0 == R(2, 5))
+
+ def test_equal_yes_decimal_in_range_lhs(self):
+ self.assertTrue(R(Decimal(1), 5) == 3)
+ self.assertTrue(R(1, Decimal(5)) == 3)
+ self.assertTrue(R(Decimal(1), Decimal(5)) == 3)
+
+ def test_equal_yes_decimal_in_range_rhs(self):
+ self.assertTrue(3 == R(Decimal(1), 5))
+ self.assertTrue(3 == R(1, Decimal(5)))
+ self.assertTrue(3 == R(Decimal(1), Decimal(5)))
+
+ def test_equal_no_decimal_in_range_lhs(self):
+ self.assertFalse(R(Decimal(1), 5) == 6)
+ self.assertFalse(R(1, Decimal(5)) == 6)
+ self.assertFalse(R(Decimal(1), Decimal(5)) == 6)
+
+ def test_equal_no_decimal_in_range_rhs(self):
+ self.assertFalse(6 == R(Decimal(1), 5))
+ self.assertFalse(6 == R(1, Decimal(5)))
+ self.assertFalse(6 == R(Decimal(1), Decimal(5)))
+
+ def test_equal_yes_float_in_range_lhs(self):
+ self.assertTrue(R(1.0, 5) == 3)
+ self.assertTrue(R(1, 5.0) == 3)
+ self.assertTrue(R(1.0, 5.0) == 3)
+
+ def test_equal_yes_float_in_range_rhs(self):
+ self.assertTrue(3 == R(1.0, 5))
+ self.assertTrue(3 == R(1, 5.0))
+ self.assertTrue(3 == R(1.0, 5.0))
+
+ def test_equal_no_float_in_range_lhs(self):
+ self.assertFalse(R(1.0, 5) == 6)
+ self.assertFalse(R(1, 5.0) == 6)
+ self.assertFalse(R(1.0, 5.0) == 6)
+
+ def test_equal_no_float_in_range_rhs(self):
+ self.assertFalse(6 == R(1.0, 5))
+ self.assertFalse(6 == R(1, 5.0))
+ self.assertFalse(6 == R(1.0, 5.0))
+
+ def test_equal_yes_negative_lhs(self):
+ self.assertTrue(R(-5, 5) == -3)
+ self.assertTrue(R(-10, -5) == -7)
+
+ def test_equal_yes_negative_rhs(self):
+ self.assertTrue(-2 == R(-5, 5))
+ self.assertTrue(-7 == R(-10, -5))
+
+ def test_equal_no_negative_lhs(self):
+ self.assertFalse(R(-5, 5) == -10)
+ self.assertFalse(R(-10, -5) == -3)
+
+ def test_equal_no_negative_rhs(self):
+ self.assertFalse(-10 == R(-5, 5))
+ self.assertFalse(-30 == R(-10, -5))
+
+ def test_equal_yes_no_range_lhs(self):
+ self.assertTrue(R(0, 0) == 0)
+ self.assertTrue(R(2, 2) == 2)
+ self.assertTrue(R(-1, -1) == -1)
+
+ def test_equal_yes_no_range_rhs(self):
+ self.assertTrue(0 == R(0, 0))
+ self.assertTrue(2 == R(2, 2))
+ self.assertTrue(-1 == R(-1, -1))
+
+ def test_equal_no_no_range_lhs(self):
+ self.assertFalse(R(0, 0) == 1)
+ self.assertFalse(R(2, 2) == 1)
+ self.assertFalse(R(-1, -1) == 11)
+
+ def test_equal_no_no_range_rhs(self):
+ self.assertFalse(1 == R(0, 0))
+ self.assertFalse(1 == R(2, 2))
+ self.assertFalse(1 == R(-1, -1)) \ No newline at end of file
diff --git a/testfixtures/tests/test_replace.py b/testfixtures/tests/test_replace.py
new file mode 100644
index 0000000..98e2dbe
--- /dev/null
+++ b/testfixtures/tests/test_replace.py
@@ -0,0 +1,455 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+from mock import Mock
+
+from testfixtures import (
+ Replacer,
+ Replace,
+ ShouldRaise,
+ TempDirectory,
+ replace,
+ compare,
+ not_there,
+ )
+from unittest import TestCase
+
+import os
+
+from testfixtures.tests import sample1
+from testfixtures.tests import sample2
+from ..compat import PY3
+
+from warnings import catch_warnings
+
+
+class TestReplace(TestCase):
+
+ def test_function(self):
+
+ def test_z():
+ return 'replacement z'
+
+ compare(sample1.z(), 'original z')
+
+ @replace('testfixtures.tests.sample1.z', test_z)
+ def test_something():
+ compare(sample1.z(), 'replacement z')
+
+ compare(sample1.z(), 'original z')
+
+ test_something()
+
+ compare(sample1.z(), 'original z')
+
+ def test_class(self):
+
+ OriginalX = sample1.X
+
+ class ReplacementX(sample1.X):
+ pass
+
+ self.failIf(OriginalX is ReplacementX)
+ self.failUnless(isinstance(sample1.X(), OriginalX))
+
+ @replace('testfixtures.tests.sample1.X', ReplacementX)
+ def test_something():
+ self.failIf(OriginalX is ReplacementX)
+ self.failUnless(isinstance(sample1.X(), ReplacementX))
+
+ self.failIf(OriginalX is ReplacementX)
+ self.failUnless(isinstance(sample1.X(), OriginalX))
+
+ test_something()
+
+ self.failIf(OriginalX is ReplacementX)
+ self.failUnless(isinstance(sample1.X(), OriginalX))
+
+ def test_method(self):
+
+ def test_y(self):
+ return self
+
+ compare(sample1.X().y(), 'original y')
+
+ @replace('testfixtures.tests.sample1.X.y', test_y)
+ def test_something():
+ self.failUnless(isinstance(sample1.X().y(), sample1.X))
+
+ compare(sample1.X().y(), 'original y')
+
+ test_something()
+
+ compare(sample1.X().y(), 'original y')
+
+ def test_class_method(self):
+
+ def rMethod(cls):
+ return (cls, 1)
+
+ compare(sample1.X().aMethod(), sample1.X)
+
+ @replace('testfixtures.tests.sample1.X.aMethod', rMethod)
+ def test_something(r):
+ compare(r, rMethod)
+ compare(sample1.X().aMethod(), (sample1.X, 1))
+
+ compare(sample1.X().aMethod(), sample1.X)
+
+ test_something()
+
+ compare(sample1.X().aMethod(), sample1.X)
+
+ def test_multiple_replace(self):
+
+ def test_y(self):
+ return 'test y'
+
+ def test_z():
+ return 'test z'
+
+ compare(sample1.z(), 'original z')
+ compare(sample1.X().y(), 'original y')
+
+ @replace('testfixtures.tests.sample1.z', test_z)
+ @replace('testfixtures.tests.sample1.X.y', test_y)
+ def test_something(passed_test_y, passed_test_z):
+ compare(test_z, passed_test_z)
+ compare(test_y, passed_test_y)
+ compare(sample1.z(), 'test z')
+ compare(sample1.X().y(), 'test y')
+
+ compare(sample1.z(), 'original z')
+ compare(sample1.X().y(), 'original y')
+
+ test_something()
+
+ compare(sample1.z(), 'original z')
+ compare(sample1.X().y(), 'original y')
+
+ def test_gotcha(self):
+ # Just because you replace an object in one context,
+ # doesn't meant that it's replaced in all contexts!
+
+ def test_z():
+ return 'test z'
+
+ compare(sample1.z(), 'original z')
+ compare(sample2.z(), 'original z')
+
+ @replace('testfixtures.tests.sample1.z', test_z)
+ def test_something():
+ compare(sample1.z(), 'test z')
+ compare(sample2.z(), 'original z')
+
+ compare(sample1.z(), 'original z')
+ compare(sample2.z(), 'original z')
+
+ test_something()
+
+ compare(sample1.z(), 'original z')
+ compare(sample2.z(), 'original z')
+
+ def test_raises(self):
+
+ def test_z():
+ return 'replacement z'
+
+ compare(sample1.z(), 'original z')
+
+ @replace('testfixtures.tests.sample1.z', test_z)
+ def test_something():
+ compare(sample1.z(), 'replacement z')
+ raise Exception()
+
+ compare(sample1.z(), 'original z')
+
+ with ShouldRaise():
+ test_something()
+
+ compare(sample1.z(), 'original z')
+
+ def test_want_replacement(self):
+
+ o = object()
+
+ @replace('testfixtures.tests.sample1.z', o)
+ def test_something(r):
+ self.failUnless(r is o)
+ self.failUnless(sample1.z is o)
+
+ test_something()
+
+ def test_not_there(self):
+
+ o = object()
+
+ @replace('testfixtures.tests.sample1.bad', o)
+ def test_something(r):
+ pass # pragma: no cover
+
+ with ShouldRaise(AttributeError("Original 'bad' not found")):
+ test_something()
+
+ def test_not_there_ok(self):
+
+ o = object()
+
+ @replace('testfixtures.tests.sample1.bad', o, strict=False)
+ def test_something(r):
+ self.failUnless(r is o)
+ self.failUnless(sample1.bad is o)
+
+ test_something()
+
+ def test_replace_dict(self):
+
+ from testfixtures.tests.sample1 import someDict
+
+ original = someDict['key']
+ replacement = object()
+
+ @replace('testfixtures.tests.sample1.someDict.key', replacement)
+ def test_something(obj):
+ self.failUnless(obj is replacement)
+ self.failUnless(someDict['key'] is replacement)
+
+ test_something()
+
+ self.failUnless(someDict['key'] is original)
+
+ def test_replace_delattr(self):
+
+ from testfixtures.tests import sample1
+
+ @replace('testfixtures.tests.sample1.someDict', not_there)
+ def test_something(obj):
+ self.failIf(hasattr(sample1, 'someDict'))
+
+ test_something()
+
+ self.assertEqual(sample1.someDict,
+ {'complex_key': [1, 2, 3], 'key': 'value'})
+
+ def test_replace_delattr_not_there(self):
+
+ @replace('testfixtures.tests.sample1.foo', not_there)
+ def test_something(obj):
+ pass # pragma: no cover
+
+ with ShouldRaise(AttributeError("Original 'foo' not found")):
+ test_something()
+
+ def test_replace_delattr_not_there_not_strict(self):
+
+ from testfixtures.tests import sample1
+
+ @replace('testfixtures.tests.sample1.foo',
+ not_there, strict=False)
+ def test_something(obj):
+ self.failIf(hasattr(sample1, 'foo'))
+
+ test_something()
+
+ def test_replace_delattr_cant_remove(self):
+ with Replacer() as r:
+ with ShouldRaise(TypeError(
+ "can't set attributes of "
+ "built-in/extension type 'datetime.datetime'"
+ )):
+ r.replace('datetime.datetime.today', not_there)
+
+ def test_replace_delattr_cant_remove_not_strict(self):
+ with Replacer() as r:
+ with ShouldRaise(TypeError(
+ "can't set attributes of "
+ "built-in/extension type 'datetime.datetime'"
+ )):
+ r.replace('datetime.datetime.today', not_there, strict=False)
+
+ def test_replace_dict_remove_key(self):
+
+ from testfixtures.tests.sample1 import someDict
+
+ @replace('testfixtures.tests.sample1.someDict.key', not_there)
+ def test_something(obj):
+ self.failIf('key' in someDict)
+
+ test_something()
+
+ self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key'])
+
+ def test_replace_dict_remove_key_not_there(self):
+
+ from testfixtures.tests.sample1 import someDict
+
+ @replace('testfixtures.tests.sample1.someDict.badkey', not_there)
+ def test_something(obj):
+ self.failIf('badkey' in someDict) # pragma: no cover
+
+ with ShouldRaise(AttributeError("Original 'badkey' not found")):
+ test_something()
+
+ self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key'])
+
+ def test_replace_dict_remove_key_not_there_not_strict(self):
+
+ from testfixtures.tests.sample1 import someDict
+
+ @replace('testfixtures.tests.sample1.someDict.badkey',
+ not_there, strict=False)
+ def test_something(obj):
+ self.failIf('badkey' in someDict)
+
+ test_something()
+
+ self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key'])
+
+ def test_replace_dict_not_there(self):
+
+ from testfixtures.tests.sample1 import someDict
+
+ replacement = object()
+
+ @replace('testfixtures.tests.sample1.someDict.key2',
+ replacement,
+ strict=False)
+ def test_something(obj):
+ self.failUnless(obj is replacement)
+ self.failUnless(someDict['key2'] is replacement)
+
+ test_something()
+
+ self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key'])
+
+ def test_replace_dict_not_there_empty_string(self):
+
+ from testfixtures.tests.sample1 import someDict
+
+ @replace('testfixtures.tests.sample1.someDict.key2', '', strict=False)
+ def test_something():
+ self.assertEqual(someDict['key2'], '')
+
+ test_something()
+
+ self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key'])
+
+ def test_replace_complex(self):
+
+ from testfixtures.tests.sample1 import someDict
+
+ original = someDict['complex_key'][1]
+ replacement = object()
+
+ @replace('testfixtures.tests.sample1.someDict.complex_key.1',
+ replacement)
+ def test_something(obj):
+ self.failUnless(obj is replacement)
+ self.assertEqual(someDict['complex_key'], [1, obj, 3])
+
+ test_something()
+
+ self.assertEqual(someDict['complex_key'], [1, 2, 3])
+
+ self.failUnless(original is someDict['complex_key'][1])
+
+ def test_replacer_del(self):
+ r = Replacer()
+ r.replace('testfixtures.tests.sample1.left_behind',
+ object(), strict=False)
+ with catch_warnings(record=True) as w:
+ del r
+ self.assertTrue(len(w), 1)
+ compare(str(w[0].message),
+ "Replacer deleted without being restored, originals left:"
+ " {'testfixtures.tests.sample1.left_behind': <not_there>}")
+
+ def test_multiple_replaces(self):
+ orig = os.path.sep
+ with Replacer() as r:
+ r.replace('os.path.sep', '$')
+ compare(os.path.sep, '$')
+ r.replace('os.path.sep', '=')
+ compare(os.path.sep, '=')
+ compare(orig, os.path.sep)
+
+ def test_sub_module_import(self):
+ with TempDirectory() as dir:
+ dir.write('module/__init__.py', b'')
+ dir.write('module/submodule.py', b'def foo(): return "foo"')
+
+ with Replacer() as r:
+ r.replace('sys.path', [dir.path])
+
+ def bar():
+ return "bar"
+ # now test
+
+ r.replace('module.submodule.foo', bar)
+
+ from module.submodule import foo
+ compare(foo(), "bar")
+
+ def test_staticmethod(self):
+ compare(sample1.X.bMethod(), 2)
+ with Replacer() as r:
+ r.replace('testfixtures.tests.sample1.X.bMethod', lambda: 1)
+ compare(sample1.X.bMethod(), 1)
+ compare(sample1.X.bMethod(), 2)
+
+ def test_use_as_cleanup(self):
+ def test_z():
+ return 'replacement z'
+
+ compare(sample1.z(), 'original z')
+ replace = Replacer()
+ compare(sample1.z(), 'original z')
+ replace('testfixtures.tests.sample1.z', test_z)
+ cleanup = replace.restore
+ try:
+ compare(sample1.z(), 'replacement z')
+ finally:
+ cleanup()
+ compare(sample1.z(), 'original z')
+
+ def test_replace_context_manager(self):
+ def test_z():
+ return 'replacement z'
+
+ compare(sample1.z(), 'original z')
+
+ with Replace('testfixtures.tests.sample1.z', test_z) as z:
+ compare(z(), 'replacement z')
+ compare(sample1.z(), 'replacement z')
+
+ compare(sample1.z(), 'original z')
+
+ def test_multiple_context_managers(self):
+
+ def test_y(self):
+ return 'test y'
+
+ def test_z():
+ return 'test z'
+
+ compare(sample1.z(), 'original z')
+ compare(sample1.X().y(), 'original y')
+
+ with Replacer() as replace:
+ z = replace('testfixtures.tests.sample1.z', test_z)
+ y = replace('testfixtures.tests.sample1.X.y', test_y)
+ compare(z(), 'test z')
+ if PY3:
+ compare(y, sample1.X.y)
+ compare(sample1.X().y(), 'test y')
+ compare(sample1.z(), 'test z')
+ compare(sample1.X().y(), 'test y')
+
+ compare(sample1.z(), 'original z')
+ compare(sample1.X().y(), 'original y')
+
+ def test_context_manager_not_strict(self):
+ def test_z():
+ return 'replacement z'
+
+ with Replace('testfixtures.tests.sample1.foo', test_z, strict=False):
+ compare(sample1.foo(), 'replacement z')
diff --git a/testfixtures/tests/test_replacer.py b/testfixtures/tests/test_replacer.py
new file mode 100644
index 0000000..9dd5556
--- /dev/null
+++ b/testfixtures/tests/test_replacer.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from doctest import DocTestSuite, REPORT_NDIFF, ELLIPSIS
+from testfixtures import Replacer
+from unittest import TestSuite
+
+
+class TestReplacer:
+
+ def test_function(self): # pragma: no branch
+ """
+ >>> from testfixtures.tests import sample1
+ >>> sample1.z()
+ 'original z'
+
+ >>> def test_z():
+ ... return 'replacement z'
+
+ >>> r = Replacer()
+ >>> r.replace('testfixtures.tests.sample1.z',test_z)
+
+ >>> sample1.z()
+ 'replacement z'
+
+ >>> r.restore()
+
+ >>> sample1.z()
+ 'original z'
+ """
+
+ def test_class(self): # pragma: no branch
+ """
+ >>> from testfixtures.tests import sample1
+ >>> sample1.X()
+ <testfixtures.tests.sample1.X ...>
+
+ >>> class XReplacement(sample1.X): pass
+
+ >>> r = Replacer()
+ >>> r.replace('testfixtures.tests.sample1.X', XReplacement)
+
+ >>> sample1.X()
+ <testfixtures.tests.test_replacer.XReplacement ...>
+ >>> sample1.X().y()
+ 'original y'
+
+ >>> r.restore()
+
+ >>> sample1.X()
+ <testfixtures.tests.sample1.X ...>
+ """
+
+ def test_method(self): # pragma: no branch
+ """
+ >>> from testfixtures.tests import sample1
+ >>> sample1.X().y()
+ 'original y'
+
+ >>> def test_y(self):
+ ... return 'replacement y'
+
+ >>> r = Replacer()
+ >>> r.replace('testfixtures.tests.sample1.X.y',test_y)
+
+ >>> sample1.X().y()[:38]
+ 'replacement y'
+
+ >>> r.restore()
+
+ >>> sample1.X().y()
+ 'original y'
+ """
+
+ def test_class_method(self): # pragma: no branch
+ """
+ >>> from testfixtures.tests import sample1
+ >>> sample1.X.aMethod()
+ <class ...testfixtures.tests.sample1.X...>
+
+ >>> def rMethod(cls):
+ ... return (cls,1)
+
+ >>> r = Replacer()
+ >>> r.replace('testfixtures.tests.sample1.X.aMethod',rMethod)
+
+ >>> sample1.X.aMethod()
+ (<class ...testfixtures.tests.sample1.X...>, 1)
+
+ >>> r.restore()
+
+ >>> sample1.X.aMethod()
+ <class ...testfixtures.tests.sample1.X...>
+ """
+
+ def test_multiple_replace(self): # pragma: no branch
+ """
+ >>> from testfixtures.tests import sample1
+ >>> sample1.z()
+ 'original z'
+ >>> sample1.X().y()
+ 'original y'
+
+ >>> def test_y(self):
+ ... return repr(self)
+ >>> def test_z():
+ ... return 'replacement z'
+
+ >>> r = Replacer()
+ >>> r.replace('testfixtures.tests.sample1.z',test_z)
+ >>> r.replace('testfixtures.tests.sample1.X.y',test_y)
+
+ >>> sample1.z()
+ 'replacement z'
+ >>> sample1.X().y()
+ '<testfixtures.tests.sample1.X ...>'
+
+ >>> r.restore()
+
+ >>> sample1.z()
+ 'original z'
+ >>> sample1.X().y()
+ 'original y'
+ """
+
+ def test_gotcha(self): # pragma: no branch
+ """
+ Just because you replace an object in one context:
+
+ >>> from testfixtures.tests import sample1
+ >>> from testfixtures.tests import sample2
+ >>> sample1.z()
+ 'original z'
+
+ >>> def test_z():
+ ... return 'replacement z'
+
+ >>> r = Replacer()
+ >>> r.replace('testfixtures.tests.sample1.z',test_z)
+
+ >>> sample1.z()
+ 'replacement z'
+
+ Doesn't meant that it's replaced in all contexts:
+
+ >>> sample2.z()
+ 'original z'
+
+ >>> r.restore()
+ """
+
+ def test_remove_called_twice(self): # pragma: no branch
+ """
+ >>> from testfixtures.tests import sample1
+
+ >>> def test_z():
+ ... return 'replacement z'
+
+ >>> r = Replacer()
+ >>> r.replace('testfixtures.tests.sample1.z',test_z)
+
+ >>> r.restore()
+ >>> sample1.z()
+ 'original z'
+
+ >>> r.restore()
+ >>> sample1.z()
+ 'original z'
+ """
+
+ def test_with_statement(self): # pragma: no branch
+ """
+ >>> from testfixtures.tests import sample1
+ >>> sample1.z()
+ 'original z'
+
+ >>> def test_z():
+ ... return 'replacement z'
+
+ >>> with Replacer() as r:
+ ... r.replace('testfixtures.tests.sample1.z',test_z)
+ ... sample1.z()
+ 'replacement z'
+
+ >>> sample1.z()
+ 'original z'
+ """
+
+ def test_not_there(self): # pragma: no branch
+ """
+ >>> def test_bad():
+ ... return 'moo'
+
+ >>> with Replacer() as r:
+ ... r.replace('testfixtures.tests.sample1.bad',test_bad)
+ Traceback (most recent call last):
+ ...
+ AttributeError: Original 'bad' not found
+ """
+
+
+def test_suite():
+ return DocTestSuite(optionflags=REPORT_NDIFF | ELLIPSIS)
diff --git a/testfixtures/tests/test_roundcomparison.py b/testfixtures/tests/test_roundcomparison.py
new file mode 100644
index 0000000..3f1168e
--- /dev/null
+++ b/testfixtures/tests/test_roundcomparison.py
@@ -0,0 +1,155 @@
+# Copyright (c) 2014 Simplistix Ltd
+# See license.txt for license details.
+
+from decimal import Decimal
+from testfixtures import RoundComparison as R, compare, ShouldRaise
+from unittest import TestCase
+
+from ..compat import PY2, PY3
+
+
+class Tests(TestCase):
+
+ def test_equal_yes_rhs(self):
+ self.assertTrue(0.123457 == R(0.123456, 5))
+
+ def test_equal_yes_lhs(self):
+ self.assertTrue(R(0.123456, 5) == 0.123457)
+
+ def test_equal_no_rhs(self):
+ self.assertFalse(0.123453 == R(0.123456, 5))
+
+ def test_equal_no_lhs(self):
+ self.assertFalse(R(0.123456, 5) == 0.123453)
+
+ def test_not_equal_yes_rhs(self):
+ self.assertFalse(0.123457 != R(0.123456, 5))
+
+ def test_not_equal_yes_lhs(self):
+ self.assertFalse(R(0.123456, 5) != 0.123457)
+
+ def test_not_equal_no_rhs(self):
+ self.assertTrue(0.123453 != R(0.123456, 5))
+
+ def test_not_equal_no_lhs(self):
+ self.assertTrue(R(0.123456, 5) != 0.123453)
+
+ def test_equal_in_sequence_rhs(self):
+ self.assertEqual((1, 2, 0.123457),
+ (1, 2, R(0.123456, 5)))
+
+ def test_equal_in_sequence_lhs(self):
+ self.assertEqual((1, 2, R(0.123456, 5)),
+ (1, 2, 0.123457))
+
+ def test_not_equal_in_sequence_rhs(self):
+ self.assertNotEqual((1, 2, 0.1236),
+ (1, 2, R(0.123456, 5)))
+
+ def test_not_equal_in_sequence_lhs(self):
+ self.assertNotEqual((1, 2, R(0.123456, 5)),
+ (1, 2, 0.1236))
+
+ def test_not_numeric_rhs(self):
+ with ShouldRaise(TypeError):
+ 'abc' == R(0.123456, 5)
+
+ def test_not_numeric_lhs(self):
+ with ShouldRaise(TypeError):
+ R(0.123456, 5) == 'abc'
+
+ def test_repr(self):
+ compare('<R:0.12346 to 5 digits>',
+ repr(R(0.123456, 5)))
+
+ def test_str(self):
+ compare('<R:0.12346 to 5 digits>',
+ repr(R(0.123456, 5)))
+
+ def test_str_negative(self):
+ if PY3:
+ expected = '<R:123500 to -2 digits>'
+ else:
+ expected = '<R:123500.0 to -2 digits>'
+ compare(expected, repr(R(123456, -2)))
+
+ TYPE_ERROR_DECIMAL = TypeError(
+ "Cannot compare <R:0.12346 to 5 digits> with <class 'decimal.Decimal'>"
+ )
+
+ def test_equal_yes_decimal_to_float_rhs(self):
+ with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
+ self.assertTrue(Decimal("0.123457") == R(0.123456, 5))
+
+ def test_equal_yes_decimal_to_float_lhs(self):
+ with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
+ self.assertTrue(R(0.123456, 5) == Decimal("0.123457"))
+
+ def test_equal_no_decimal_to_float_rhs(self):
+ with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
+ self.assertFalse(Decimal("0.123453") == R(0.123456, 5))
+
+ def test_equal_no_decimal_to_float_lhs(self):
+ with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
+ self.assertFalse(R(0.123456, 5) == Decimal("0.123453"))
+
+ TYPE_ERROR_FLOAT = TypeError(
+ "Cannot compare <R:0.12346 to 5 digits> with <class 'float'>"
+ )
+
+ def test_equal_yes_float_to_decimal_rhs(self):
+ with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
+ self.assertTrue(0.123457 == R(Decimal("0.123456"), 5))
+
+ def test_equal_yes_float_to_decimal_lhs(self):
+ with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
+ self.assertTrue(R(Decimal("0.123456"), 5) == 0.123457)
+
+ def test_equal_no_float_to_decimal_rhs(self):
+ with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
+ self.assertFalse(0.123453 == R(Decimal("0.123456"), 5))
+
+ def test_equal_no_float_to_decimal_lhs(self):
+ with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
+ self.assertFalse(R(Decimal("0.123456"), 5) == 0.123453)
+
+ def test_integer_float(self):
+ with ShouldRaise(TypeError, unless=PY2):
+ 1 == R(1.000001, 5)
+
+ def test_float_integer(self):
+ with ShouldRaise(TypeError, unless=PY2):
+ R(1.000001, 5) == 1
+
+ def test_equal_yes_integer_other_rhs(self):
+ self.assertTrue(10 == R(11, -1))
+
+ def test_equal_yes_integer_lhs(self):
+ self.assertTrue(R(11, -1) == 10)
+
+ def test_equal_no_integer_rhs(self):
+ self.assertFalse(10 == R(16, -1))
+
+ def test_equal_no_integer_lhs(self):
+ self.assertFalse(R(16, -1) == 10)
+
+ def test_equal_integer_zero_precision(self):
+ self.assertTrue(1 == R(1, 0))
+
+ def test_equal_yes_negative_precision(self):
+ self.assertTrue(149.123 == R(101.123, -2))
+
+ def test_equal_no_negative_precision(self):
+ self.assertFalse(149.123 == R(150.001, -2))
+
+ def test_decimal_yes_rhs(self):
+ self.assertTrue(Decimal('0.123457') == R(Decimal('0.123456'), 5))
+
+ def test_decimal_yes_lhs(self):
+ self.assertTrue(R(Decimal('0.123456'), 5) == Decimal('0.123457'))
+
+ def test_decimal_no_rhs(self):
+ self.assertFalse(Decimal('0.123453') == R(Decimal('0.123456'), 5))
+
+ def test_decimal_no_lhs(self):
+ self.assertFalse(R(Decimal('0.123456'), 5) == Decimal('0.123453'))
diff --git a/testfixtures/tests/test_should_raise.py b/testfixtures/tests/test_should_raise.py
new file mode 100644
index 0000000..e7a708e
--- /dev/null
+++ b/testfixtures/tests/test_should_raise.py
@@ -0,0 +1,292 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+from testfixtures import Comparison as C, ShouldRaise, should_raise
+from unittest import TestCase
+
+from .compat import py_33_plus
+
+
+class TestShouldRaise(TestCase):
+
+ def test_no_params(self):
+ def to_test():
+ raise ValueError('wrong value supplied')
+ should_raise(ValueError('wrong value supplied'))(to_test)()
+
+ def test_no_exception(self):
+ def to_test():
+ pass
+ try:
+ should_raise(ValueError())(to_test)()
+ except AssertionError as e:
+ self.assertEqual(
+ e,
+ C(AssertionError('None raised, ValueError() expected'))
+ )
+ else:
+ self.fail('No exception raised!')
+
+ def test_wrong_exception(self):
+ def to_test():
+ raise ValueError('bar')
+ try:
+ should_raise(ValueError('foo'))(to_test)()
+ except AssertionError as e:
+ self.assertEqual(
+ e,
+ C(AssertionError(
+ "ValueError('bar',) raised, ValueError('foo',) expected"
+ )))
+ else:
+ self.fail('No exception raised!')
+
+ def test_only_exception_class(self):
+ def to_test():
+ raise ValueError('bar')
+ should_raise(ValueError)(to_test)()
+
+ def test_no_supplied_or_raised(self):
+ # effectvely we're saying "something should be raised!"
+ # but we want to inspect s.raised rather than making
+ # an up-front assertion
+ def to_test():
+ pass
+ try:
+ should_raise()(to_test)()
+ except AssertionError as e:
+ self.assertEqual(
+ e,
+ C(AssertionError("No exception raised!"))
+ )
+ else:
+ self.fail('No exception raised!')
+
+ def test_args(self):
+ def to_test(*args):
+ raise ValueError('%s' % repr(args))
+ should_raise(ValueError('(1,)'))(to_test)(1)
+
+ def test_kw_to_args(self):
+ def to_test(x):
+ raise ValueError('%s' % x)
+ should_raise(ValueError('1'))(to_test)(x=1)
+
+ def test_kw(self):
+ def to_test(**kw):
+ raise ValueError('%r' % kw)
+ should_raise(ValueError("{'x': 1}"))(to_test)(x=1)
+
+ def test_both(self):
+ def to_test(*args, **kw):
+ raise ValueError('%r %r' % (args, kw))
+ should_raise(ValueError("(1,) {'x': 2}"))(to_test)(1, x=2)
+
+ def test_method_args(self):
+ class X:
+ def to_test(self, *args):
+ self.args = args
+ raise ValueError()
+ x = X()
+ should_raise(ValueError)(x.to_test)(1, 2, 3)
+ self.assertEqual(x.args, (1, 2, 3))
+
+ def test_method_kw(self):
+ class X:
+ def to_test(self, **kw):
+ self.kw = kw
+ raise ValueError()
+ x = X()
+ should_raise(ValueError)(x.to_test)(x=1, y=2)
+ self.assertEqual(x.kw, {'x': 1, 'y': 2})
+
+ def test_method_both(self):
+ class X:
+ def to_test(self, *args, **kw):
+ self.args = args
+ self.kw = kw
+ raise ValueError()
+ x = X()
+ should_raise(ValueError)(x.to_test)(1, y=2)
+ self.assertEqual(x.args, (1, ))
+ self.assertEqual(x.kw, {'y': 2})
+
+ def test_class_class(self):
+ class Test:
+ def __init__(self, x):
+ # The TypeError is raised due to the mis-matched parameters
+ # so the pass never gets executed
+ pass # pragma: no cover
+ should_raise(TypeError)(Test)()
+
+ def test_raised(self):
+ with ShouldRaise() as s:
+ raise ValueError('wrong value supplied')
+ self.assertEqual(s.raised, C(ValueError('wrong value supplied')))
+
+ def test_catch_baseexception_1(self):
+ with ShouldRaise(SystemExit):
+ raise SystemExit()
+
+ def test_catch_baseexception_2(self):
+ with ShouldRaise(KeyboardInterrupt):
+ raise KeyboardInterrupt()
+
+ def test_with_exception_class_supplied(self):
+ with ShouldRaise(ValueError):
+ raise ValueError('foo bar')
+
+ def test_with_exception_supplied(self):
+ with ShouldRaise(ValueError('foo bar')):
+ raise ValueError('foo bar')
+
+ def test_with_exception_supplied_wrong_args(self):
+ try:
+ with ShouldRaise(ValueError('foo')):
+ raise ValueError('bar')
+ except AssertionError as e:
+ self.assertEqual(
+ e,
+ C(AssertionError(
+ "ValueError('bar',) raised, ValueError('foo',) expected"
+ )))
+ else:
+ self.fail('No exception raised!')
+
+ def test_neither_supplied(self):
+ with ShouldRaise():
+ raise ValueError('foo bar')
+
+ def test_with_no_exception_when_expected(self):
+ try:
+ with ShouldRaise(ValueError('foo')):
+ pass
+ except AssertionError as e:
+ self.assertEqual(
+ e,
+ C(AssertionError("None raised, ValueError('foo',) expected"))
+ )
+ else:
+ self.fail('No exception raised!')
+
+ def test_with_no_exception_when_neither_expected(self):
+ try:
+ with ShouldRaise():
+ pass
+ except AssertionError as e:
+ self.assertEqual(
+ e,
+ C(AssertionError("No exception raised!"))
+ )
+ else:
+ self.fail('No exception raised!')
+
+ def test_with_getting_raised_exception(self):
+ with ShouldRaise() as s:
+ raise ValueError('foo bar')
+ self.assertEqual(C(ValueError('foo bar')), s.raised)
+
+ def test_import_errors_1(self):
+ if py_33_plus:
+ message = "No module named 'textfixtures'"
+ else:
+ message = 'No module named textfixtures.foo.bar'
+ with ShouldRaise(ImportError(message)):
+ import textfixtures.foo.bar
+
+ def test_import_errors_2(self):
+ with ShouldRaise(ImportError('X')):
+ raise ImportError('X')
+
+ def test_custom_exception(self):
+
+ class FileTypeError(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ with ShouldRaise(FileTypeError('X')):
+ raise FileTypeError('X')
+
+ def test_assert_keyerror_raised(self):
+ expected = "KeyError('foo',) raised, AttributeError('foo',) expected"
+
+ class Dodgy(dict):
+ def __getattr__(self, name):
+ # NB: we forgot to turn our KeyError into an attribute error
+ return self[name]
+ try:
+ with ShouldRaise(AttributeError('foo')):
+ Dodgy().foo
+ except AssertionError as e:
+ self.assertEqual(
+ C(AssertionError(expected)),
+ e
+ )
+ else:
+ self.fail('No exception raised!')
+
+ def test_decorator_usage(self):
+
+ @should_raise(ValueError('bad'))
+ def to_test():
+ raise ValueError('bad')
+
+ to_test()
+
+ def test_unless_false_okay(self):
+ with ShouldRaise(unless=False):
+ raise AttributeError()
+
+ def test_unless_false_bad(self):
+ try:
+ with ShouldRaise(unless=False):
+ pass
+ except AssertionError as e:
+ self.assertEqual(e, C(AssertionError("No exception raised!")))
+ else:
+ self.fail('No exception raised!')
+
+ def test_unless_true_okay(self):
+ with ShouldRaise(unless=True):
+ pass
+
+ def test_unless_true_not_okay(self):
+ try:
+ with ShouldRaise(unless=True):
+ raise AttributeError('foo')
+ except AssertionError as e:
+ self.assertEqual(e, C(AssertionError(
+ "AttributeError('foo',) raised, no exception expected"
+ )))
+ else:
+ self.fail('No exception raised!')
+
+ def test_unless_decorator_usage(self):
+
+ @should_raise(unless=True)
+ def to_test():
+ pass
+
+ to_test()
+
+ def test_identical_reprs(self):
+
+ class AnnoyingException(Exception):
+ def __init__(self, **kw):
+ self.other = kw.get('other')
+
+ try:
+ with ShouldRaise(AnnoyingException(other='bar')):
+ raise AnnoyingException(other='baz')
+ except AssertionError as e:
+ print(repr(e))
+ self.assertEqual(
+ C(AssertionError(
+ "AnnoyingException() raised, AnnoyingException() expected,"
+ " attributes differ:\n"
+ " other:'bar' != 'baz'"
+ )),
+ e,
+ )
+ else:
+ self.fail('No exception raised!')
diff --git a/testfixtures/tests/test_shouldwarn.py b/testfixtures/tests/test_shouldwarn.py
new file mode 100644
index 0000000..3586a09
--- /dev/null
+++ b/testfixtures/tests/test_shouldwarn.py
@@ -0,0 +1,119 @@
+from unittest import TestCase
+
+import warnings
+
+from testfixtures import (
+ ShouldWarn, compare, ShouldRaise, ShouldNotWarn,
+ Comparison as C
+)
+from testfixtures.compat import PY3
+
+if PY3:
+ warn_module = 'builtins'
+else:
+ warn_module = 'exceptions'
+
+
+class ShouldWarnTests(TestCase):
+
+ def test_warn_expected(self):
+ with warnings.catch_warnings(record=True) as backstop:
+ with ShouldWarn(UserWarning('foo')):
+ warnings.warn('foo')
+ compare(len(backstop), expected=0)
+
+ def test_warn_not_expected(self):
+ with ShouldRaise(AssertionError(
+ "sequence not as expected:\n\n"
+ "same:\n[]\n\n"
+ "expected:\n[]\n\n"
+ "actual:\n[UserWarning('foo',)]"
+ )):
+ with warnings.catch_warnings(record=True) as backstop:
+ with ShouldNotWarn():
+ warnings.warn('foo')
+ compare(len(backstop), expected=0)
+
+ def test_no_warn_expected(self):
+ with ShouldNotWarn():
+ pass
+
+ def test_no_warn_not_expected(self):
+ with ShouldRaise(AssertionError(
+ "sequence not as expected:\n\n"
+ "same:\n[]\n\n"
+ "expected:\n[\n <C:"+warn_module+".UserWarning>\n"
+ " args:('foo',)\n </C>]"
+ "\n\nactual:\n[]"
+ )):
+ with ShouldWarn(UserWarning('foo')):
+ pass
+
+ def test_filters_removed(self):
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ with ShouldWarn(UserWarning("foo")):
+ warnings.warn('foo')
+
+ def test_multiple_warnings(self):
+ with ShouldRaise(AssertionError) as s:
+ with ShouldWarn(UserWarning('foo')):
+ warnings.warn('foo')
+ warnings.warn('bar')
+ content = str(s.raised)
+ self.assertTrue('foo' in content)
+ self.assertTrue('bar' in content)
+
+ def test_minimal_ok(self):
+ with ShouldWarn(UserWarning):
+ warnings.warn('foo')
+
+ def test_minimal_bad(self):
+ with ShouldRaise(AssertionError(
+ "sequence not as expected:\n\n"
+ "same:\n[]\n\n"
+ "expected:\n"
+ "[<C(failed):"+warn_module+".DeprecationWarning>wrong type</C>]\n\n"
+ "actual:\n[UserWarning('foo',)]"
+ )):
+ with ShouldWarn(DeprecationWarning):
+ warnings.warn('foo')
+
+ def test_maximal_ok(self):
+ with ShouldWarn(DeprecationWarning('foo')):
+ warnings.warn_explicit(
+ 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module'
+ )
+
+ def test_maximal_bad(self):
+ with ShouldRaise(AssertionError(
+ "sequence not as expected:\n\n"
+ "same:\n[]\n\n"
+ "expected:\n[\n"
+ " <C(failed):"+warn_module+".DeprecationWarning>\n"
+ " args:('bar',) != ('foo',)"
+ "\n </C>]\n\n"
+ "actual:\n[DeprecationWarning('foo',)]"
+ )):
+ with ShouldWarn(DeprecationWarning('bar')):
+ warnings.warn_explicit(
+ 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module'
+ )
+
+ def test_maximal_explore(self):
+ with ShouldWarn() as recorded:
+ warnings.warn_explicit(
+ 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module'
+ )
+ compare(len(recorded), expected=1)
+ compare(C(warnings.WarningMessage,
+ _category_name='DeprecationWarning',
+ category=DeprecationWarning,
+ file=None,
+ filename='bar.py',
+ line=None,
+ lineno=42,
+ message=C(DeprecationWarning('foo'))
+ ), recorded[0])
+
+
diff --git a/testfixtures/tests/test_stringcomparison.py b/testfixtures/tests/test_stringcomparison.py
new file mode 100644
index 0000000..1380a02
--- /dev/null
+++ b/testfixtures/tests/test_stringcomparison.py
@@ -0,0 +1,52 @@
+from testfixtures import StringComparison as S, compare
+from testfixtures.compat import PY2
+from unittest import TestCase
+
+
+class Tests(TestCase):
+
+ def test_equal_yes(self):
+ self.failUnless('on 40220' == S('on \d+'))
+
+ def test_equal_no(self):
+ self.failIf('on xxx' == S('on \d+'))
+
+ def test_not_equal_yes(self):
+ self.failIf('on 40220' != S('on \d+'))
+
+ def test_not_equal_no(self):
+ self.failUnless('on xxx' != S('on \d+'))
+
+ def test_comp_in_sequence(self):
+ self.failUnless((
+ 1, 2, 'on 40220'
+ ) == (
+ 1, 2, S('on \d+')
+ ))
+
+ def test_not_string(self):
+ self.failIf(40220 == S('on \d+'))
+
+ def test_repr(self):
+ compare('<S:on \\d+>',
+ repr(S('on \d+')))
+
+ def test_str(self):
+ compare('<S:on \\d+>',
+ str(S('on \d+')))
+
+ def test_sort(self):
+ a = S('a')
+ b = S('b')
+ c = S('c')
+ compare(sorted(('d', c, 'e', a, 'a1', b)),
+ [a, 'a1', b, c, 'd', 'e'])
+
+ if PY2:
+ # cmp no longer exists in Python 3!
+
+ def test_cmp_yes(self):
+ self.failIf(cmp(S('on \d+'), 'on 4040'))
+
+ def test_cmp_no(self):
+ self.failUnless(cmp(S('on \d+'), 'on xx'))
diff --git a/testfixtures/tests/test_tempdir.py b/testfixtures/tests/test_tempdir.py
new file mode 100644
index 0000000..0ee5dd9
--- /dev/null
+++ b/testfixtures/tests/test_tempdir.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+import os
+
+from mock import Mock
+from tempfile import mkdtemp
+from testfixtures import Replacer, ShouldRaise, TempDirectory, compare, tempdir
+from unittest import TestCase
+
+from ..rmtree import rmtree
+
+
+class TestTempDir(TestCase):
+
+ @tempdir()
+ def test_simple(self, d):
+ d.write('something', b'stuff')
+ d.write('.svn', b'stuff')
+ d.compare((
+ '.svn',
+ 'something',
+ ))
+
+ @tempdir()
+ def test_subdirs(self, d):
+ subdir = ['some', 'thing']
+ d.write(subdir+['something'], b'stuff')
+ d.write(subdir+['.svn'], b'stuff')
+ d.compare(path=subdir, expected=(
+ '.svn',
+ 'something',
+ ))
+
+ @tempdir()
+ def test_not_same(self, d):
+ d.write('something', b'stuff')
+
+ with ShouldRaise(AssertionError(
+ "sequence not as expected:\n"
+ "\n"
+ "same:\n"
+ "()\n"
+ "\n"
+ "expected:\n"
+ "('.svn', 'something')\n"
+ "\n"
+ "actual:\n"
+ "('something',)"
+ )):
+ d.compare(['.svn', 'something'])
+
+ @tempdir(ignore=('.svn', ))
+ def test_ignore(self, d):
+ d.write('something', b'stuff')
+ d.write('.svn', b'stuff')
+ d.compare(['something'])
+
+ def test_cleanup_properly(self):
+ r = Replacer()
+ try:
+ m = Mock()
+ d = mkdtemp()
+ m.return_value = d
+ r.replace('testfixtures.tempdirectory.mkdtemp', m)
+
+ self.failUnless(os.path.exists(d))
+
+ self.assertFalse(m.called)
+
+ @tempdir()
+ def test_method(d):
+ d.write('something', b'stuff')
+ d.compare(['something'])
+
+ self.assertFalse(m.called)
+ compare(os.listdir(d), [])
+
+ test_method()
+
+ self.assertTrue(m.called)
+ self.failIf(os.path.exists(d))
+
+ finally:
+ r.restore()
+ if os.path.exists(d):
+ # only runs if the test fails!
+ rmtree(d) # pragma: no cover
+
+ @tempdir()
+ def test_cleanup_test_okay_with_deleted_dir(self, d):
+ rmtree(d.path)
+
+ @tempdir()
+ def test_decorator_returns_tempdirectory(self, d):
+ # check for what we get, so we only have to write
+ # tests in test_tempdirectory.py
+ self.failUnless(isinstance(d, TempDirectory))
+
+ def test_dont_create_or_cleanup_with_path(self):
+ with Replacer() as r:
+ m = Mock()
+ r.replace('testfixtures.tempdirectory.mkdtemp', m)
+ r.replace('testfixtures.tempdirectory.rmtree', m)
+
+ @tempdir(path='foo')
+ def test_method(d):
+ compare(d.path, 'foo')
+
+ test_method()
+
+ self.assertFalse(m.called)
diff --git a/testfixtures/tests/test_tempdirectory.py b/testfixtures/tests/test_tempdirectory.py
new file mode 100644
index 0000000..9ada298
--- /dev/null
+++ b/testfixtures/tests/test_tempdirectory.py
@@ -0,0 +1,411 @@
+# Copyright (c) 2008-2014 Simplistix Ltd
+# See license.txt for license details.
+
+import os
+
+from doctest import DocTestSuite, ELLIPSIS
+from mock import Mock
+from tempfile import mkdtemp
+from testfixtures import TempDirectory, Replacer, ShouldRaise, compare
+from unittest import TestCase, TestSuite, makeSuite
+
+from ..compat import Unicode, PY3
+from testfixtures.tests.compat import py_35_plus
+from warnings import catch_warnings
+
+from ..rmtree import rmtree
+
+if PY3:
+ some_bytes = '\xa3'.encode('utf-8')
+ some_text = '\xa3'
+else:
+ some_bytes = '\xc2\xa3'
+ some_text = '\xc2\xa3'.decode('utf-8')
+
+
+class DemoTempDirectory:
+
+ def test_return_path(self): # pragma: no branch
+ """
+ If you want the path created when you use `write`, you
+ can do:
+
+ >>> temp_dir.write('filename', b'data')
+ '...filename'
+ """
+
+ def test_ignore(self): # pragma: no branch
+ """
+ TempDirectories can also be set up to ignore certain files:
+
+ >>> d = TempDirectory(ignore=('.svn', ))
+ >>> p = d.write('.svn', b'stuff')
+ >>> temp_dir.listdir()
+ No files or directories found.
+ """
+
+ def test_ignore_regex(self): # pragma: no branch
+ """
+ TempDirectories can also be set up to ignore certain files:
+
+ >>> d = TempDirectory(ignore=('^\.svn$', '.pyc$'))
+ >>> p = d.write('.svn', b'stuff')
+ >>> p = d.write('foo.svn', b'')
+ >>> p = d.write('foo.pyc', b'')
+ >>> p = d.write('bar.pyc', b'')
+ >>> d.listdir()
+ foo.svn
+ """
+
+
+class TestTempDirectory:
+
+ def test_cleanup(self): # pragma: no branch
+ """
+ >>> d = TempDirectory()
+ >>> p = d.path
+ >>> os.path.exists(p)
+ True
+ >>> p = d.write('something', b'stuff')
+ >>> d.cleanup()
+ >>> os.path.exists(p)
+ False
+ """
+
+ def test_cleanup_all(self): # pragma: no branch
+ """
+ If you create several TempDirecories during a doctest,
+ or if exceptions occur while running them,
+ it can create clutter on disk.
+ For this reason, it's recommended to use the classmethod
+ TempDirectory.cleanup_all() as a tearDown function
+ to remove them all:
+
+ >>> d1 = TempDirectory()
+ >>> d2 = TempDirectory()
+
+ Some sanity checks:
+
+ >>> os.path.exists(d1.path)
+ True
+ >>> p1 = d1.path
+ >>> os.path.exists(d2.path)
+ True
+ >>> p2 = d2.path
+
+ Now we show the function in action:
+
+ >>> TempDirectory.cleanup_all()
+
+ >>> os.path.exists(p1)
+ False
+ >>> os.path.exists(p2)
+ False
+ """
+
+ def test_with_statement(self): # pragma: no branch
+ """
+ >>> with TempDirectory() as d:
+ ... p = d.path
+ ... print(os.path.exists(p))
+ ... path = d.write('something', b'stuff')
+ ... os.listdir(p)
+ ... with open(os.path.join(p, 'something')) as f:
+ ... print(repr(f.read()))
+ True
+ ['something']
+ 'stuff'
+ >>> os.path.exists(p)
+ False
+ """
+
+ def test_listdir_sort(self): # pragma: no branch
+ """
+ >>> with TempDirectory() as d:
+ ... p = d.write('ga', b'')
+ ... p = d.write('foo1', b'')
+ ... p = d.write('Foo2', b'')
+ ... p = d.write('g.o', b'')
+ ... d.listdir()
+ Foo2
+ foo1
+ g.o
+ ga
+ """
+
+
+class TempDirectoryTests(TestCase):
+
+ def test_write_with_slash_at_start(self):
+ with TempDirectory() as d:
+ with ShouldRaise(ValueError(
+ 'Attempt to read or write outside the temporary Directory'
+ )):
+ d.write('/some/folder', 'stuff')
+
+ def test_makedir_with_slash_at_start(self):
+ with TempDirectory() as d:
+ with ShouldRaise(ValueError(
+ 'Attempt to read or write outside the temporary Directory'
+ )):
+ d.makedir('/some/folder')
+
+ def test_read_with_slash_at_start(self):
+ with TempDirectory() as d:
+ with ShouldRaise(ValueError(
+ 'Attempt to read or write outside the temporary Directory'
+ )):
+ d.read('/some/folder')
+
+ def test_listdir_with_slash_at_start(self):
+ with TempDirectory() as d:
+ with ShouldRaise(ValueError(
+ 'Attempt to read or write outside the temporary Directory'
+ )):
+ d.listdir('/some/folder')
+
+ def test_compare_with_slash_at_start(self):
+ with TempDirectory() as d:
+ with ShouldRaise(ValueError(
+ 'Attempt to read or write outside the temporary Directory'
+ )):
+ d.compare((), path='/some/folder')
+
+ def test_read_with_slash_at_start_ok(self):
+ with TempDirectory() as d:
+ path = d.write('foo', b'bar')
+ compare(d.read(path), b'bar')
+
+ def test_dont_cleanup_with_path(self):
+ d = mkdtemp()
+ fp = os.path.join(d, 'test')
+ with open(fp, 'w') as f:
+ f.write('foo')
+ try:
+ td = TempDirectory(path=d)
+ self.assertEqual(d, td.path)
+ td.cleanup()
+ # checks
+ self.assertEqual(os.listdir(d), ['test'])
+ with open(fp) as f:
+ self.assertEqual(f.read(), 'foo')
+ finally:
+ rmtree(d)
+
+ def test_dont_create_with_path(self):
+ d = mkdtemp()
+ rmtree(d)
+ td = TempDirectory(path=d)
+ self.assertEqual(d, td.path)
+ self.failIf(os.path.exists(d))
+
+ def test_deprecated_check(self):
+ with TempDirectory() as d:
+ d.write('x', b'')
+ d.check('x')
+
+ def test_deprecated_check_dir(self):
+ with TempDirectory() as d:
+ d.write('foo/x', b'')
+ d.check_dir('foo', 'x')
+
+ def test_deprecated_check_all(self):
+ with TempDirectory() as d:
+ d.write('a/b/c', b'')
+ d.check_all('', 'a/', 'a/b/', 'a/b/c')
+ d.check_all('a', 'b/', 'b/c')
+
+ def test_compare_sort_actual(self):
+ with TempDirectory() as d:
+ d.write('ga', b'')
+ d.write('foo1', b'')
+ d.write('Foo2', b'')
+ d.write('g.o', b'')
+ d.compare(['Foo2', 'foo1', 'g.o', 'ga'])
+
+ def test_compare_sort_expected(self):
+ with TempDirectory() as d:
+ d.write('ga', b'')
+ d.write('foo1', b'')
+ d.write('Foo2', b'')
+ d.write('g.o', b'')
+ d.compare(['Foo2', 'ga', 'foo1', 'g.o'])
+
+ def test_compare_path_tuple(self):
+ with TempDirectory() as d:
+ d.write('a/b/c', b'')
+ d.compare(path=('a', 'b'),
+ expected=['c'])
+
+ def test_recursive_ignore(self):
+ with TempDirectory(ignore=['.svn']) as d:
+ d.write('.svn/rubbish', b'')
+ d.write('a/.svn/rubbish', b'')
+ d.write('a/b/.svn', b'')
+ d.write('a/b/c', b'')
+ d.write('a/d/.svn/rubbish', b'')
+ d.compare([
+ 'a/',
+ 'a/b/',
+ 'a/b/c',
+ 'a/d/',
+ ])
+
+ def test_files_only(self):
+ with TempDirectory() as d:
+ d.write('a/b/c', b'')
+ d.compare(['a/b/c'], files_only=True)
+
+ def test_path(self):
+ with TempDirectory() as d:
+ expected1 = d.makedir('foo')
+ expected2 = d.write('baz/bob', b'')
+ expected3 = d.getpath('a/b/c')
+
+ actual1 = d.getpath('foo')
+ actual2 = d.getpath('baz/bob')
+ actual3 = d.getpath(('a', 'b', 'c'))
+
+ self.assertEqual(expected1, actual1)
+ self.assertEqual(expected2, actual2)
+ self.assertEqual(expected3, actual3)
+
+ def test_atexit(self):
+ # http://bugs.python.org/issue25532
+ from mock import call
+
+ m = Mock()
+ with Replacer() as r:
+ # make sure the marker is false, other tests will
+ # probably have set it
+ r.replace('testfixtures.TempDirectory.atexit_setup', False)
+ r.replace('atexit.register', m.register)
+
+ d = TempDirectory()
+
+ expected = [call.register(d.atexit)]
+
+ compare(expected, m.mock_calls)
+
+ with catch_warnings(record=True) as w:
+ d.atexit()
+ self.assertTrue(len(w), 1)
+ compare(str(w[0].message), ( # pragma: no branch
+ "TempDirectory instances not cleaned up by shutdown:\n" +
+ d.path
+ ))
+
+ d.cleanup()
+
+ compare(set(), TempDirectory.instances)
+
+ # check re-running has no ill effects
+ d.atexit()
+
+ def test_read_decode(self):
+ with TempDirectory() as d:
+ with open(os.path.join(d.path, 'test.file'), 'wb') as f:
+ f.write(b'\xc2\xa3')
+ compare(d.read('test.file', 'utf8'), some_text)
+
+ def test_read_no_decode(self):
+ with TempDirectory() as d:
+ with open(os.path.join(d.path, 'test.file'), 'wb') as f:
+ f.write(b'\xc2\xa3')
+ compare(d.read('test.file'), b'\xc2\xa3')
+
+ def test_write_bytes(self):
+ with TempDirectory() as d:
+ d.write('test.file', b'\xc2\xa3')
+ with open(os.path.join(d.path, 'test.file'), 'rb') as f:
+ compare(f.read(), b'\xc2\xa3')
+
+ def test_write_unicode(self):
+ with TempDirectory() as d:
+ d.write('test.file', some_text, 'utf8')
+ with open(os.path.join(d.path, 'test.file'), 'rb') as f:
+ compare(f.read(), b'\xc2\xa3')
+
+ def test_write_unicode_bad(self):
+ if py_35_plus:
+ expected = TypeError(
+ "a bytes-like object is required, not 'str'"
+ )
+ elif PY3:
+ expected = TypeError(
+ "'str' does not support the buffer interface"
+ )
+ else:
+ expected = UnicodeDecodeError(
+ 'ascii', '\xa3', 0, 1, 'ordinal not in range(128)'
+ )
+ with TempDirectory() as d:
+ with ShouldRaise(expected):
+ d.write('test.file', Unicode('\xa3'))
+
+ def test_just_empty_non_recursive(self):
+ with TempDirectory() as d:
+ d.makedir('foo/bar')
+ d.makedir('foo/baz')
+ d.compare(path='foo',
+ expected=['bar', 'baz'],
+ recursive=False)
+
+ def test_just_empty_dirs(self):
+ with TempDirectory() as d:
+ d.makedir('foo/bar')
+ d.makedir('foo/baz')
+ d.compare(['foo/', 'foo/bar/', 'foo/baz/'])
+
+ def test_symlink(self):
+ with TempDirectory() as d:
+ d.write('foo/bar.txt', b'x')
+ os.symlink(d.getpath('foo'), d.getpath('baz'))
+ d.compare(['baz/', 'foo/', 'foo/bar.txt'])
+
+ def test_follow_symlinks(self):
+ with TempDirectory() as d:
+ d.write('foo/bar.txt', b'x')
+ os.symlink(d.getpath('foo'), d.getpath('baz'))
+ d.compare(['baz/', 'baz/bar.txt', 'foo/', 'foo/bar.txt'],
+ followlinks=True)
+
+ def test_trailing_slash(self):
+ with TempDirectory() as d:
+ d.write('source/foo/bar.txt', b'x')
+ d.compare(path='source/', expected=['foo/', 'foo/bar.txt'])
+
+ def test_default_encoding(self):
+ encoded = b'\xc2\xa3'
+ decoded = encoded.decode('utf-8')
+ with TempDirectory(encoding='utf-8') as d:
+ d.write('test.txt', decoded)
+ compare(d.read('test.txt'), expected=decoded)
+
+ def test_override_default_encoding(self):
+ encoded = b'\xc2\xa3'
+ decoded = encoded.decode('utf-8')
+ with TempDirectory(encoding='ascii') as d:
+ d.write('test.txt', decoded, encoding='utf-8')
+ compare(d.read('test.txt', encoding='utf-8'), expected=decoded)
+
+
+# using a set up and teardown function
+# gets rid of the need for the imports in
+# doc tests
+
+
+def setUp(test):
+ test.globs['temp_dir'] = TempDirectory()
+
+
+def tearDown(test):
+ TempDirectory.cleanup_all()
+
+
+def test_suite():
+ return TestSuite((
+ DocTestSuite(setUp=setUp, tearDown=tearDown,
+ optionflags=ELLIPSIS),
+ makeSuite(TempDirectoryTests),
+ ))
diff --git a/testfixtures/tests/test_time.py b/testfixtures/tests/test_time.py
new file mode 100644
index 0000000..def7089
--- /dev/null
+++ b/testfixtures/tests/test_time.py
@@ -0,0 +1,185 @@
+# Copyright (c) 2008-2013 Simplistix Ltd
+# See license.txt for license details.
+
+from testfixtures import test_time, replace, compare, ShouldRaise
+from unittest import TestCase
+from testfixtures.tests.test_datetime import TestTZInfo
+
+
class TestTime(TestCase):
    """
    Tests for ``test_time``, the replacement for ``time.time`` that
    returns values from a controllable, deterministic fake clock.

    The expected values are POSIX timestamps; e.g. 978307200.0 is
    2001-01-01 00:00:00 UTC.
    """

    @replace('time.time', test_time())
    def test_time_call(self):
        from time import time
        # Successive calls advance the fake clock by a growing step:
        # +1s after the first call, +2s after the second.
        compare(time(), 978307200.0)
        compare(time(), 978307201.0)
        compare(time(), 978307203.0)

    @replace('time.time', test_time(2002, 1, 1, 1, 2, 3))
    def test_time_supplied(self):
        # The start point can be given as datetime-style positional args.
        from time import time
        compare(time(), 1009846923.0)

    @replace('time.time', test_time(None))
    def test_time_sequence(self, t):
        # test_time(None) starts with no queued values; add() queues the
        # exact times to be returned, in order.
        t.add(2002, 1, 1, 1, 0, 0)
        t.add(2002, 1, 1, 2, 0, 0)
        t.add(2002, 1, 1, 3, 0, 0)
        from time import time
        compare(time(), 1009846800.0)
        compare(time(), 1009850400.0)
        compare(time(), 1009854000.0)

    @replace('time.time', test_time(None))
    def test_add_datetime_supplied(self, t):
        from datetime import datetime
        from time import time
        # add() also accepts a naive datetime instance...
        t.add(datetime(2002, 1, 1, 2))
        compare(time(), 1009850400.0)
        # ...but rejects a tz-aware one.
        with ShouldRaise(ValueError(
            'Cannot add datetime with tzinfo set'
        )):
            t.add(datetime(2001, 1, 1, tzinfo=TestTZInfo()))

    @replace('time.time', test_time(None))
    def test_now_requested_longer_than_supplied(self, t):
        # Once the queued values run out, the clock falls back to
        # advancing by the growing default step (+1s, then +2s).
        t.add(2002, 1, 1, 1, 0, 0)
        t.add(2002, 1, 1, 2, 0, 0)
        from time import time
        compare(time(), 1009846800.0)
        compare(time(), 1009850400.0)
        compare(time(), 1009850401.0)
        compare(time(), 1009850403.0)

    @replace('time.time', test_time())
    def test_call(self, t):
        # Calling the fixture object directly behaves the same as
        # calling the replaced time.time.
        compare(t(), 978307200.0)
        from time import time
        compare(time(), 978307201.0)

    @replace('time.time', test_time())
    def test_repr_time(self):
        from time import time
        compare(repr(time), "<class 'testfixtures.tdatetime.ttime'>")

    @replace('time.time', test_time(delta=10))
    def test_delta(self):
        # An explicit delta fixes the step between consecutive calls.
        from time import time
        compare(time(), 978307200.0)
        compare(time(), 978307210.0)
        compare(time(), 978307220.0)

    @replace('time.time', test_time(delta_type='minutes'))
    def test_delta_type(self):
        # delta_type changes the unit of the growing default step
        # (here: +1 minute, then +2 minutes).
        from time import time
        compare(time(), 978307200.0)
        compare(time(), 978307260.0)
        compare(time(), 978307380.0)

    @replace('time.time', test_time(None))
    def test_set(self):
        from time import time
        # set() repositions the clock; subsequent calls continue from
        # the new point.
        time.set(2001, 1, 1, 1, 0, 1)
        compare(time(), 978310801.0)
        time.set(2002, 1, 1, 1, 0, 0)
        compare(time(), 1009846800.0)
        compare(time(), 1009846802.0)

    @replace('time.time', test_time(None))
    def test_set_datetime_supplied(self, t):
        from datetime import datetime
        from time import time
        t.set(datetime(2001, 1, 1, 1, 0, 1))
        compare(time(), 978310801.0)
        # tz-aware datetimes are rejected, as for add().
        with ShouldRaise(ValueError(
            'Cannot set datetime with tzinfo set'
        )):
            t.set(datetime(2001, 1, 1, tzinfo=TestTZInfo()))

    @replace('time.time', test_time(None))
    def test_set_kw(self):
        from time import time
        time.set(year=2001, month=1, day=1, hour=1, second=1)
        compare(time(), 978310801.0)

    @replace('time.time', test_time(None))
    def test_set_kw_tzinfo(self):
        from time import time
        with ShouldRaise(TypeError('Cannot set tzinfo on ttime')):
            time.set(year=2001, tzinfo=TestTZInfo())

    @replace('time.time', test_time(None))
    def test_set_args_tzinfo(self):
        from time import time
        with ShouldRaise(TypeError('Cannot set tzinfo on ttime')):
            time.set(2002, 1, 2, 3, 4, 5, 6, TestTZInfo())

    @replace('time.time', test_time(None))
    def test_add_kw(self):
        from time import time
        time.add(year=2001, month=1, day=1, hour=1, second=1)
        compare(time(), 978310801.0)

    @replace('time.time', test_time(None))
    def test_add_tzinfo_kw(self):
        from time import time
        with ShouldRaise(TypeError('Cannot add tzinfo to ttime')):
            time.add(year=2001, tzinfo=TestTZInfo())

    @replace('time.time', test_time(None))
    def test_add_tzinfo_args(self):
        from time import time
        with ShouldRaise(TypeError('Cannot add tzinfo to ttime')):
            time.add(2001, 1, 2, 3, 4, 5, 6, TestTZInfo())

    @replace('time.time', test_time(2001, 1, 2, 3, 4, 5, 600000))
    def test_max_number_args(self):
        # The seventh positional argument is microseconds (600000us = .6s).
        from time import time
        compare(time(), 978404645.6)

    def test_max_number_tzinfo(self):
        # An eighth positional (tzinfo) argument is rejected up front,
        # when the fixture itself is constructed.
        with ShouldRaise(TypeError(
            "You don't want to use tzinfo with test_time"
        )):
            @replace('time.time',
                     test_time(2001, 1, 2, 3, 4, 5, 6, TestTZInfo()))
            def myfunc():
                pass  # pragma: no cover

    @replace('time.time', test_time(2001, 1, 2))
    def test_min_number_args(self):
        from time import time
        compare(time(), 978393600.0)

    @replace('time.time', test_time(
        year=2001,
        month=1,
        day=2,
        hour=3,
        minute=4,
        second=5,
        microsecond=6,
    ))
    def test_all_kw(self):
        from time import time
        compare(time(), 978404645.000006)

    def test_kw_tzinfo(self):
        # tzinfo is also rejected when passed as a keyword argument.
        with ShouldRaise(TypeError(
            "You don't want to use tzinfo with test_time"
        )):
            @replace('time.time', test_time(year=2001, tzinfo=TestTZInfo()))
            def myfunc():
                pass  # pragma: no cover

    def test_subsecond_deltas(self):
        # Fractional deltas work, without replacing time.time at all.
        time = test_time(delta=0.5)
        compare(time(), 978307200.0)
        compare(time(), 978307200.5)
        compare(time(), 978307201.0)

    def test_ms_deltas(self):
        time = test_time(delta=1000, delta_type='microseconds')
        compare(time(), 978307200.0)
        compare(time(), 978307200.001)
        compare(time(), 978307200.002)
diff --git a/testfixtures/tests/test_wrap.py b/testfixtures/tests/test_wrap.py
new file mode 100644
index 0000000..bd6ae5b
--- /dev/null
+++ b/testfixtures/tests/test_wrap.py
@@ -0,0 +1,236 @@
+# Copyright (c) 2008 Simplistix Ltd
+# See license.txt for license details.
+
+from mock import Mock
+from testfixtures import wrap, compare
+from unittest import TestCase, TestSuite, makeSuite
+
+
class TestWrap(TestCase):
    """
    Tests for the ``wrap`` decorator, which arranges for supplied
    callables to run before and after the decorated callable.

    A ``Mock`` records the calls so both ordering and arguments can be
    asserted via ``method_calls``.
    """

    def test_wrapping(self):

        m = Mock()

        @wrap(m.before, m.after)
        def test_function(r):
            m.test()
            return 'something'

        # Nothing runs at decoration time, only at call time.
        compare(m.method_calls, [])
        compare(test_function(), 'something')
        compare(m.method_calls, [
            ('before', (), {}),
            ('test', (), {}),
            ('after', (), {})
        ])

    def test_wrapping_only_before(self):

        before = Mock()

        # The after callable is optional.
        @wrap(before)
        def test_function():
            return 'something'

        self.assertFalse(before.called)
        compare(test_function(), 'something')
        compare(before.call_count, 1)

    def test_wrapping_wants_return(self):

        m = Mock()
        m.before.return_value = 'something'

        # Because test_function declares a parameter, the return value
        # of the before callable is passed in as that argument.
        @wrap(m.before, m.after)
        def test_function(r):
            m.test(r)
            return 'r:'+r

        compare(m.method_calls, [])
        compare(test_function(), 'r:something')
        compare(m.method_calls, [
            ('before', (), {}),
            ('test', ('something', ), {}),
            ('after', (), {})
        ])

    def test_wrapping_wants_arguments(self):

        # This only works in python 2.5+, for
        # earlier versions, you'll have to come
        # up with your own `partial` class...
        from functools import partial

        m = Mock()

        # partial() pre-binds arguments to the before/after callables.
        @wrap(partial(m.before, 1, x=2), partial(m.after, 3, y=4))
        def test_function(r):
            m.test()
            return 'something'

        compare(m.method_calls, [])
        compare(test_function(), 'something')
        compare(m.method_calls, [
            ('before', (1, ), {'x': 2}),
            ('test', (), {}),
            ('after', (3, ), {'y': 4})
        ])

    def test_multiple_wrappers(self):

        m = Mock()

        # Stacked wrappers: befores run innermost-decorator first,
        # afters run in the reverse order.
        @wrap(m.before2, m.after2)
        @wrap(m.before1, m.after1)
        def test_function():
            m.test_function()
            return 'something'

        compare(m.method_calls, [])
        compare(test_function(), 'something')
        compare(m.method_calls, [
            ('before1', (), {}),
            ('before2', (), {}),
            ('test_function', (), {}),
            ('after2', (), {}),
            ('after1', (), {}),
        ])

    def test_multiple_wrappers_wants_return(self):

        m = Mock()
        m.before1.return_value = 1
        m.before2.return_value = 2

        # Each declared parameter soaks up one before return value,
        # in the order the befores run.
        @wrap(m.before2, m.after2)
        @wrap(m.before1, m.after1)
        def test_function(r1, r2):
            m.test_function(r1, r2)
            return 'something'

        compare(m.method_calls, [])
        compare(test_function(), 'something')
        compare(m.method_calls, [
            ('before1', (), {}),
            ('before2', (), {}),
            ('test_function', (1, 2), {}),
            ('after2', (), {}),
            ('after1', (), {}),
        ])

    def test_multiple_wrappers_only_want_first_return(self):

        m = Mock()
        m.before1.return_value = 1

        # Only one parameter is declared, so only the first before's
        # return value is passed on; the second is discarded.
        @wrap(m.before2, m.after2)
        @wrap(m.before1, m.after1)
        def test_function(r1):
            m.test_function(r1)
            return 'something'

        compare(m.method_calls, [])
        compare(test_function(), 'something')
        compare(m.method_calls, [
            ('before1', (), {}),
            ('before2', (), {}),
            ('test_function', (1, ), {}),
            ('after2', (), {}),
            ('after1', (), {}),
        ])

    def test_wrap_method(self):

        m = Mock()

        # wrap also works on methods; self must not confuse it.
        class T:
            @wrap(m.before, m.after)
            def method(self):
                m.method()

        T().method()

        compare(m.method_calls, [
            ('before', (), {}),
            ('method', (), {}),
            ('after', (), {})
        ])

    def test_wrap_method_wants_return(self):

        m = Mock()
        m.before.return_value = 'return'

        # On a method, the before return value fills the first
        # parameter after self.
        class T:
            @wrap(m.before, m.after)
            def method(self, r):
                m.method(r)

        T().method()

        compare(m.method_calls, [
            ('before', (), {}),
            ('method', ('return', ), {}),
            ('after', (), {})
        ])

    def test_wrapping_different_functions(self):

        m = Mock()

        # Wrappings attached to one function must not leak onto another.
        @wrap(m.before1, m.after1)
        def test_function1():
            m.something1()
            return 'something1'

        @wrap(m.before2, m.after2)
        def test_function2():
            m.something2()
            return 'something2'

        compare(m.method_calls, [])
        compare(test_function1(), 'something1')
        compare(m.method_calls, [
            ('before1', (), {}),
            ('something1', (), {}),
            ('after1', (), {})
        ])
        compare(test_function2(), 'something2')
        compare(m.method_calls, [
            ('before1', (), {}),
            ('something1', (), {}),
            ('after1', (), {}),
            ('before2', (), {}),
            ('something2', (), {}),
            ('after2', (), {})
        ])

    def test_wrapping_local_vars(self):

        m = Mock()

        # Local variables and imports inside the wrapped function must
        # not upset the wrapping machinery.
        @wrap(m.before, m.after)
        def test_function():
            something = 1
            from testfixtures.tests import sample2
            m.test()
            return 'something'

        compare(m.method_calls, [])
        compare(test_function(), 'something')
        compare(m.method_calls, [
            ('before', (), {}),
            ('test', (), {}),
            ('after', (), {})
        ])

    def test_wrapping__name__(self):

        m = Mock()

        @wrap(m.before, m.after)
        def test_function():
            pass  # pragma: no cover

        # functools.wraps should preserve the original __name__.
        compare(test_function.__name__, 'test_function')
diff --git a/testfixtures/utils.py b/testfixtures/utils.py
new file mode 100644
index 0000000..7128c69
--- /dev/null
+++ b/testfixtures/utils.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2008-2011 Simplistix Ltd
+# See license.txt for license details.
+from textwrap import dedent
+
+from functools import wraps
+try:
+    # getargspec was removed in Python 3.11; getfullargspec is a
+    # drop-in replacement for the [0] (positional arg names) use here.
+    from inspect import getfullargspec as getargspec
+except ImportError:  # Python 2
+    from inspect import getargspec
+
+
def generator(*args):
    """
    Return a generator that lazily yields each of the supplied
    arguments, in the order they were given.
    """
    for value in args:
        yield value
+
+
class Wrappings:
    # Mutable holder for the callables attached to a wrapped function:
    # ``before`` runs in registration order, ``after`` in reverse order.
    def __init__(self):
        self.before = []
        self.after = []


def _arg_names(func):
    # Return the positional parameter names of ``func``.
    # ``inspect.getargspec`` was removed in Python 3.11, so prefer
    # ``getfullargspec`` and fall back only where it is unavailable.
    import inspect
    spec = getattr(inspect, 'getfullargspec', None)
    if spec is None:  # pragma: no cover - Python 2 only
        spec = inspect.getargspec
    return spec(func)[0]


def wrap(before, after=None):
    """
    A decorator that causes the supplied callables to be called before
    or after the wrapped callable, as appropriate.

    :param before: a callable invoked before the wrapped callable; its
                   return value is passed to the wrapped callable if it
                   still declares an unfilled positional parameter.
    :param after: an optional callable invoked after the wrapped
                  callable returns, even if it raises.
    """
    def wrapper(wrapped):
        if getattr(wrapped, '_wrappings', None) is None:
            w = Wrappings()

            @wraps(wrapped)
            def wrapping(*args, **kw):
                args = list(args)
                # Number of declared parameters not already supplied;
                # return values from `before` callables fill these
                # slots in order.
                to_add = len(_arg_names(wrapped)[len(args):])
                added = 0
                for c in w.before:
                    r = c()
                    if added < to_add:
                        args.append(r)
                        added += 1
                try:
                    return wrapped(*args, **kw)
                finally:
                    # after callables run even when wrapped raises
                    for c in w.after:
                        c()
            f = wrapping
            f._wrappings = w
        else:
            # Already wrapped: stack this before/after pair onto the
            # existing wrapper so befores run innermost-first and
            # afters run in reverse.
            f = wrapped
            w = f._wrappings
        w.before.append(before)
        if after is not None:
            w.after.insert(0, after)
        return f
    return wrapper
+
+
def extend_docstring(docstring, objs):
    """
    Append ``docstring`` to the dedented ``__doc__`` of every object
    in ``objs``.
    """
    for target in objs:
        try:
            target.__doc__ = dedent(target.__doc__) + docstring
        except (AttributeError, TypeError):
            # __doc__ may be absent (None) or read-only (Python 2
            # classes, pypy 4.0.1); skip such objects silently.
            pass
diff --git a/testfixtures/version.txt b/testfixtures/version.txt
new file mode 100644
index 0000000..89c318c
--- /dev/null
+++ b/testfixtures/version.txt
@@ -0,0 +1 @@
+4.13.3