author    Sebastian Ramacher <sramacher@debian.org>  2017-03-21 20:38:52 +0100
committer Sebastian Ramacher <sramacher@debian.org>  2017-03-21 20:38:52 +0100
commit    415d09b24717bd446dcdbddb8b9d6cfbf3728c83 (patch)
tree      08a76397764d276edc846a15dfe0fe1ec244f7f5
parent    7162b155c66836e91ed7c1056da5cf5a2c144678 (diff)
New upstream version 1.4.33
-rw-r--r--  AUTHORS  |  48
-rw-r--r--  CHANGELOG  |  2219
-rw-r--r--  LICENSE  |  38
-rw-r--r--  MANIFEST.in  |  19
-rw-r--r--  PKG-INFO  |  92
-rw-r--r--  README.rst  |  42
-rw-r--r--  conftest.py  |  142
-rw-r--r--  doc/Makefile  |  266
-rw-r--r--  doc/_templates/layout.html  |  36
-rw-r--r--  doc/announce/release-0.9.0.txt  |  14
-rw-r--r--  doc/announce/release-0.9.2.txt  |  54
-rw-r--r--  doc/announce/release-1.0.0.txt  |  126
-rw-r--r--  doc/announce/release-1.0.1.txt  |  96
-rw-r--r--  doc/announce/release-1.0.2.txt  |  10
-rw-r--r--  doc/announce/release-1.1.0.txt  |  230
-rw-r--r--  doc/announce/release-1.1.1.txt  |  96
-rw-r--r--  doc/announce/release-1.2.0.txt  |  232
-rw-r--r--  doc/announce/release-1.2.1.txt  |  132
-rw-r--r--  doc/announce/release-1.3.0.txt  |  1160
-rw-r--r--  doc/announce/release-1.3.1.txt  |  208
-rw-r--r--  doc/announce/release-1.3.2.txt  |  1440
-rw-r--r--  doc/announce/release-1.3.3.txt  |  52
-rw-r--r--  doc/announce/release-1.3.4.txt  |  44
-rw-r--r--  doc/announce/release-1.4.0.txt  |  94
-rw-r--r--  doc/announce/release-1.4.1.txt  |  94
-rw-r--r--  doc/announce/releases.txt  |  32
-rw-r--r--  doc/changelog.txt  |  6
-rw-r--r--  doc/code.txt  |  300
-rw-r--r--  doc/conf.py  |  526
-rw-r--r--  doc/download.html  |  36
-rw-r--r--  doc/example/genhtml.py  |  26
-rw-r--r--  doc/example/genhtmlcss.py  |  46
-rw-r--r--  doc/example/genxml.py  |  34
-rw-r--r--  doc/faq.txt  |  344
-rw-r--r--  doc/index.txt  |  82
-rw-r--r--  doc/install.txt  |  176
-rw-r--r--  doc/io.txt  |  118
-rw-r--r--  doc/links.inc  |  32
-rw-r--r--  doc/log.txt  |  416
-rw-r--r--  doc/misc.txt  |  186
-rw-r--r--  doc/path.txt  |  520
-rw-r--r--  doc/style.css  |  2088
-rw-r--r--  doc/xml.txt  |  328
-rw-r--r--  py.egg-info/PKG-INFO  |  92
-rw-r--r--  py.egg-info/SOURCES.txt  |  74
-rw-r--r--  py.egg-info/not-zip-safe  |  2
-rw-r--r--  py/__init__.py  |  302
-rw-r--r--  py/__metainfo.py  |  4
-rw-r--r--  py/_apipkg.py  |  362
-rw-r--r--  py/_builtin.py  |  496
-rw-r--r--  py/_code/__init__.py  |  2
-rw-r--r--  py/_code/_assertionnew.py  |  678
-rw-r--r--  py/_code/_assertionold.py  |  1110
-rw-r--r--  py/_code/_py2traceback.py  |  158
-rw-r--r--  py/_code/assertion.py  |  188
-rw-r--r--  py/_code/code.py  |  1574
-rw-r--r--  py/_code/source.py  |  822
-rw-r--r--  py/_error.py  |  178
-rw-r--r--  py/_iniconfig.py  |  324
-rw-r--r--  py/_io/__init__.py  |  2
-rw-r--r--  py/_io/capture.py  |  742
-rw-r--r--  py/_io/saferepr.py  |  142
-rw-r--r--  py/_io/terminalwriter.py  |  714
-rw-r--r--  py/_log/__init__.py  |  4
-rw-r--r--  py/_log/log.py  |  372
-rw-r--r--  py/_log/warning.py  |  152
-rw-r--r--  py/_path/__init__.py  |  2
-rw-r--r--  py/_path/cacheutil.py  |  228
-rw-r--r--  py/_path/common.py  |  879
-rw-r--r--  py/_path/local.py  |  1856
-rw-r--r--  py/_path/svnurl.py  |  760
-rw-r--r--  py/_path/svnwc.py  |  2480
-rw-r--r--  py/_process/__init__.py  |  2
-rw-r--r--  py/_process/cmdexec.py  |  98
-rw-r--r--  py/_process/forkedfunc.py  |  240
-rw-r--r--  py/_process/killproc.py  |  46
-rw-r--r--  py/_std.py  |  36
-rw-r--r--  py/_xmlgen.py  |  510
-rw-r--r--  py/test.py  |  20
-rw-r--r--  setup.cfg  |  25
-rw-r--r--  setup.py  |  100
-rw-r--r--  testing/__pycache__/conftest.cpython-27-PYTEST.pyc  |  bin 344 -> 0 bytes
-rw-r--r--  testing/__pycache__/conftest.cpython-35-PYTEST.pyc  |  bin 300 -> 0 bytes
-rw-r--r--  testing/__pycache__/conftest.cpython-35.pyc  |  bin 178 -> 0 bytes
-rw-r--r--  testing/__pycache__/test_iniconfig.cpython-27-PYTEST.pyc  |  bin 32701 -> 0 bytes
-rw-r--r--  testing/__pycache__/test_iniconfig.cpython-35-PYTEST.pyc  |  bin 26188 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_assertion.cpython-27-PYTEST.pyc  |  bin 35317 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_assertion.cpython-35-PYTEST.pyc  |  bin 30036 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_code.cpython-27-PYTEST.pyc  |  bin 20965 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_code.cpython-35-PYTEST.pyc  |  bin 16648 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_excinfo.cpython-27-PYTEST.pyc  |  bin 97695 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_excinfo.cpython-35-PYTEST.pyc  |  bin 86075 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_source.cpython-27-PYTEST.pyc  |  bin 80334 -> 0 bytes
-rw-r--r--  testing/code/__pycache__/test_source.cpython-35-PYTEST.pyc  |  bin 69169 -> 0 bytes
-rw-r--r--  testing/code/test_assertion.py  |  616
-rw-r--r--  testing/code/test_code.py  |  318
-rw-r--r--  testing/code/test_excinfo.py  |  1830
-rw-r--r--  testing/code/test_source.py  |  1302
-rw-r--r--  testing/conftest.py  |  6
-rw-r--r--  testing/conftest.pyc  |  bin 197 -> 0 bytes
-rw-r--r--  testing/io_/__init__.py  |  2
-rw-r--r--  testing/io_/__init__.pyc  |  bin 145 -> 0 bytes
-rw-r--r--  testing/io_/__pycache__/__init__.cpython-35.pyc  |  bin 141 -> 0 bytes
-rw-r--r--  testing/io_/__pycache__/test_capture.cpython-27-PYTEST.pyc  |  bin 47367 -> 0 bytes
-rw-r--r--  testing/io_/__pycache__/test_capture.cpython-35-PYTEST.pyc  |  bin 40746 -> 0 bytes
-rw-r--r--  testing/io_/__pycache__/test_saferepr.cpython-27-PYTEST.pyc  |  bin 13013 -> 0 bytes
-rw-r--r--  testing/io_/__pycache__/test_saferepr.cpython-35-PYTEST.pyc  |  bin 11294 -> 0 bytes
-rw-r--r--  testing/io_/__pycache__/test_terminalwriter.cpython-27-PYTEST.pyc  |  bin 37781 -> 0 bytes
-rw-r--r--  testing/io_/__pycache__/test_terminalwriter.cpython-35-PYTEST.pyc  |  bin 31856 -> 0 bytes
-rw-r--r--  testing/io_/test_capture.py  |  1002
-rw-r--r--  testing/io_/test_saferepr.py  |  156
-rw-r--r--  testing/io_/test_terminalwriter.py  |  542
-rw-r--r--  testing/log/__init__.pyc  |  bin 145 -> 0 bytes
-rw-r--r--  testing/log/__pycache__/__init__.cpython-35.pyc  |  bin 141 -> 0 bytes
-rw-r--r--  testing/log/__pycache__/test_log.cpython-27-PYTEST.pyc  |  bin 22605 -> 0 bytes
-rw-r--r--  testing/log/__pycache__/test_log.cpython-35-PYTEST.pyc  |  bin 19113 -> 0 bytes
-rw-r--r--  testing/log/__pycache__/test_warning.cpython-27-PYTEST.pyc  |  bin 8564 -> 0 bytes
-rw-r--r--  testing/log/__pycache__/test_warning.cpython-35-PYTEST.pyc  |  bin 7133 -> 0 bytes
-rw-r--r--  testing/log/test_log.py  |  380
-rw-r--r--  testing/log/test_warning.py  |  152
-rw-r--r--  testing/path/__init__.pyc  |  bin 274 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/__init__.cpython-35.pyc  |  bin 252 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/common.cpython-27-PYTEST.pyc  |  bin 85153 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/common.cpython-35-PYTEST.pyc  |  bin 75114 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/common.cpython-35.pyc  |  bin 21819 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/conftest.cpython-27-PYTEST.pyc  |  bin 5310 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/conftest.cpython-35-PYTEST.pyc  |  bin 4501 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/conftest.cpython-35.pyc  |  bin 3205 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/svntestbase.cpython-27-PYTEST.pyc  |  bin 5239 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/svntestbase.cpython-35-PYTEST.pyc  |  bin 4344 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/svntestbase.cpython-35.pyc  |  bin 1563 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_cacheutil.cpython-27-PYTEST.pyc  |  bin 9472 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_cacheutil.cpython-35-PYTEST.pyc  |  bin 8337 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_local.cpython-27-PYTEST.pyc  |  bin 133949 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_local.cpython-35-PYTEST.pyc  |  bin 117968 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_svnauth.cpython-27-PYTEST.pyc  |  bin 44805 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_svnauth.cpython-35-PYTEST.pyc  |  bin 38134 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_svnurl.cpython-27-PYTEST.pyc  |  bin 12356 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_svnurl.cpython-35-PYTEST.pyc  |  bin 10700 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_svnwc.cpython-27-PYTEST.pyc  |  bin 64663 -> 0 bytes
-rw-r--r--  testing/path/__pycache__/test_svnwc.cpython-35-PYTEST.pyc  |  bin 59081 -> 0 bytes
-rw-r--r--  testing/path/common.py  |  990
-rw-r--r--  testing/path/common.pyc  |  bin 24575 -> 0 bytes
-rw-r--r--  testing/path/conftest.py  |  160
-rw-r--r--  testing/path/conftest.pyc  |  bin 3919 -> 0 bytes
-rw-r--r--  testing/path/svntestbase.py  |  62
-rw-r--r--  testing/path/svntestbase.pyc  |  bin 1865 -> 0 bytes
-rw-r--r--  testing/path/test_cacheutil.py  |  168
-rw-r--r--  testing/path/test_local.py  |  1829
-rw-r--r--  testing/path/test_svnauth.py  |  908
-rw-r--r--  testing/path/test_svnurl.py  |  190
-rw-r--r--  testing/path/test_svnwc.py  |  1098
-rw-r--r--  testing/process/__init__.py  |  2
-rw-r--r--  testing/process/__init__.pyc  |  bin 149 -> 0 bytes
-rw-r--r--  testing/process/__pycache__/__init__.cpython-35.pyc  |  bin 145 -> 0 bytes
-rw-r--r--  testing/process/__pycache__/test_cmdexec.cpython-27-PYTEST.pyc  |  bin 6901 -> 0 bytes
-rw-r--r--  testing/process/__pycache__/test_cmdexec.cpython-35-PYTEST.pyc  |  bin 6003 -> 0 bytes
-rw-r--r--  testing/process/__pycache__/test_forkedfunc.cpython-27-PYTEST.pyc  |  bin 27173 -> 0 bytes
-rw-r--r--  testing/process/__pycache__/test_forkedfunc.cpython-35-PYTEST.pyc  |  bin 21850 -> 0 bytes
-rw-r--r--  testing/process/__pycache__/test_killproc.cpython-27-PYTEST.pyc  |  bin 2276 -> 0 bytes
-rw-r--r--  testing/process/__pycache__/test_killproc.cpython-35-PYTEST.pyc  |  bin 1835 -> 0 bytes
-rw-r--r--  testing/process/test_cmdexec.py  |  78
-rw-r--r--  testing/process/test_forkedfunc.py  |  354
-rw-r--r--  testing/process/test_killproc.py  |  32
-rw-r--r--  testing/root/__init__.py  |  2
-rw-r--r--  testing/root/__init__.pyc  |  bin 146 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/__init__.cpython-35.pyc  |  bin 142 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_builtin.cpython-27-PYTEST.pyc  |  bin 32154 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_builtin.cpython-35-PYTEST.pyc  |  bin 27120 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_error.cpython-27-PYTEST.pyc  |  bin 6976 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_error.cpython-35-PYTEST.pyc  |  bin 6045 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_py_imports.cpython-27-PYTEST.pyc  |  bin 5378 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_py_imports.cpython-35-PYTEST.pyc  |  bin 4401 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_std.cpython-27-PYTEST.pyc  |  bin 2399 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_std.cpython-35-PYTEST.pyc  |  bin 1881 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_xmlgen.cpython-27-PYTEST.pyc  |  bin 21965 -> 0 bytes
-rw-r--r--  testing/root/__pycache__/test_xmlgen.cpython-35-PYTEST.pyc  |  bin 17977 -> 0 bytes
-rw-r--r--  testing/root/test_builtin.py  |  358
-rw-r--r--  testing/root/test_error.py  |  96
-rw-r--r--  testing/root/test_py_imports.py  |  136
-rw-r--r--  testing/root/test_std.py  |  26
-rw-r--r--  testing/root/test_xmlgen.py  |  290
-rw-r--r--  testing/test_iniconfig.py  |  598
-rw-r--r--  tox.ini  |  64
184 files changed, 20883 insertions, 20920 deletions
diff --git a/AUTHORS b/AUTHORS
index 8c0cf9b..45df92a 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,24 +1,24 @@
-Holger Krekel, holger at merlinux eu
-Benjamin Peterson, benjamin at python org
-Ronny Pfannschmidt, Ronny.Pfannschmidt at gmx de
-Guido Wesdorp, johnny at johnnydebris net
-Samuele Pedroni, pedronis at openend se
-Carl Friedrich Bolz, cfbolz at gmx de
-Armin Rigo, arigo at tunes org
-Maciek Fijalkowski, fijal at genesilico pl
-Brian Dorsey, briandorsey at gmail com
-Floris Bruynooghe, flub at devork be
-merlinux GmbH, Germany, office at merlinux eu
-
-Contributors include::
-
-Ross Lawley
-Ralf Schmitt
-Chris Lamb
-Harald Armin Massa
-Martijn Faassen
-Ian Bicking
-Jan Balster
-Grig Gheorghiu
-Bob Ippolito
-Christian Tismer
+Holger Krekel, holger at merlinux eu
+Benjamin Peterson, benjamin at python org
+Ronny Pfannschmidt, Ronny.Pfannschmidt at gmx de
+Guido Wesdorp, johnny at johnnydebris net
+Samuele Pedroni, pedronis at openend se
+Carl Friedrich Bolz, cfbolz at gmx de
+Armin Rigo, arigo at tunes org
+Maciek Fijalkowski, fijal at genesilico pl
+Brian Dorsey, briandorsey at gmail com
+Floris Bruynooghe, flub at devork be
+merlinux GmbH, Germany, office at merlinux eu
+
+Contributors include::
+
+Ross Lawley
+Ralf Schmitt
+Chris Lamb
+Harald Armin Massa
+Martijn Faassen
+Ian Bicking
+Jan Balster
+Grig Gheorghiu
+Bob Ippolito
+Christian Tismer
diff --git a/CHANGELOG b/CHANGELOG
index fb68f26..78641ee 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,1105 +1,1114 @@
-
-1.4.32
-====================================================================
-
-- fix issue70: added ability to copy all stat info in py.path.local.copy.
-
-- make TerminalWriter.fullwidth a property. This results in the correct
- value when the terminal gets resized.
-
-- update supported html tags to include recent additions.
- Thanks Denis Afonso for the PR.
-
-- Remove internal code in ``Source.compile`` meant to support earlier Python 3 versions that produced the side effect
- of leaving ``None`` in ``sys.modules`` when called (see pytest-dev/pytest#2103).
- Thanks Bruno Oliveira for the PR.
-
-1.4.31
-==================================================
-
-- fix local().copy(dest, mode=True) to also work
- with unicode.
-
-- pass better error message with svn EEXIST paths
-
-1.4.30
-==================================================
-
-- fix issue68 an assert with a multiline list comprehension
- was not reported correctly. Thanks Henrik Heibuerger.
-
-
-1.4.29
-==================================================
-
-- fix issue55: revert a change to the statement finding algorithm
- which is used by pytest for generating tracebacks.
- Thanks Daniel Hahler for initial analysis.
-
-- fix pytest issue254 for when traceback rendering can't
- find valid source code. Thanks Ionel Cristian Maries.
-
-
-1.4.28
-==================================================
-
-- fix issue64 -- dirpath regression when "abs=True" is passed.
- Thanks Gilles Dartiguelongue.
-
-1.4.27
-==================================================
-
-- fix issue59: point to new repo site
-
-- allow a new ensuresyspath="append" mode for py.path.local.pyimport()
- so that a necessary import path is appended instead of prepended to
- sys.path
-
-- strike undocumented, untested argument to py.path.local.pypkgpath
-
-- speed up py.path.local.dirpath by a factor of 10
-
-1.4.26
-==================================================
-
-- avoid calling normpath twice in py.path.local
-
-- py.builtin._reraise properly reraises under Python3 now.
-
-- fix issue53 - remove module index, thanks jenisys.
-
-- allow posix path separators when "fnmatch" is called.
- Thanks Christian Long for the complete PR.
-
-1.4.25
-==================================================
-
-- fix issue52: vaguely fix py25 compat of py.path.local (it's not
- officially supported), also fix docs
-
-- fix pytest issue 589: when checking if we have a recursion error
- check for the specific "maximum recursion depth" text of the exception.
-
-1.4.24
-==================================================
-
-- Fix retrieving source when an else: line has another statement on
- the same line.
-
-- add localpath read_text/write_text/read_bytes/write_bytes methods
- as shortcuts and clearer bytes/text interfaces for read/write.
- Adapted from a PR from Paul Moore.
-
-
-1.4.23
-==================================================
-
-- use newer apipkg version which makes attribute access on
- alias modules resolve to None rather than an ImportError.
- This helps with code that uses inspect.getframeinfo()
- on py34 which causes a complete walk on sys.modules
- thus triggering the alias module to resolve and blowing
- up with ImportError. The negative side is that something
- like "py.test.X" will now result in None instead of "importerror: pytest"
- if pytest is not installed. But you shouldn't import "py.test"
- anyway anymore.
-
-- adapt one svn test to only check for any exception instead
- of specific ones because different svn versions cause different
- errors and we don't care.
-
-
-1.4.22
-==================================================
-
-- refactor class-level registry on ForkedFunc child start/finish
- event to become instance based (i.e. passed into the constructor)
-
-1.4.21
-==================================================
-
-- ForkedFunc now has class-level register_on_start/on_exit()
- methods to allow adding information in the boxed process.
- Thanks Marc Schlaich.
-
-- ForkedFunc in the child opens in "auto-flush" mode for
- stdout/stderr so that when a subprocess dies you can see
- its output even if it didn't flush itself.
-
-- refactor traceback generation in light of pytest issue 364
- (shortening tracebacks). you can now set a new traceback style
- on a per-entry basis such that a caller can force entries to be
- displayed as short or long entries.
-
-- win32: py.path.local.sysfind(name) will preferably return files with
- extensions so that if "X" and "X.bat" or "X.exe" is on the PATH,
- one of the latter two will be returned.
-
-1.4.20
-==================================================
-
-- ignore unicode decode errors in xmlescape. Thanks Anatoly Bubenkoff.
-
-- on python2 modify traceback.format_exception_only to match python3
- behaviour, namely trying to print unicode for Exception instances
-
-- use a safer way for serializing exception reports (helps to fix
- pytest issue413)
-
-Changes between 1.4.18 and 1.4.19
-==================================================
-
-- merge in apipkg fixes
-
-- some micro-optimizations in py/_code/code.py for speeding
- up pytest runs. Thanks Alex Gaynor for initiative.
-
-- check PY_COLORS=1 or PY_COLORS=0 to force coloring/not-coloring
- for py.io.TerminalWriter() independently from capabilities
- of the output file. Thanks Marc Abramowitz for the PR.
-
-- some fixes to unicode handling in assertion handling.
- Thanks for the PR to Floris Bruynooghe. (This helps
- to fix pytest issue 319).
-
-- depend on setuptools presence, remove distribute_setup
-
-Changes between 1.4.17 and 1.4.18
-==================================================
-
-- introduce path.ensure_dir() as a synonym for ensure(..., dir=1)
-
-- some unicode/python3 related fixes with respect to path manipulations
- (if you start passing unicode particular in py2 you might
- still get problems, though)
-
-Changes between 1.4.16 and 1.4.17
-==================================================
-
-- make py.io.TerminalWriter() prefer colorama if it is available
- and avoid empty lines when separator-lines are printed by
- being defensive and reducing the working terminalwidth by 1
-
-- introduce optional "expanduser" argument to py.path.local
- so that local("~", expanduser=True) gives the home
- directory of the current user.
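
  For illustration, a minimal sketch of the new argument, following the call
  quoted in the entry above::

      import py

      # "~" stays literal unless expanduser=True is passed explicitly
      home = py.path.local("~", expanduser=True)
      print(home)  # e.g. /home/alice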
-
-Changes between 1.4.15 and 1.4.16
-==================================================
-
-- fix issue35 - define __gt__ ordering between a local path
- and strings
-
-- fix issue36 - make chdir() work even if os.getcwd() fails.
-
-- add path.exists/isdir/isfile/islink shortcuts
-
-- introduce local path.as_cwd() context manager.
-
-- introduce p.write(ensure=1) and p.open(ensure=1)
- where ensure triggers creation of necessary parent
- dirs.
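
  A rough usage sketch of the helpers described in the entries above (the
  paths here are illustrative)::

      import py

      p = py.path.local("/tmp/demo/sub/hello.txt")
      p.write("content", ensure=1)      # ensure=1 creates missing parent dirs
      with p.open(ensure=1) as f:       # same for open()
          print(f.read())

      with py.path.local("/tmp").as_cwd():
          pass                          # previous cwd is restored on exit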
-
-
-Changes between 1.4.14 and 1.4.15
-==================================================
-
-- speed up some common calling patterns with
- LocalPath.listdir()/join/check/stat functions considerably.
-
-- fix an edge case with fnmatch where a glob style pattern appeared
- in an absolute path.
-
-Changes between 1.4.13 and 1.4.14
-==================================================
-
-- fix dupfile to work with files that don't
- carry a mode. Thanks Jason R. Coombs.
-
-Changes between 1.4.12 and 1.4.13
-==================================================
-
-- fix getting statementrange/compiling a file ending
- in a comment line without newline (on python2.5)
-- for local paths you can pass "mode=True" to a copy()
- in order to copy permission bits (underlying mechanism
- is using shutil.copymode)
-- add paths arguments to py.path.local.sysfind to restrict
- search to the directories in the path.
-- add isdir/isfile/islink to path.stat() objects allowing to perform
- multiple checks without calling out multiple times
-- drop py.path.local.__new__ in favour of a simpler __init__
-- iniconfig: allow "name:value" settings in config files, no space after
- "name" required
-- fix issue 27 - NameError in unlikely untested case of saferepr
-
-
-Changes between 1.4.11 and 1.4.12
-==================================================
-
-- fix python2.4 support - for pre-AST interpreters re-introduce
- old way to find statements in exceptions (closes pytest issue 209)
-- add tox.ini to distribution
-- fix issue23 - print *,** args information in tracebacks,
- thanks Manuel Jacob
-
-
-Changes between 1.4.10 and 1.4.11
-==================================================
-
-- use _ast to determine statement ranges when printing tracebacks -
- avoiding multi-second delays on some large test modules
-- fix an internal test to not use class-denoted pytest_funcarg__
-- fix a doc link to bug tracker
-- try to make terminal.write() printing more robust against
- unicodeencode/decode problems, amend according test
-- introduce py.builtin.text and py.builtin.bytes
- to point to respective str/unicode (py2) and bytes/str (py3) types
-- fix error handling on win32/py33 for ENODIR
-
-Changes between 1.4.9 and 1.4.10
-==================================================
-
-- terminalwriter: default to encode to UTF8 if no encoding is defined
- on the output stream
-- issue22: improve heuristic for finding the statementrange in exceptions
-
-Changes between 1.4.8 and 1.4.9
-==================================================
-
-- fix bug of path.visit() which would not recognize glob-style patterns
- for the "rec" recursion argument
-- changed iniconfig parsing to better conform, now the chars ";"
- and "#" only mark a comment at the stripped start of a line
-- include recent apipkg-1.2
-- change internal terminalwriter.line/reline logic to more nicely
- support file spinners
-
-Changes between 1.4.7 and 1.4.8
-==================================================
-
-- fix issue 13 - correct handling of the tag name object in xmlgen
-- fix issue 14 - support raw attribute values in xmlgen
-- fix windows terminalwriter printing/re-line problem
-- update distribute_setup.py to 0.6.27
-
-Changes between 1.4.6 and 1.4.7
-==================================================
-
-- fix issue11 - own test failure with python3.3 / Thanks Benjamin Peterson
-- help fix pytest issue 102
-
-Changes between 1.4.5 and 1.4.6
-==================================================
-
-- help to fix pytest issue99: unify output of
- ExceptionInfo.getrepr(style="native") with ...(style="long")
-- fix issue7: source.getstatementrange() now raises proper error
- if no valid statement can be found
-- fix issue8: fix code and tests of svnurl/svnwc to work on subversion 1.7 -
- note that path.status(updates=1) will not work properly since svn-1.7's status
- --xml output is broken.
-- make source.getstatementrange() more resilient about non-python code frames
- (as seen from jinja2)
-- make traceback recursion detection more resilient
- about the eval magic of a decorator library
-- iniconfig: add support for ; as comment starter
-- properly handle lists in xmlgen on python3
-- normalize py.code.getfslineno(obj) to always return a (string, int) tuple
- defaulting to ("", -1) respectively if no source code can be found for obj.
-
-Changes between 1.4.4 and 1.4.5
-==================================================
-
-- improve some unicode handling in terminalwriter and capturing
- (used by pytest)
-
-Changes between 1.4.3 and 1.4.4
-==================================================
-
-- a few fixes and assertion related refinements for pytest-2.1
-- guard py.code.Code and getfslineno against bogus input
- and make py.code.Code objects for object instance
- by looking up their __call__ function.
-- make exception presentation robust against invalid current cwd
-
-Changes between 1.4.2 and 1.4.3
-==================================================
-
-- fix terminal coloring issue for skipped tests (thanks Amaury)
-- fix issue4 - large calls to ansi_print (thanks Amaury)
-
-Changes between 1.4.1 and 1.4.2
-==================================================
-
-- fix (pytest) issue23 - tmpdir argument now works on Python3.2 and WindowsXP
- (which apparently starts to offer os.symlink now)
-
-- better error message for syntax errors from compiled code
-
-- small fix to better deal with (un-)colored terminal output on windows
-
-Changes between 1.4.0 and 1.4.1
-==================================================
-
-- fix issue1 - py.error.* classes to be pickleable
-
-- fix issue2 - on windows32 use PATHEXT as the list of potential
- extensions to find binaries with py.path.local.sysfind(commandname)
-
-- fix (pytest-) issue10 and refine assertion reinterpretation
- to avoid breaking if the __nonzero__ of an object fails
-
-- fix (pytest-) issue17 where python3 does not like "import *"
- leading to misrepresentation of import-errors in test modules
-
-- fix py.error.* attribute pypy access issue
-
-- allow path.samefile(arg) to succeed when arg is a relative filename
-
-- fix (pytest-) issue20 path.samefile(relpath) works as expected now
-
-- fix (pytest-) issue8 len(long_list) now shows the length of the list
-
-Changes between 1.3.4 and 1.4.0
-==================================================
-
-- py.test was moved to a separate "pytest" package. What remains is
- a stub hook which will proxy ``import py.test`` to ``pytest``.
-- all command line tools ("py.cleanup/lookup/countloc/..." moved
- to "pycmd" package)
-- removed the old and deprecated "py.magic" namespace
-- use apipkg-1.1 and make py.apipkg.initpkg|ApiModule available
-- add py.iniconfig module for brain-dead easy ini-config file parsing
-- introduce py.builtin.any()
-- path objects have a .dirname attribute now (equivalent to
- os.path.dirname(path))
-- path.visit() accepts breadthfirst (bf) and sort options
-- remove deprecated py.compat namespace
-
-Changes between 1.3.3 and 1.3.4
-==================================================
-
-- fix issue111: improve install documentation for windows
-- fix issue119: fix custom collectability of __init__.py as a module
-- fix issue116: --doctestmodules work with __init__.py files as well
-- fix issue115: unify internal exception passthrough/catching/GeneratorExit
-- fix issue118: new --tb=native for presenting cpython-standard exceptions
-
-Changes between 1.3.2 and 1.3.3
-==================================================
-
-- fix issue113: assertion representation problem with triple-quoted strings
- (and possibly other cases)
-- make conftest loading detect that a conftest file with the same
- content was already loaded, avoids surprises in nested directory structures
- which can be produced e.g. by Hudson. It probably removes the need to use
- --confcutdir in most cases.
-- fix terminal coloring for win32
- (thanks Michael Foord for reporting)
-- fix weirdness: make terminal width detection work on stdout instead of stdin
- (thanks Armin Ronacher for reporting)
-- remove trailing whitespace in all py/text distribution files
-
-Changes between 1.3.1 and 1.3.2
-==================================================
-
-New features
-++++++++++++++++++
-
-- fix issue103: introduce py.test.raises as context manager, examples::
-
- with py.test.raises(ZeroDivisionError):
- x = 0
- 1 / x
-
- with py.test.raises(RuntimeError) as excinfo:
- call_something()
-
- # you may do extra checks on excinfo.value|type|traceback here
-
- (thanks Ronny Pfannschmidt)
-
-- Funcarg factories can now dynamically apply a marker to a
- test invocation. This is for example useful if a factory
- provides parameters to a test which are expected-to-fail::
-
- def pytest_funcarg__arg(request):
- request.applymarker(py.test.mark.xfail(reason="flaky config"))
- ...
-
- def test_function(arg):
- ...
-
-- improved error reporting on collection and import errors. This makes
- use of a more general mechanism, namely that for custom test item/collect
- nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
- override it to return a string error representation of your choice
- which is going to be reported as a (red) string.
-
-- introduce '--junitprefix=STR' option to prepend a prefix
- to all reports in the junitxml file.
-
-Bug fixes / Maintenance
-++++++++++++++++++++++++++
-
-- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
- to Python2.7 (if you use the ``recwarn`` funcarg warnings will be enabled so that
- you can properly check for their existence in a cross-python manner).
-- refine --pdb: ignore xfailed tests, unify its TB-reporting and
- don't display failures again at the end.
-- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
-- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
-- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
-- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
-- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
-- fix py.code.compile(source) to generate unique filenames
-- fix assertion re-interp problems on PyPy, by deferring code
- compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
-- fix py.path.local.pyimport() to work with directories
-- streamline py.path.local.mkdtemp implementation and usage
-- don't print empty lines when showing junitxml-filename
-- add optional boolean ignore_errors parameter to py.path.local.remove
-- fix terminal writing on win32/python2.4
-- py.process.cmdexec() now tries harder to return properly encoded unicode objects
- on all python versions
-- install plain py.test/py.which scripts also for Jython, this helps to
- get canonical script paths in virtualenv situations
-- make path.bestrelpath(path) return ".", note that when calling
- X.bestrelpath the assumption is that X is a directory.
-- make initial conftest discovery ignore "--" prefixed arguments
-- fix resultlog plugin when used in an multicpu/multihost xdist situation
- (thanks Jakub Gustak)
-- perform distributed testing related reporting in the xdist-plugin
- rather than having dist-related code in the generic py.test
- distribution
-- fix homedir detection on Windows
-- ship distribute_setup.py version 0.6.13
-
-Changes between 1.3.0 and 1.3.1
-==================================================
-
-New features
-++++++++++++++++++
-
-- issue91: introduce new py.test.xfail(reason) helper
- to imperatively mark a test as expected to fail. Can
- be used from within setup and test functions. This is
- useful especially for parametrized tests when certain
- configurations are expected-to-fail. In this case the
- declarative approach with the @py.test.mark.xfail cannot
- be used as it would mark all configurations as xfail.
-
-- issue102: introduce new --maxfail=NUM option to stop
- test runs after NUM failures. This is a generalization
- of the '-x' or '--exitfirst' option which is now equivalent
- to '--maxfail=1'. Both '-x' and '--maxfail' will
- now also print a line near the end indicating the Interruption.
-
-- issue89: allow py.test.mark decorators to be used on classes
- (class decorators were introduced with python2.6) and
- also allow to have multiple markers applied at class/module level
- by specifying a list.
-
-- improve and refine letter reporting in the progress bar:
- . pass
- f failed test
- s skipped tests (reminder: use for dependency/platform mismatch only)
- x xfailed test (test that was expected to fail)
- X xpassed test (test that was expected to fail but passed)
-
- You can use any combination of 'fsxX' with the '-r' extended
- reporting option. The xfail/xpass results will show up as
- skipped tests in the junitxml output - which also fixes
- issue99.
-
-- make py.test.cmdline.main() return the exitstatus instead of raising
- SystemExit and also allow it to be called multiple times. This of
- course requires that your application and tests are properly torn
- down and don't have global state.
-
-Fixes / Maintenance
-++++++++++++++++++++++
-
-- improved traceback presentation:
- - improved and unified reporting for "--tb=short" option
- - Errors during test module imports are much shorter, (using --tb=short style)
- - raises shows shorter more relevant tracebacks
- - --fulltrace now more systematically makes traces longer / inhibits cutting
-
-- improve support for raises and other dynamically compiled code by
- manipulating python's linecache.cache instead of the previous
- rather hacky way of creating custom code objects. This makes
- it seamlessly work on Jython and PyPy where it previously didn't.
-
-- fix issue96: make capturing more resilient against Control-C
- interruptions (involved somewhat substantial refactoring
- to the underlying capturing functionality to avoid race
- conditions).
-
-- fix chaining of conditional skipif/xfail decorators - so it works now
- as expected to use multiple @py.test.mark.skipif(condition) decorators,
- including specific reporting which of the conditions lead to skipping.
-
-- fix issue95: late-import zlib so that it's not required
- for general py.test startup.
-
-- fix issue94: make reporting more robust against bogus source code
- (and internally be more careful when presenting unexpected byte sequences)
-
-
-Changes between 1.2.1 and 1.3.0
-==================================================
-
-- deprecate --report option in favour of a new shorter and easier to
- remember -r option: it takes a string argument consisting of any
- combination of 'xfsX' characters. They relate to the single chars
- you see during the dotted progress printing and will print an extra line
- per test at the end of the test run. This extra line indicates the exact
- position or test ID that you directly paste to the py.test cmdline in order
- to re-run a particular test.
-
-- allow external plugins to register new hooks via the new
- pytest_addhooks(pluginmanager) hook. The new release of
- the pytest-xdist plugin for distributed and looponfailing
- testing requires this feature.
-
-- add a new pytest_ignore_collect(path, config) hook to allow projects and
- plugins to define exclusion behaviour for their directory structure -
- for example you may define in a conftest.py this method::
-
- def pytest_ignore_collect(path):
- return path.check(link=1)
-
- to prevent even a collection try of any tests in symlinked dirs.
-
-- new pytest_pycollect_makemodule(path, parent) hook for
- allowing customization of the Module collection object for a
- matching test module.
-
-- extend and refine xfail mechanism:
- ``@py.test.mark.xfail(run=False)`` do not run the decorated test
- ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
- specifying ``--runxfail`` on command line virtually ignores xfail markers
-
-- expose (previously internal) commonly useful methods:
- py.io.get_terminal_width() -> return terminal width
- py.io.ansi_print(...) -> print colored/bold text on linux/win32
- py.io.saferepr(obj) -> return limited representation string
-
-- expose test outcome related exceptions as py.test.skip.Exception,
- py.test.raises.Exception etc., useful mostly for plugins
- doing special outcome interpretation/tweaking
-
-- (issue85) fix junitxml plugin to handle tests with non-ascii output
-
-- fix/refine python3 compatibility (thanks Benjamin Peterson)
-
-- fixes for making the jython/win32 combination work, note however:
- jython2.5.1/win32 does not provide a command line launcher, see
- http://bugs.jython.org/issue1491 . See pylib install documentation
- for how to work around.
-
-- fixes for handling of unicode exception values and unprintable objects
-
-- (issue87) fix unboundlocal error in assertionold code
-
-- (issue86) improve documentation for looponfailing
-
-- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
-
-- ship distribute_setup.py version 0.6.10
-
-- added links to the new capturelog and coverage plugins
-
-
-Changes between 1.2.0 and 1.2.1
-=====================================
-
-- refined usage and options for "py.cleanup"::
-
- py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
- py.cleanup -e .swp -e .cache # also remove files with these extensions
- py.cleanup -s # remove "build" and "dist" directory next to setup.py files
- py.cleanup -d # also remove empty directories
- py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
- py.cleanup -n # dry run, only show what would be removed
-
-- add a new option "py.test --funcargs" which shows available funcargs
- and their help strings (docstrings on their respective factory function)
- for a given test path
-
-- display a short and concise traceback if a funcarg lookup fails
-
-- early-load "conftest.py" files in non-dot first-level sub directories.
- allows to conveniently keep and access test-related options in a ``test``
- subdir and still add command line options.
-
-- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
-
-- fix issue78: always call python-level teardown functions even if the
- corresponding setup failed. This includes refinements for calling setup_module/class functions
- which will now only be called once instead of the previous behaviour where they'd be called
- multiple times if they raise an exception (including a Skipped exception). Any exception
- will be recorded and associated with all tests in the corresponding module/class scope.
-
-- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
-
-- fix pdb debugging to be in the correct frame on raises-related errors
-
-- update apipkg.py to fix an issue where recursive imports might
- unnecessarily break importing
-
-- fix plugin links
-
-Changes between 1.1.1 and 1.2
-=====================================
-
-- moved dist/looponfailing from py.test core into a new
- separately released pytest-xdist plugin.
-
-- new junitxml plugin: --junitxml=path will generate a junit style xml file
- which is processable e.g. by the Hudson CI system.
-
-- new option: --genscript=path will generate a standalone py.test script
- which will not need any libraries installed. thanks to Ralf Schmitt.
-
-- new option: --ignore will prevent specified path from collection.
- Can be specified multiple times.
-
-- new option: --confcutdir=dir will make py.test only consider conftest
- files that are relative to the specified dir.
-
-- new funcarg: "pytestconfig" is the pytest config object for access
- to command line args and can now be easily used in a test.
-
-- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
- disambiguate between Python3, python2.X, Jython and PyPy installed versions.
-
-- new "pytestconfig" funcarg allows access to test config object
-
-- new "pytest_report_header" hook can return additional lines
- to be displayed at the header of a test run.
-
-- (experimental) allow "py.test path::name1::name2::..." for pointing
- to a test within a test collection directly. This might eventually
- evolve as a full substitute to "-k" specifications.
-
-- streamlined plugin loading: order is now as documented in
- customize.html: setuptools, ENV, commandline, conftest.
- also setuptools entry point names are turned to canonical names ("pytest_*")
-
-- automatically skip tests that need 'capfd' but have no os.dup
-
-- allow pytest_generate_tests to be defined in classes as well
-
-- deprecate usage of 'disabled' attribute in favour of pytestmark
-- deprecate definition of Directory, Module, Class and Function nodes
- in conftest.py files. Use pytest collect hooks instead.
-
-- collection/item node specific runtest/collect hooks are only called exactly
- on matching conftest.py files, i.e. ones which are exactly below
- the filesystem path of an item
-
-- change: the first pytest_collect_directory hook to return something
- will now prevent further hooks to be called.
-
-- change: figleaf plugin now requires --figleaf to run. Also
- change its long command line options to be a bit shorter (see py.test -h).
-
-- change: pytest doctest plugin is now enabled by default and has a
- new option --doctest-glob to set a pattern for file matches.
-
-- change: remove internal py._* helper vars, only keep py._pydir
-
-- robustify capturing to survive if custom pytest_runtest_setup
- code failed and prevented the capturing setup code from running.
-
-- make py.test.* helpers provided by default plugins visible early -
- works transparently both for pydoc and for interactive sessions
- which will regularly see e.g. py.test.mark and py.test.importorskip.
-
-- simplify internal plugin manager machinery
-- simplify internal collection tree by introducing a RootCollector node
-
-- fix assert reinterpretation that sees a call containing "keyword=..."
-
-- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
- hooks on slaves during dist-testing, report module/session teardown
- hooks correctly.
-
-- fix issue65: properly handle dist-testing if no
- execnet/py lib installed remotely.
-
-- skip some install-tests if no execnet is available
-
-- fix docs, fix internal bin/ script generation
-
-
-Changes between 1.1.1 and 1.1.0
-=====================================
-
-- introduce automatic plugin registration via 'pytest11'
- entrypoints via setuptools' pkg_resources.iter_entry_points
-
-- fix py.test dist-testing to work with execnet >= 1.0.0b4
-
-- re-introduce py.test.cmdline.main() for better backward compatibility
-
-- svn paths: fix a bug with path.check(versioned=True) for svn paths,
- allow '%' in svn paths, make svnwc.update() default to interactive mode
- like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
-
-- refine distributed tarball to contain test and no pyc files
-
-- try harder to have deprecation warnings for py.compat.* accesses
- report a correct location
-
-Changes between 1.1.0 and 1.0.2
-=====================================
-
-* adjust and improve docs
-
-* remove py.rest tool and internal namespace - it was
- never really advertised and can still be used with
- the old release if needed. If there is interest
- it could be revived into its own tool I guess.
-
-* fix issue48 and issue59: raise an Error if the module
- from an imported test file does not seem to come from
- the filepath - avoids "same-name" confusion that has
- been reported repeatedly
-
-* merged Ronny's nose-compatibility hacks: now
- nose-style setup_module() and setup() functions are
- supported
-
-* introduce generalized py.test.mark function marking
-
-* reshuffle / refine command line grouping
-
-* deprecate parser.addgroup in favour of getgroup which creates option group
-
-* add --report command line option that allows to control showing of skipped/xfailed sections
-
-* generalized skipping: a new way to mark python functions with skipif or xfail
- at function, class and modules level based on platform or sys-module attributes.
-
-* extend py.test.mark decorator to allow for positional args
-
-* introduce and test "py.cleanup -d" to remove empty directories
-
-* fix issue #59 - robustify unittest test collection
-
-* make bpython/help interaction work by adding an __all__ attribute
- to ApiModule, cleanup initpkg
-
-* use MIT license for pylib, add some contributors
-
-* remove py.execnet code and substitute all usages with 'execnet' proper
-
-* fix issue50 - cached_setup now caches according to expectations
- for test functions with multiple arguments.
-
-* merge Jarko's fixes, issue #45 and #46
-
-* add the ability to specify a path for py.lookup to search in
-
-* fix a funcarg cached_setup bug probably only occurring
- in distributed testing and "module" scope with teardown.
-
-* many fixes and changes for making the code base python3 compatible,
- many thanks to Benjamin Peterson for helping with this.
-
-* consolidate builtins implementation to be compatible with >=2.3,
- add helpers to ease keeping 2 and 3k compatible code
-
-* deprecate py.compat.doctest|subprocess|textwrap|optparse
-
-* deprecate py.magic.autopath, remove py/magic directory
-
-* move pytest assertion handling to py/code and a pytest_assertion
- plugin, add "--no-assert" option, deprecate py.magic namespaces
- in favour of (less) py.code ones.
-
-* consolidate and cleanup py/code classes and files
-
-* cleanup py/misc, move tests to bin-for-dist
-
-* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
-
-* consolidate py.log implementation, remove old approach.
-
-* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
- text/unicode and byte-streams (uses underlying standard lib io.*
- if available)
-
-* make py.unittest_convert helper script available which converts "unittest.py"
- style files into the simpler assert/direct-test-classes py.test/nosetests
- style. The script was written by Laura Creighton.
-
-* simplified internal localpath implementation
-
-Changes between 1.0.1 and 1.0.2
-=====================================
-
-* fixing packaging issues, triggered by fedora redhat packaging,
- also added doc, examples and contrib dirs to the tarball.
-
-* added a documentation link to the new django plugin.
-
-Changes between 1.0.0 and 1.0.1
-=====================================
-
-* added a 'pytest_nose' plugin which handles nose.SkipTest,
- nose-style function/method/generator setup/teardown and
- tries to report functions correctly.
-
-* capturing of unicode writes or encoded strings to sys.stdout/err
- work better, also terminalwriting was adapted and somewhat
- unified between windows and linux.
-
-* improved documentation layout and content a lot
-
-* added a "--help-config" option to show conftest.py / ENV-var names for
- all longopt cmdline options, and some special conftest.py variables.
- renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
-
-* fix issue #27: better reporting on non-collectable items given on commandline
- (e.g. pyc files)
-
-* fix issue #33: added --version flag (thanks Benjamin Peterson)
-
-* fix issue #32: adding support for "incomplete" paths to wcpath.status()
-
-* "Test" prefixed classes are *not* collected by default anymore if they
- have an __init__ method
-
-* monkeypatch setenv() now accepts a "prepend" parameter
-
-* improved reporting of collection error tracebacks
-
-* simplified multicall mechanism and plugin architecture,
- renamed some internal methods and argnames
-
-Changes between 1.0.0b9 and 1.0.0
-=====================================
-
-* more terse reporting: try to show filesystem paths relative to the current dir
-* improve xfail output a bit
-
-Changes between 1.0.0b8 and 1.0.0b9
-=====================================
-
-* cleanly handle and report final teardown of test setup
-
-* fix svn-1.6 compat issue with py.path.svnwc().versioned()
- (thanks Wouter Vanden Hove)
-
-* setup/teardown or collection problems now show as ERRORs
- or with big "E"'s in the progress lines. they are reported
- and counted separately.
-
-* dist-testing: properly handle test items that get locally
- collected but cannot be collected on the remote side - often
- due to platform/dependency reasons
-
-* simplified py.test.mark API - see keyword plugin documentation
-
-* integrate better with logging: capturing now by default captures
- test functions and their immediate setup/teardown in a single stream
-
-* capsys and capfd funcargs now have a readouterr() and a close() method
- (underlyingly py.io.StdCapture/FD objects are used which grew a
- readouterr() method as well to return snapshots of captured out/err)
-
-* make assert-reinterpretation work better with comparisons not
- returning bools (reported with numpy, thanks Maciej Fijalkowski)
-
-* reworked per-test output capturing into the pytest_iocapture.py plugin
- and thus removed capturing code from config object
-
-* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
-
-
-Changes between 1.0.0b7 and 1.0.0b8
-=====================================
-
-* pytest_unittest-plugin is now enabled by default
-
-* introduced pytest_keyboardinterrupt hook and
- refined pytest_sessionfinish hooked, added tests.
-
-* workaround a buggy logging module interaction ("closing already closed
- files"). Thanks to Sridhar Ratnakumar for triggering.
-
-* if plugins use "py.test.importorskip" for importing
- a dependency only a warning will be issued instead
- of exiting the testing process.
-
-* many improvements to docs:
- - refined funcargs doc , use the term "factory" instead of "provider"
- - added a new talk/tutorial doc page
- - better download page
- - better plugin docstrings
- - added new plugins page and automatic doc generation script
-
-* fixed teardown problem related to partially failing funcarg setups
- (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
- always invoked even if the "pytest_runtest_setup" failed.
-
-* tweaked doctest output for docstrings in py modules,
- thanks Radomir.
-
-Changes between 1.0.0b3 and 1.0.0b7
-=============================================
-
-* renamed py.test.xfail back to py.test.mark.xfail to avoid
- two ways to decorate for xfail
-
-* re-added py.test.mark decorator for setting keywords on functions
- (it was actually documented so removing it was not nice)
-
-* remove scope-argument from request.addfinalizer() because
- request.cached_setup has the scope arg. TOOWTDI.
-
-* perform setup finalization before reporting failures
-
-* apply modified patches from Andreas Kloeckner to allow
- test functions to have no func_code (#22) and to make
- "-k" and function keywords work (#20)
-
-* apply patch from Daniel Peolzleithner (issue #23)
-
-* resolve issue #18, multiprocessing.Manager() and
- redirection clash
-
-* make __name__ == "__channelexec__" for remote_exec code
-
-Changes between 1.0.0b1 and 1.0.0b3
-=============================================
-
-* plugin classes are removed: one now defines
- hooks directly in conftest.py or global pytest_*.py
- files.
-
-* added new pytest_namespace(config) hook that allows
- to inject helpers directly to the py.test.* namespace.
-
-* documented and refined many hooks
-
-* added new style of generative tests via
- pytest_generate_tests hook that integrates
- well with function arguments.
-
-
-Changes between 0.9.2 and 1.0.0b1
-=============================================
-
-* introduced new "funcarg" setup method,
- see doc/test/funcarg.txt
-
-* introduced plugin architecture and many
- new py.test plugins, see
- doc/test/plugins.txt
-
-* teardown_method is now guaranteed to get
- called after a test method has run.
-
-* new method: py.test.importorskip(mod,minversion)
- will either import or call py.test.skip()
-
-* completely revised internal py.test architecture
-
-* new py.process.ForkedFunc object allowing to
- fork execution of a function to a sub process
- and getting a result back.
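
  A small sketch of the ForkedFunc usage described above (POSIX only, since it
  relies on os.fork; attribute names as documented for py.process.ForkedFunc)::

      import py

      def compute():
          return 6 * 7

      ff = py.process.ForkedFunc(compute)   # runs compute() in a forked child
      result = ff.waitfinish()              # waits and collects the outcome
      assert result.retval == 42
      print(result.out, result.err)         # captured stdout/stderr of the child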
-
-XXX lots of things missing here XXX
-
-Changes between 0.9.1 and 0.9.2
-===============================
-
-* refined installation and metadata, created new setup.py,
- now based on setuptools/ez_setup (thanks to Ralf Schmitt
- for his support).
-
-* improved the way of making py.* scripts available in
- windows environments, they are now added to the
- Scripts directory as ".cmd" files.
-
-* py.path.svnwc.status() now is more complete and
- uses xml output from the 'svn' command if available
- (Guido Wesdorp)
-
-* fix for py.path.svn* to work with svn 1.5
- (Chris Lamb)
-
-* fix path.relto(otherpath) method on windows to
- use normcase for checking if a path is relative.
-
-* py.test's traceback is better parseable from editors
- (follows the filenames:LINENO: MSG convention)
- (thanks to Osmo Salomaa)
-
-* fix to javascript-generation, "py.test --runbrowser"
- should work more reliably now
-
-* removed previously accidentally added
- py.test.broken and py.test.notimplemented helpers.
-
-* there now is a py.__version__ attribute
-
-Changes between 0.9.0 and 0.9.1
-===============================
-
-This is a fairly complete list of changes between 0.9 and 0.9.1, which can
-serve as a reference for developers.
-
-* allowing + signs in py.path.svn urls [39106]
-* fixed support for Failed exceptions without excinfo in py.test [39340]
-* added support for killing processes for Windows (as well as platforms that
- support os.kill) in py.misc.killproc [39655]
-* added setup/teardown for generative tests to py.test [40702]
-* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
-* fixed problem with calling .remove() on wcpaths of non-versioned files in
- py.path [44248]
-* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
-* fail to run greenlet tests when pypy is available, but without stackless
- [45294]
-* small fixes in rsession tests [45295]
-* fixed issue with 2.5 type representations in py.test [45483, 45484]
-* made that internal reporting issues displaying is done atomically in py.test
- [45518]
-* made that non-existing files are ignored by the py.lookup script [45519]
-* improved exception name creation in py.test [45535]
-* made that less threads are used in execnet [merge in 45539]
-* removed lock required for atomical reporting issue displaying in py.test
- [45545]
-* removed globals from execnet [45541, 45547]
-* refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit
- get called in 2.5 (py.execnet) [45548]
-* fixed bug in joining threads in py.execnet's servemain [45549]
-* refactored py.test.rsession tests to not rely on exact output format anymore
- [45646]
-* using repr() on test outcome [45647]
-* added 'Reason' classes for py.test.skip() [45648, 45649]
-* killed some unnecessary sanity check in py.test.collect [45655]
-* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
- usable by Administrators [45901]
-* added support for locking and non-recursive commits to py.path.svnwc [45994]
-* locking files in py.execnet to prevent CPython from segfaulting [46010]
-* added export() method to py.path.svnurl
-* fixed -d -x in py.test [47277]
-* fixed argument concatenation problem in py.path.svnwc [49423]
-* restore py.test behaviour that it exits with code 1 when there are failures
- [49974]
-* don't fail on html files that don't have an accompanying .txt file [50606]
-* fixed 'utestconvert.py < input' [50645]
-* small fix for code indentation in py.code.source [50755]
-* fix _docgen.py documentation building [51285]
-* improved checks for source representation of code blocks in py.test [51292]
-* added support for passing authentication to py.path.svn* objects [52000,
- 52001]
-* removed sorted() call for py.apigen tests in favour of [].sort() to support
- Python 2.3 [52481]
+1.4.33
+====================================================================
+
+- avoid imports in calls to py.path.local().fnmatch(). Thanks Andreas Pelme for
+ the PR.
+
+- fix issue106: Naive unicode encoding when calling fspath() in python2. Thanks Tiago Nobrega for the PR.
+
+- fix issue110: unittest.TestCase.assertWarns fails with py imported.
+
+1.4.32
+====================================================================
+
+- fix issue70: added ability to copy all stat info in py.path.local.copy.
+
+- make TerminalWriter.fullwidth a property. This results in the correct
+ value when the terminal gets resized.
+
+- update supported html tags to include recent additions.
+ Thanks Denis Afonso for the PR.
+
+- Remove internal code in ``Source.compile`` meant to support earlier Python 3 versions that produced the side effect
+ of leaving ``None`` in ``sys.modules`` when called (see pytest-dev/pytest#2103).
+ Thanks Bruno Oliveira for the PR.
+
+1.4.31
+==================================================
+
+- fix local().copy(dest, mode=True) to also work
+ with unicode.
+
+- pass better error message with svn EEXIST paths
+
+1.4.30
+==================================================
+
+- fix issue68 an assert with a multiline list comprehension
+ was not reported correctly. Thanks Henrik Heibuerger.
+
+
+1.4.29
+==================================================
+
+- fix issue55: revert a change to the statement finding algorithm
+ which is used by pytest for generating tracebacks.
+ Thanks Daniel Hahler for initial analysis.
+
+- fix pytest issue254 for when traceback rendering can't
+ find valid source code. Thanks Ionel Cristian Maries.
+
+
+1.4.28
+==================================================
+
+- fix issue64 -- dirpath regression when "abs=True" is passed.
+ Thanks Gilles Dartiguelongue.
+
+1.4.27
+==================================================
+
+- fix issue59: point to new repo site
+
+- allow a new ensuresyspath="append" mode for py.path.local.pyimport()
+ so that a necessary import path is appended instead of prepended to
+ sys.path (a brief usage sketch follows at the end of this section)
+
+- strike undocumented, untested argument to py.path.local.pypkgpath
+
+- speed up py.path.local.dirpath by a factor of 10
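
  A brief usage sketch for the ensuresyspath="append" mode mentioned above
  (the module path is illustrative)::

      import py
      import sys

      mod = py.path.local("scripts/helper.py").pyimport(ensuresyspath="append")
      # the relevant import root is appended to sys.path rather than prepended,
      # so already-importable modules keep taking precedence
      print(sys.path[-1])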
+
+1.4.26
+==================================================
+
+- avoid calling normpath twice in py.path.local
+
+- py.builtin._reraise properly reraises under Python3 now.
+
+- fix issue53 - remove module index, thanks jenisys.
+
+- allow posix path separators when "fnmatch" is called.
+ Thanks Christian Long for the complete PR.
+
+1.4.25
+==================================================
+
+- fix issue52: partially fix py25 compat of py.path.local (it's not
+  officially supported), also fix docs
+
+- fix pytest issue 589: when checking if we have a recursion error,
+  check for the specific "maximum recursion depth" text of the exception.
+
+1.4.24
+==================================================
+
+- Fix retrieving source when an else: line has another statement on
+  the same line.
+
+- add localpath read_text/write_text/read_bytes/write_bytes methods
+ as shortcuts and clearer bytes/text interfaces for read/write.
+ Adapted from a PR from Paul Moore.
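+
+  A small sketch of the new shortcuts (hedged: the ``encoding`` argument is
+  assumed from the text/bytes split described above)::
+
+    import py
+    p = py.path.local("notes.txt")
+    p.write_text(u"h\xe9llo", encoding="utf-8")
+    assert p.read_text(encoding="utf-8") == u"h\xe9llo"
+    assert p.read_bytes() == b"h\xc3\xa9llo"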
+
+
+1.4.23
+==================================================
+
+- use newer apipkg version which makes attribute access on
+  alias modules resolve to None rather than an ImportError.
+  This helps with code that uses inspect.getframeinfo()
+  on py34, which causes a complete walk of sys.modules,
+  thus triggering the alias module to resolve and blowing
+  up with ImportError. The downside is that something
+  like "py.test.X" will now result in None instead of
+  "ImportError: pytest" if pytest is not installed. But you
+  shouldn't import "py.test" anymore anyway.
+
+- adapt one svn test to only check for any exception instead
+ of specific ones because different svn versions cause different
+ errors and we don't care.
+
+
+1.4.22
+==================================================
+
+- refactor class-level registry on ForkedFunc child start/finish
+ event to become instance based (i.e. passed into the constructor)
+
+1.4.21
+==================================================
+
+- ForkedFunc now has class-level register_on_start/on_exit()
+ methods to allow adding information in the boxed process.
+ Thanks Marc Schlaich.
+
+- ForkedFunc in the child opens in "auto-flush" mode for
+ stdout/stderr so that when a subprocess dies you can see
+ its output even if it didn't flush itself.
+
+- refactor traceback generation in light of pytest issue 364
+ (shortening tracebacks). you can now set a new traceback style
+ on a per-entry basis such that a caller can force entries to be
+  displayed as short or long entries.
+
+- win32: py.path.local.sysfind(name) will preferably return files with
+ extensions so that if "X" and "X.bat" or "X.exe" is on the PATH,
+ one of the latter two will be returned.
+
+1.4.20
+==================================================
+
+- ignore unicode decode errors in xmlescape. Thanks Anatoly Bubenkoff.
+
+- on python2 modify traceback.format_exception_only to match python3
+ behaviour, namely trying to print unicode for Exception instances
+
+- use a safer way for serializing exception reports (helps to fix
+ pytest issue413)
+
+Changes between 1.4.18 and 1.4.19
+==================================================
+
+- merge in apipkg fixes
+
+- some micro-optimizations in py/_code/code.py for speeding
+ up pytest runs. Thanks Alex Gaynor for initiative.
+
+- check PY_COLORS=1 or PY_COLORS=0 to force coloring/not-coloring
+ for py.io.TerminalWriter() independently from capabilities
+ of the output file. Thanks Marc Abramowitz for the PR.
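+
+  For example (a sketch; the ``green`` markup keyword is one of the usual
+  TerminalWriter color flags)::
+
+    import os, py
+    os.environ["PY_COLORS"] = "1"       # "0" would force plain output instead
+    tw = py.io.TerminalWriter()
+    tw.line("all green", green=True)    # colored even if the stream is not a tty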
+
+- some fixes to unicode handling in assertion handling.
+ Thanks for the PR to Floris Bruynooghe. (This helps
+ to fix pytest issue 319).
+
+- depend on setuptools presence, remove distribute_setup
+
+Changes between 1.4.17 and 1.4.18
+==================================================
+
+- introduce path.ensure_dir() as a synonym for ensure(..., dir=1)
+
+- some unicode/python3 related fixes wrt path manipulations
+  (if you start passing unicode, particularly on py2, you might
+  still get problems, though)
+
+Changes between 1.4.16 and 1.4.17
+==================================================
+
+- make py.io.TerminalWriter() prefer colorama if it is available
+ and avoid empty lines when separator-lines are printed by
+ being defensive and reducing the working terminalwidth by 1
+
+- introduce optional "expanduser" argument to py.path.local
+  so that local("~", expanduser=True) gives the home
+  directory of "user".
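+
+  For example (a minimal sketch)::
+
+    import py
+    home = py.path.local("~", expanduser=True)   # the calling user's home directory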
+
+Changes between 1.4.15 and 1.4.16
+==================================================
+
+- fix issue35 - define __gt__ ordering between a local path
+ and strings
+
+- fix issue36 - make chdir() work even if os.getcwd() fails.
+
+- add path.exists/isdir/isfile/islink shortcuts
+
+- introduce local path.as_cwd() context manager.
+
+- introduce p.write(ensure=1) and p.open(ensure=1)
+  where ensure triggers creation of necessary parent
+ dirs.
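+
+  A combined sketch of the helpers introduced above (hedged: the file name
+  "build/out.txt" is made up)::
+
+    import os, py
+    p = py.path.local("build/out.txt")
+    p.write("data", ensure=1)              # creates the "build" directory as needed
+    assert p.exists() and p.isfile()
+    with p.dirpath().as_cwd():             # temporarily chdir into "build"
+        assert os.getcwd() == str(p.dirpath())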
+
+
+Changes between 1.4.14 and 1.4.15
+==================================================
+
+- considerably speed up some common calling patterns of the
+  LocalPath.listdir()/join/check/stat functions.
+
+- fix an edge case with fnmatch where a glob style pattern appeared
+ in an absolute path.
+
+Changes between 1.4.13 and 1.4.14
+==================================================
+
+- fix dupfile to work with files that don't
+ carry a mode. Thanks Jason R. Coombs.
+
+Changes between 1.4.12 and 1.4.13
+==================================================
+
+- fix getting statementrange/compiling a file ending
+ in a comment line without newline (on python2.5)
+- for local paths you can pass "mode=True" to a copy()
+ in order to copy permission bits (underlying mechanism
+ is using shutil.copymode)
+- add a paths argument to py.path.local.sysfind to restrict the
+  search to the directories given in it.
+- add isdir/isfile/islink to path.stat() objects, allowing multiple
+  checks to be performed without calling out multiple times
+- drop py.path.local.__new__ in favour of a simpler __init__
+- iniconfig: allow "name:value" settings in config files, no space after
+ "name" required
+- fix issue 27 - NameError in unlikely untested case of saferepr
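+
+A short sketch of two of the items above, the new path.stat() helpers and the
+"name:value" ini syntax (hedged: the ini content and file name are made up, and
+IniConfig is assumed to accept a ``data=`` argument for in-memory parsing)::
+
+    import py
+    st = py.path.local(".").stat()
+    assert st.isdir() and not st.isfile()
+
+    cfg = py.iniconfig.IniConfig("example.ini", data="[section]\nname:value\n")
+    assert cfg["section"]["name"] == "value"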
+
+
+Changes between 1.4.11 and 1.4.12
+==================================================
+
+- fix python2.4 support - for pre-AST interpreters re-introduce
+ old way to find statements in exceptions (closes pytest issue 209)
+- add tox.ini to distribution
+- fix issue23 - print *,** args information in tracebacks,
+ thanks Manuel Jacob
+
+
+Changes between 1.4.10 and 1.4.11
+==================================================
+
+- use _ast to determine statement ranges when printing tracebacks -
+ avoiding multi-second delays on some large test modules
+- fix an internal test to not use class-denoted pytest_funcarg__
+- fix a doc link to bug tracker
+- try to make terminal.write() printing more robust against
+ unicodeencode/decode problems, amend according test
+- introduce py.builtin.text and py.builtin.bytes
+ to point to respective str/unicode (py2) and bytes/str (py3) types
+- fix error handling on win32/py33 for ENODIR
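+
+A tiny sketch of the py.builtin.text/bytes aliases mentioned above::
+
+    import py
+    assert isinstance(u"x", py.builtin.text)
+    assert isinstance(b"x", py.builtin.bytes)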
+
+Changes between 1.4.9 and 1.4.10
+==================================================
+
+- terminalwriter: default to encode to UTF8 if no encoding is defined
+ on the output stream
+- issue22: improve heuristic for finding the statementrange in exceptions
+
+Changes between 1.4.8 and 1.4.9
+==================================================
+
+- fix bug of path.visit() which would not recognize glob-style patterns
+ for the "rec" recursion argument
+- changed iniconfig parsing to better conform, now the chars ";"
+ and "#" only mark a comment at the stripped start of a line
+- include recent apipkg-1.2
+- change internal terminalwriter.line/reline logic to more nicely
+ support file spinners
+
+Changes between 1.4.7 and 1.4.8
+==================================================
+
+- fix issue 13 - correct handling of the tag name object in xmlgen
+- fix issue 14 - support raw attribute values in xmlgen
+- fix windows terminalwriter printing/re-line problem
+- update distribute_setup.py to 0.6.27
+
+Changes between 1.4.6 and 1.4.7
+==================================================
+
+- fix issue11 - own test failure with python3.3 / Thanks Benjamin Peterson
+- help fix pytest issue 102
+
+Changes between 1.4.5 and 1.4.6
+==================================================
+
+- help to fix pytest issue99: unify output of
+ ExceptionInfo.getrepr(style="native") with ...(style="long")
+- fix issue7: source.getstatementrange() now raises proper error
+ if no valid statement can be found
+- fix issue8: fix code and tests of svnurl/svnwc to work on subversion 1.7 -
+  note that path.status(updates=1) will not work properly because svn-1.7's
+  status --xml output is broken.
+- make source.getstatementrange() more resilient about non-python code frames
+  (as seen from jinja2)
+- make traceback recursion detection more resilient
+  about the eval magic of a decorator library
+- iniconfig: add support for ; as comment starter
+- properly handle lists in xmlgen on python3
+- normalize py.code.getfslineno(obj) to always return a (string, int) tuple
+ defaulting to ("", -1) respectively if no source code can be found for obj.
+
+Changes between 1.4.4 and 1.4.5
+==================================================
+
+- improve some unicode handling in terminalwriter and capturing
+ (used by pytest)
+
+Changes between 1.4.3 and 1.4.4
+==================================================
+
+- a few fixes and assertion related refinements for pytest-2.1
+- guard py.code.Code and getfslineno against bogus input
+  and make py.code.Code objects for object instances
+ by looking up their __call__ function.
+- make exception presentation robust against invalid current cwd
+
+Changes between 1.4.2 and 1.4.3
+==================================================
+
+- fix terminal coloring issue for skipped tests (thanks Amaury)
+- fix issue4 - large calls to ansi_print (thanks Amaury)
+
+Changes between 1.4.1 and 1.4.2
+==================================================
+
+- fix (pytest) issue23 - tmpdir argument now works on Python3.2 and WindowsXP
+ (which apparently starts to offer os.symlink now)
+
+- better error message for syntax errors from compiled code
+
+- small fix to better deal with (un-)colored terminal output on windows
+
+Changes between 1.4.0 and 1.4.1
+==================================================
+
+- fix issue1 - py.error.* classes to be pickleable
+
+- fix issue2 - on windows32 use PATHEXT as the list of potential
+  extensions to find binaries with py.path.local.sysfind(commandname)
+
+- fix (pytest-) issue10 and refine assertion reinterpretation
+ to avoid breaking if the __nonzero__ of an object fails
+
+- fix (pytest-) issue17 where python3 does not like "import *"
+ leading to misrepresentation of import-errors in test modules
+
+- fix py.error.* attribute pypy access issue
+
+- allow path.samefile(arg) to succeed when arg is a relative filename
+
+- fix (pytest-) issue20 path.samefile(relpath) works as expected now
+
+- fix (pytest-) issue8 len(long_list) now shows the length of the list
+
+Changes between 1.3.4 and 1.4.0
+==================================================
+
+- py.test was moved to a separate "pytest" package. What remains is
+ a stub hook which will proxy ``import py.test`` to ``pytest``.
+- all command line tools ("py.cleanup/lookup/countloc/...") moved
+  to the "pycmd" package
+- removed the old and deprecated "py.magic" namespace
+- use apipkg-1.1 and make py.apipkg.initpkg|ApiModule available
+- add py.iniconfig module for brain-dead easy ini-config file parsing
+- introduce py.builtin.any()
+- path objects have a .dirname attribute now (equivalent to
+ os.path.dirname(path))
+- path.visit() accepts breadthfirst (bf) and sort options
+- remove deprecated py.compat namespace
+
+Changes between 1.3.3 and 1.3.4
+==================================================
+
+- fix issue111: improve install documentation for windows
+- fix issue119: fix custom collectability of __init__.py as a module
+- fix issue116: --doctestmodules work with __init__.py files as well
+- fix issue115: unify internal exception passthrough/catching/GeneratorExit
+- fix issue118: new --tb=native for presenting cpython-standard exceptions
+
+Changes between 1.3.2 and 1.3.3
+==================================================
+
+- fix issue113: assertion representation problem with triple-quoted strings
+ (and possibly other cases)
+- make conftest loading detect that a conftest file with the same
+ content was already loaded, avoids surprises in nested directory structures
+ which can be produced e.g. by Hudson. It probably removes the need to use
+ --confcutdir in most cases.
+- fix terminal coloring for win32
+ (thanks Michael Foord for reporting)
+- fix weirdness: make terminal width detection work on stdout instead of stdin
+ (thanks Armin Ronacher for reporting)
+- remove trailing whitespace in all py/text distribution files
+
+Changes between 1.3.1 and 1.3.2
+==================================================
+
+New features
+++++++++++++++++++
+
+- fix issue103: introduce py.test.raises as context manager, examples::
+
+ with py.test.raises(ZeroDivisionError):
+ x = 0
+ 1 / x
+
+ with py.test.raises(RuntimeError) as excinfo:
+ call_something()
+
+ # you may do extra checks on excinfo.value|type|traceback here
+
+ (thanks Ronny Pfannschmidt)
+
+- Funcarg factories can now dynamically apply a marker to a
+ test invocation. This is for example useful if a factory
+ provides parameters to a test which are expected-to-fail::
+
+ def pytest_funcarg__arg(request):
+ request.applymarker(py.test.mark.xfail(reason="flaky config"))
+ ...
+
+ def test_function(arg):
+ ...
+
+- improved error reporting on collection and import errors. This makes
+ use of a more general mechanism, namely that for custom test item/collect
+ nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
+ override it to return a string error representation of your choice
+ which is going to be reported as a (red) string.
+
+- introduce '--junitprefix=STR' option to prepend a prefix
+ to all reports in the junitxml file.
+
+Bug fixes / Maintenance
+++++++++++++++++++++++++++
+
+- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
+  with Python2.7 (if you use the ``recwarn`` funcarg, warnings will be enabled so that
+  you can properly check for their existence in a cross-python manner).
+- refine --pdb: ignore xfailed tests, unify its TB-reporting and
+ don't display failures again at the end.
+- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
+- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
+- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
+- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
+- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
+- fix py.code.compile(source) to generate unique filenames
+- fix assertion re-interp problems on PyPy, by deferring code
+ compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
+- fix py.path.local.pyimport() to work with directories
+- streamline py.path.local.mkdtemp implementation and usage
+- don't print empty lines when showing junitxml-filename
+- add optional boolean ignore_errors parameter to py.path.local.remove
+- fix terminal writing on win32/python2.4
+- py.process.cmdexec() now tries harder to return properly encoded unicode objects
+ on all python versions
+- install plain py.test/py.which scripts also for Jython, this helps to
+ get canonical script paths in virtualenv situations
+- make path.bestrelpath(path) return ".", note that when calling
+ X.bestrelpath the assumption is that X is a directory.
+- make initial conftest discovery ignore "--" prefixed arguments
+- fix resultlog plugin when used in a multicpu/multihost xdist situation
+ (thanks Jakub Gustak)
+- perform distributed testing related reporting in the xdist-plugin
+ rather than having dist-related code in the generic py.test
+ distribution
+- fix homedir detection on Windows
+- ship distribute_setup.py version 0.6.13
+
+Changes between 1.3.0 and 1.3.1
+==================================================
+
+New features
+++++++++++++++++++
+
+- issue91: introduce new py.test.xfail(reason) helper
+ to imperatively mark a test as expected to fail. Can
+ be used from within setup and test functions. This is
+ useful especially for parametrized tests when certain
+ configurations are expected-to-fail. In this case the
+ declarative approach with the @py.test.mark.xfail cannot
+ be used as it would mark all configurations as xfail.
+
+- issue102: introduce new --maxfail=NUM option to stop
+ test runs after NUM failures. This is a generalization
+ of the '-x' or '--exitfirst' option which is now equivalent
+ to '--maxfail=1'. Both '-x' and '--maxfail' will
+ now also print a line near the end indicating the Interruption.
+
+- issue89: allow py.test.mark decorators to be used on classes
+ (class decorators were introduced with python2.6) and
+  also allow multiple markers to be applied at class/module level
+ by specifying a list.
+
+- improve and refine letter reporting in the progress bar:
+ . pass
+ f failed test
+ s skipped tests (reminder: use for dependency/platform mismatch only)
+ x xfailed test (test that was expected to fail)
+ X xpassed test (test that was expected to fail but passed)
+
+ You can use any combination of 'fsxX' with the '-r' extended
+ reporting option. The xfail/xpass results will show up as
+ skipped tests in the junitxml output - which also fixes
+ issue99.
+
+- make py.test.cmdline.main() return the exitstatus instead of raising
+  SystemExit and also allow it to be called multiple times. This of
+  course requires that your application and tests are properly torn
+  down and don't have global state.
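+
+  For example (a hedged sketch; the test path is hypothetical)::
+
+    import py
+    exitstatus = py.test.cmdline.main(["-x", "tests/"])
+    print("py.test exited with %d" % exitstatus)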
+
+Fixes / Maintenance
+++++++++++++++++++++++
+
+- improved traceback presentation:
+ - improved and unified reporting for "--tb=short" option
+  - Errors during test module imports are much shorter (using --tb=short style)
+  - raises shows shorter, more relevant tracebacks
+ - --fulltrace now more systematically makes traces longer / inhibits cutting
+
+- improve support for raises and other dynamically compiled code by
+ manipulating python's linecache.cache instead of the previous
+ rather hacky way of creating custom code objects. This makes
+  it work seamlessly on Jython and PyPy where it previously didn't.
+
+- fix issue96: make capturing more resilient against Control-C
+ interruptions (involved somewhat substantial refactoring
+ to the underlying capturing functionality to avoid race
+ conditions).
+
+- fix chaining of conditional skipif/xfail decorators - using multiple
+  @py.test.mark.skipif(condition) decorators now works as expected,
+  including specific reporting of which condition led to skipping.
+
+- fix issue95: late-import zlib so that it's not required
+ for general py.test startup.
+
+- fix issue94: make reporting more robust against bogus source code
+ (and internally be more careful when presenting unexpected byte sequences)
+
+
+Changes between 1.2.1 and 1.3.0
+==================================================
+
+- deprecate --report option in favour of a new shorter and easier to
+ remember -r option: it takes a string argument consisting of any
+ combination of 'xfsX' characters. They relate to the single chars
+ you see during the dotted progress printing and will print an extra line
+ per test at the end of the test run. This extra line indicates the exact
+  position or test ID that you can directly paste to the py.test cmdline in order
+ to re-run a particular test.
+
+- allow external plugins to register new hooks via the new
+ pytest_addhooks(pluginmanager) hook. The new release of
+ the pytest-xdist plugin for distributed and looponfailing
+ testing requires this feature.
+
+- add a new pytest_ignore_collect(path, config) hook to allow projects and
+ plugins to define exclusion behaviour for their directory structure -
+ for example you may define in a conftest.py this method::
+
+ def pytest_ignore_collect(path):
+ return path.check(link=1)
+
+ to prevent even a collection try of any tests in symlinked dirs.
+
+- new pytest_pycollect_makemodule(path, parent) hook for
+ allowing customization of the Module collection object for a
+ matching test module.
+
+- extend and refine xfail mechanism:
+ ``@py.test.mark.xfail(run=False)`` do not run the decorated test
+ ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on command line virtually ignores xfail markers
+
+- expose (previously internal) commonly useful methods:
+  py.io.get_terminal_width() -> return terminal width
+ py.io.ansi_print(...) -> print colored/bold text on linux/win32
+ py.io.saferepr(obj) -> return limited representation string
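+
+  For example (a sketch of two of the helpers above)::
+
+    import py
+    width = py.io.get_terminal_width()         # falls back to a default if undetectable
+    short = py.io.saferepr(list(range(1000)))  # representation is cut to a limited size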
+
+- expose test outcome related exceptions as py.test.skip.Exception,
+ py.test.raises.Exception etc., useful mostly for plugins
+ doing special outcome interpretation/tweaking
+
+- (issue85) fix junitxml plugin to handle tests with non-ascii output
+
+- fix/refine python3 compatibility (thanks Benjamin Peterson)
+
+- fixes for making the jython/win32 combination work, note however:
+ jython2.5.1/win32 does not provide a command line launcher, see
+ http://bugs.jython.org/issue1491 . See pylib install documentation
+ for how to work around.
+
+- fixes for handling of unicode exception values and unprintable objects
+
+- (issue87) fix unboundlocal error in assertionold code
+
+- (issue86) improve documentation for looponfailing
+
+- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
+
+- ship distribute_setup.py version 0.6.10
+
+- added links to the new capturelog and coverage plugins
+
+
+Changes between 1.2.1 and 1.2.0
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+ allows to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+ according setup failed. This includes refinements for calling setup_module/class functions
+ which will now only be called once instead of the previous behaviour where they'd be called
+ multiple times if they raise an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the according module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
+
+Changes between 1.2 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+  evolve into a full substitute for "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+  also setuptools entry point names are turned to canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+  will now prevent further hooks from being called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+ entrypoints via setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain test and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
+
+Changes between 1.1.0 and 1.0.2
+=====================================
+
+* adjust and improve docs
+
+* remove py.rest tool and internal namespace - it was
+ never really advertised and can still be used with
+ the old release if needed. If there is interest
+  it could be revived into its own tool, I guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup which creates option group
+
+* add --report command line option that allows to control showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+  at function, class and module level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches according to expectations
+ for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
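+
+  For example (a minimal sketch)::
+
+    import py
+    t = py.io.TextIO()
+    t.write(u"hi")
+    b = py.io.BytesIO()
+    b.write(b"hi")
+    assert t.getvalue() == u"hi" and b.getvalue() == b"hi"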
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
+
+Changes between 1.0.1 and 1.0.2
+=====================================
+
+* fixing packaging issues, triggered by fedora redhat packaging,
+ also added doc, examples and contrib dirs to the tarball.
+
+* added a documentation link to the new django plugin.
+
+Changes between 1.0.0 and 1.0.1
+=====================================
+
+* added a 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* capturing of unicode writes or encoded strings to sys.stdout/err
+  works better; also terminal writing was adapted and somewhat
+  unified between windows and linux.
+
+* improved documentation layout and content a lot
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
+
+Changes between 1.0.0b9 and 1.0.0
+=====================================
+
+* more terse reporting: try to show filesystem paths relative to the current dir
+* improve xfail output a bit
+
+Changes between 1.0.0b8 and 1.0.0b9
+=====================================
+
+* cleanly handle and report final teardown of test setup
+
+* fix svn-1.6 compat issue with py.path.svnwc().versioned()
+ (thanks Wouter Vanden Hove)
+
+* setup/teardown or collection problems now show as ERRORs
+  or with big "E"'s in the progress lines. They are reported
+  and counted separately.
+
+* dist-testing: properly handle test items that get locally
+ collected but cannot be collected on the remote side - often
+ due to platform/dependency reasons
+
+* simplified py.test.mark API - see keyword plugin documentation
+
+* integrate better with logging: capturing now by default captures
+ test functions and their immediate setup/teardown in a single stream
+
+* capsys and capfd funcargs now have a readouterr() and a close() method
+ (underlyingly py.io.StdCapture/FD objects are used which grew a
+ readouterr() method as well to return snapshots of captured out/err)
+
+* make assert-reinterpretation work better with comparisons not
+  returning bools (reported with numpy, thanks Maciej Fijalkowski)
+
+* reworked per-test output capturing into the pytest_iocapture.py plugin
+ and thus removed capturing code from config object
+
+* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
+
+
+Changes between 1.0.0b7 and 1.0.0b8
+=====================================
+
+* pytest_unittest-plugin is now enabled by default
+
+* introduced pytest_keyboardinterrupt hook and
+  refined the pytest_sessionfinish hook, added tests.
+
+* work around a buggy logging module interaction ("closing already closed
+ files"). Thanks to Sridhar Ratnakumar for triggering.
+
+* if plugins use "py.test.importorskip" for importing
+ a dependency only a warning will be issued instead
+ of exiting the testing process.
+
+* many improvements to docs:
+  - refined funcargs doc, use the term "factory" instead of "provider"
+ - added a new talk/tutorial doc page
+ - better download page
+ - better plugin docstrings
+ - added new plugins page and automatic doc generation script
+
+* fixed teardown problem related to partially failing funcarg setups
+ (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
+ always invoked even if the "pytest_runtest_setup" failed.
+
+* tweaked doctest output for docstrings in py modules,
+ thanks Radomir.
+
+Changes between 1.0.0b3 and 1.0.0b7
+=============================================
+
+* renamed py.test.xfail back to py.test.mark.xfail to avoid
+ two ways to decorate for xfail
+
+* re-added py.test.mark decorator for setting keywords on functions
+ (it was actually documented so removing it was not nice)
+
+* remove scope-argument from request.addfinalizer() because
+ request.cached_setup has the scope arg. TOOWTDI.
+
+* perform setup finalization before reporting failures
+
+* apply modified patches from Andreas Kloeckner to allow
+ test functions to have no func_code (#22) and to make
+ "-k" and function keywords work (#20)
+
+* apply patch from Daniel Peolzleithner (issue #23)
+
+* resolve issue #18, multiprocessing.Manager() and
+ redirection clash
+
+* make __name__ == "__channelexec__" for remote_exec code
+
+Changes between 1.0.0b1 and 1.0.0b3
+=============================================
+
+* plugin classes are removed: one now defines
+ hooks directly in conftest.py or global pytest_*.py
+ files.
+
+* added new pytest_namespace(config) hook that allows
+ to inject helpers directly to the py.test.* namespace.
+
+* documented and refined many hooks
+
+* added new style of generative tests via
+ pytest_generate_tests hook that integrates
+ well with function arguments.
+
+
+Changes between 0.9.2 and 1.0.0b1
+=============================================
+
+* introduced new "funcarg" setup method,
+ see doc/test/funcarg.txt
+
+* introduced plugin architecture and many
+ new py.test plugins, see
+ doc/test/plugins.txt
+
+* teardown_method is now guaranteed to get
+ called after a test method has run.
+
+* new method: py.test.importorskip(mod,minversion)
+ will either import or call py.test.skip()
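+
+  For example (hedged; "docutils" stands in for any optional dependency)::
+
+    import py
+    docutils = py.test.importorskip("docutils", minversion="0.3")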
+
+* completely revised internal py.test architecture
+
+* new py.process.ForkedFunc object allowing execution of a function
+  to be forked to a subprocess and a result to be retrieved.
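+
+  For example (a minimal sketch; requires os.fork, i.e. a POSIX platform)::
+
+    import py
+
+    def answer():
+        return 42
+
+    result = py.process.ForkedFunc(answer).waitfinish()
+    assert result.retval == 42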
+
+XXX lots of things missing here XXX
+
+Changes between 0.9.1 and 0.9.2
+===============================
+
+* refined installation and metadata, created new setup.py,
+ now based on setuptools/ez_setup (thanks to Ralf Schmitt
+ for his support).
+
+* improved the way of making py.* scripts available in
+ windows environments, they are now added to the
+ Scripts directory as ".cmd" files.
+
+* py.path.svnwc.status() now is more complete and
+ uses xml output from the 'svn' command if available
+ (Guido Wesdorp)
+
+* fix for py.path.svn* to work with svn 1.5
+ (Chris Lamb)
+
+* fix path.relto(otherpath) method on windows to
+ use normcase for checking if a path is relative.
+
+* py.test's traceback is better parseable from editors
+ (follows the filenames:LINENO: MSG convention)
+ (thanks to Osmo Salomaa)
+
+* fix to javascript-generation, "py.test --runbrowser"
+ should work more reliably now
+
+* removed previously accidentally added
+ py.test.broken and py.test.notimplemented helpers.
+
+* there now is a py.__version__ attribute
+
+Changes between 0.9.0 and 0.9.1
+===============================
+
+This is a fairly complete list of changes between 0.9 and 0.9.1, which can
+serve as a reference for developers.
+
+* allowing + signs in py.path.svn urls [39106]
+* fixed support for Failed exceptions without excinfo in py.test [39340]
+* added support for killing processes for Windows (as well as platforms that
+ support os.kill) in py.misc.killproc [39655]
+* added setup/teardown for generative tests to py.test [40702]
+* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
+* fixed problem with calling .remove() on wcpaths of non-versioned files in
+ py.path [44248]
+* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
+* fail to run greenlet tests when pypy is available, but without stackless
+ [45294]
+* small fixes in rsession tests [45295]
+* fixed issue with 2.5 type representations in py.test [45483, 45484]
+* made displaying of internal reporting issues atomic in py.test
+  [45518]
+* made that non-existing files are ignored by the py.lookup script [45519]
+* improved exception name creation in py.test [45535]
+* made that less threads are used in execnet [merge in 45539]
+* removed lock required for atomic reporting issue displaying in py.test
+ [45545]
+* removed globals from execnet [45541, 45547]
+* refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit
+ get called in 2.5 (py.execnet) [45548]
+* fixed bug in joining threads in py.execnet's servemain [45549]
+* refactored py.test.rsession tests to not rely on exact output format anymore
+ [45646]
+* using repr() on test outcome [45647]
+* added 'Reason' classes for py.test.skip() [45648, 45649]
+* killed some unnecessary sanity check in py.test.collect [45655]
+* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
+ usable by Administrators [45901]
+* added support for locking and non-recursive commits to py.path.svnwc [45994]
+* locking files in py.execnet to prevent CPython from segfaulting [46010]
+* added export() method to py.path.svnurl
+* fixed -d -x in py.test [47277]
+* fixed argument concatenation problem in py.path.svnwc [49423]
+* restore py.test behaviour that it exits with code 1 when there are failures
+ [49974]
+* don't fail on html files that don't have an accompanying .txt file [50606]
+* fixed 'utestconvert.py < input' [50645]
+* small fix for code indentation in py.code.source [50755]
+* fix _docgen.py documentation building [51285]
+* improved checks for source representation of code blocks in py.test [51292]
+* added support for passing authentication to py.path.svn* objects [52000,
+ 52001]
+* removed sorted() call for py.apigen tests in favour of [].sort() to support
+ Python 2.3 [52481]
diff --git a/LICENSE b/LICENSE
index 31ecdfb..790c261 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,19 +1,19 @@
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
diff --git a/MANIFEST.in b/MANIFEST.in
index 31fb010..6ca2888 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,9 +1,10 @@
-include CHANGELOG
-include AUTHORS
-include README.txt
-include setup.py
-include LICENSE
-include conftest.py
-include tox.ini
-graft doc
-graft testing
+include CHANGELOG
+include AUTHORS
+include README.rst
+include setup.py
+include LICENSE
+include conftest.py
+include tox.ini
+graft doc
+graft testing
+global-exclude *.pyc
diff --git a/PKG-INFO b/PKG-INFO
index fadff4c..2bb58ea 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,46 +1,46 @@
-Metadata-Version: 1.1
-Name: py
-Version: 1.4.32
-Summary: library with cross-python path, ini-parsing, io, code, log facilities
-Home-page: http://pylib.readthedocs.org/
-Author: holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others
-Author-email: pytest-dev@python.org
-License: MIT license
-Description: .. image:: https://img.shields.io/pypi/pyversions/pytest.svg
- :target: https://pypi.org/project/py
- .. image:: https://img.shields.io/travis/pytest-dev/py.svg
- :target: https://travis-ci.org/pytest-dev/py
-
- The py lib is a Python development support library featuring
- the following tools and modules:
-
- * ``py.path``: uniform local and svn path objects
- * ``py.apipkg``: explicit API control and lazy-importing
- * ``py.iniconfig``: easy parsing of .ini files
- * ``py.code``: dynamic code generation and introspection
-
- NOTE: prior to the 1.4 release this distribution used to
- contain py.test which is now its own package, see http://pytest.org
-
- For questions and more information please visit http://pylib.readthedocs.org
-
- Bugs and issues: https://github.com/pytest-dev/py
-
- Authors: Holger Krekel and others, 2004-2016
-
-Platform: unix
-Platform: linux
-Platform: osx
-Platform: cygwin
-Platform: win32
-Classifier: Development Status :: 6 - Mature
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Topic :: Software Development :: Testing
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
+Metadata-Version: 1.1
+Name: py
+Version: 1.4.33
+Summary: library with cross-python path, ini-parsing, io, code, log facilities
+Home-page: http://py.readthedocs.io/
+Author: holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others
+Author-email: pytest-dev@python.org
+License: MIT license
+Description: .. image:: https://img.shields.io/pypi/pyversions/pytest.svg
+ :target: https://pypi.org/project/py
+ .. image:: https://img.shields.io/travis/pytest-dev/py.svg
+ :target: https://travis-ci.org/pytest-dev/py
+
+ The py lib is a Python development support library featuring
+ the following tools and modules:
+
+ * ``py.path``: uniform local and svn path objects
+ * ``py.apipkg``: explicit API control and lazy-importing
+ * ``py.iniconfig``: easy parsing of .ini files
+ * ``py.code``: dynamic code generation and introspection
+
+ NOTE: prior to the 1.4 release this distribution used to
+ contain py.test which is now its own package, see http://pytest.org
+
+ For questions and more information please visit http://py.readthedocs.org
+
+ Bugs and issues: https://github.com/pytest-dev/py
+
+ Authors: Holger Krekel and others, 2004-2017
+
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: cygwin
+Platform: win32
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
diff --git a/README.rst b/README.rst
index e836b7b..c37410c 100644
--- a/README.rst
+++ b/README.rst
@@ -1,21 +1,21 @@
-.. image:: https://img.shields.io/pypi/pyversions/pytest.svg
- :target: https://pypi.org/project/py
-.. image:: https://img.shields.io/travis/pytest-dev/py.svg
- :target: https://travis-ci.org/pytest-dev/py
-
-The py lib is a Python development support library featuring
-the following tools and modules:
-
-* ``py.path``: uniform local and svn path objects
-* ``py.apipkg``: explicit API control and lazy-importing
-* ``py.iniconfig``: easy parsing of .ini files
-* ``py.code``: dynamic code generation and introspection
-
-NOTE: prior to the 1.4 release this distribution used to
-contain py.test which is now its own package, see http://pytest.org
-
-For questions and more information please visit http://pylib.readthedocs.org
-
-Bugs and issues: https://github.com/pytest-dev/py
-
-Authors: Holger Krekel and others, 2004-2016
+.. image:: https://img.shields.io/pypi/pyversions/pytest.svg
+ :target: https://pypi.org/project/py
+.. image:: https://img.shields.io/travis/pytest-dev/py.svg
+ :target: https://travis-ci.org/pytest-dev/py
+
+The py lib is a Python development support library featuring
+the following tools and modules:
+
+* ``py.path``: uniform local and svn path objects
+* ``py.apipkg``: explicit API control and lazy-importing
+* ``py.iniconfig``: easy parsing of .ini files
+* ``py.code``: dynamic code generation and introspection
+
+NOTE: prior to the 1.4 release this distribution used to
+contain py.test which is now its own package, see http://pytest.org
+
+For questions and more information please visit http://py.readthedocs.org
+
+Bugs and issues: https://github.com/pytest-dev/py
+
+Authors: Holger Krekel and others, 2004-2017
diff --git a/conftest.py b/conftest.py
index 11c2d44..69f3893 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1,71 +1,71 @@
-import py
-import sys
-
-pytest_plugins = 'doctest pytester'.split()
-
-collect_ignore = ['build', 'doc/_build']
-
-
-import os, py
-pid = os.getpid()
-
-def pytest_addoption(parser):
- group = parser.getgroup("pylib", "py lib testing options")
- group.addoption('--runslowtests',
- action="store_true", dest="runslowtests", default=False,
- help=("run slow tests"))
-
-def pytest_funcarg__sshhost(request):
- val = request.config.getvalue("sshhost")
- if val:
- return val
- py.test.skip("need --sshhost option")
-def pytest_generate_tests(metafunc):
- multi = getattr(metafunc.function, 'multi', None)
- if multi is not None:
- assert len(multi.kwargs) == 1
- for name, l in multi.kwargs.items():
- for val in l:
- metafunc.addcall(funcargs={name: val})
- elif 'anypython' in metafunc.funcargnames:
- for name in ('python2.4', 'python2.5', 'python2.6',
- 'python2.7', 'python3.1', 'pypy-c', 'jython'):
- metafunc.addcall(id=name, param=name)
-
-# XXX copied from execnet's conftest.py - needs to be merged
-winpymap = {
- 'python2.7': r'C:\Python27\python.exe',
- 'python2.6': r'C:\Python26\python.exe',
- 'python2.5': r'C:\Python25\python.exe',
- 'python2.4': r'C:\Python24\python.exe',
- 'python3.1': r'C:\Python31\python.exe',
-}
-
-def getexecutable(name, cache={}):
- try:
- return cache[name]
- except KeyError:
- executable = py.path.local.sysfind(name)
- if executable:
- if name == "jython":
- import subprocess
- popen = subprocess.Popen([str(executable), "--version"],
- universal_newlines=True, stderr=subprocess.PIPE)
- out, err = popen.communicate()
- if not err or "2.5" not in err:
- executable = None
- cache[name] = executable
- return executable
-
-def pytest_funcarg__anypython(request):
- name = request.param
- executable = getexecutable(name)
- if executable is None:
- if sys.platform == "win32":
- executable = winpymap.get(name, None)
- if executable:
- executable = py.path.local(executable)
- if executable.check():
- return executable
- py.test.skip("no %s found" % (name,))
- return executable
+import py
+import sys
+
+pytest_plugins = 'doctest pytester'.split()
+
+collect_ignore = ['build', 'doc/_build']
+
+
+import os, py
+pid = os.getpid()
+
+def pytest_addoption(parser):
+ group = parser.getgroup("pylib", "py lib testing options")
+ group.addoption('--runslowtests',
+ action="store_true", dest="runslowtests", default=False,
+ help=("run slow tests"))
+
+def pytest_funcarg__sshhost(request):
+ val = request.config.getvalue("sshhost")
+ if val:
+ return val
+ py.test.skip("need --sshhost option")
+def pytest_generate_tests(metafunc):
+ multi = getattr(metafunc.function, 'multi', None)
+ if multi is not None:
+ assert len(multi.kwargs) == 1
+ for name, l in multi.kwargs.items():
+ for val in l:
+ metafunc.addcall(funcargs={name: val})
+ elif 'anypython' in metafunc.funcargnames:
+ for name in ('python2.4', 'python2.5', 'python2.6',
+ 'python2.7', 'python3.1', 'pypy-c', 'jython'):
+ metafunc.addcall(id=name, param=name)
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+ 'python2.7': r'C:\Python27\python.exe',
+ 'python2.6': r'C:\Python26\python.exe',
+ 'python2.5': r'C:\Python25\python.exe',
+ 'python2.4': r'C:\Python24\python.exe',
+ 'python3.1': r'C:\Python31\python.exe',
+}
+
+def getexecutable(name, cache={}):
+ try:
+ return cache[name]
+ except KeyError:
+ executable = py.path.local.sysfind(name)
+ if executable:
+ if name == "jython":
+ import subprocess
+ popen = subprocess.Popen([str(executable), "--version"],
+ universal_newlines=True, stderr=subprocess.PIPE)
+ out, err = popen.communicate()
+ if not err or "2.5" not in err:
+ executable = None
+ cache[name] = executable
+ return executable
+
+def pytest_funcarg__anypython(request):
+ name = request.param
+ executable = getexecutable(name)
+ if executable is None:
+ if sys.platform == "win32":
+ executable = winpymap.get(name, None)
+ if executable:
+ executable = py.path.local(executable)
+ if executable.check():
+ return executable
+ py.test.skip("no %s found" % (name,))
+ return executable
diff --git a/doc/Makefile b/doc/Makefile
index 0a0e89e..309cb78 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,133 +1,133 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = _build
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
-
-help:
- @echo "Please use \`make <target>' where <target> is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
-
-install: clean html
- rsync -avz _build/html/ code:www-pylib/
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/py.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/py.qhc"
-
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/py"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/py"
- @echo "# devhelp"
-
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- make -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+install: clean html
+ rsync -avz _build/html/ code:www-pylib/
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/py.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/py.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/py"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/py"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ make -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html
index 683863a..4e3e57c 100644
--- a/doc/_templates/layout.html
+++ b/doc/_templates/layout.html
@@ -1,18 +1,18 @@
-{% extends "!layout.html" %}
-
-{% block footer %}
-{{ super() }}
-<script type="text/javascript">
-
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', 'UA-7597274-14']);
- _gaq.push(['_trackPageview']);
-
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
-
-</script>
-{% endblock %}
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script type="text/javascript">
+
+ var _gaq = _gaq || [];
+ _gaq.push(['_setAccount', 'UA-7597274-14']);
+ _gaq.push(['_trackPageview']);
+
+ (function() {
+ var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+ ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+ var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+ })();
+
+</script>
+{% endblock %}
diff --git a/doc/announce/release-0.9.0.txt b/doc/announce/release-0.9.0.txt
index 0710931..ed18eec 100644
--- a/doc/announce/release-0.9.0.txt
+++ b/doc/announce/release-0.9.0.txt
@@ -1,7 +1,7 @@
-py lib 1.0.0: XXX
-======================================================================
-
-Welcome to the 1.0.0 py lib release - a library aiming to
-support agile and test-driven python development on various levels.
-
-XXX
+py lib 1.0.0: XXX
+======================================================================
+
+Welcome to the 1.0.0 py lib release - a library aiming to
+support agile and test-driven python development on various levels.
+
+XXX
diff --git a/doc/announce/release-0.9.2.txt b/doc/announce/release-0.9.2.txt
index bc2d2ef..aa4dca5 100644
--- a/doc/announce/release-0.9.2.txt
+++ b/doc/announce/release-0.9.2.txt
@@ -1,27 +1,27 @@
-py lib 0.9.2: bugfix release
-=============================
-
-Welcome to the 0.9.2 py lib and py.test release -
-mainly fixing Windows issues, providing better
-packaging and integration with setuptools.
-
-Here is a quick summary of what the py lib provides:
-
-* py.test: cross-project testing tool with many advanced features
-* py.execnet: ad-hoc code distribution to SSH, Socket and local sub processes
-* py.magic.greenlet: micro-threads on standard CPython ("stackless-light")
-* py.path: path abstractions over local and subversion files
-* rich documentation of py's exported API
-* tested against Linux, Win32, OSX, works on python 2.3-2.6
-
-See here for more information:
-
-Pypi pages: http://pypi.python.org/pypi/py/
-
-Download/Install: http://codespeak.net/py/0.9.2/download.html
-
-Documentation/API: http://codespeak.net/py/0.9.2/index.html
-
-best and have fun,
-
-holger krekel
+py lib 0.9.2: bugfix release
+=============================
+
+Welcome to the 0.9.2 py lib and py.test release -
+mainly fixing Windows issues, providing better
+packaging and integration with setuptools.
+
+Here is a quick summary of what the py lib provides:
+
+* py.test: cross-project testing tool with many advanced features
+* py.execnet: ad-hoc code distribution to SSH, Socket and local sub processes
+* py.magic.greenlet: micro-threads on standard CPython ("stackless-light")
+* py.path: path abstractions over local and subversion files
+* rich documentation of py's exported API
+* tested against Linux, Win32, OSX, works on python 2.3-2.6
+
+See here for more information:
+
+Pypi pages: http://pypi.python.org/pypi/py/
+
+Download/Install: http://codespeak.net/py/0.9.2/download.html
+
+Documentation/API: http://codespeak.net/py/0.9.2/index.html
+
+best and have fun,
+
+holger krekel
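Of the pieces summarized in the announcement above, py.path is the one most users meet first. A minimal, illustrative sketch of the local-filesystem flavour (the directory and file names here are made up)::

    import py

    tmp = py.path.local.mkdtemp()            # temporary directory as a path object
    target = tmp.join("hello.txt")            # paths compose via join()
    target.write("hello from py.path")
    assert target.read() == "hello from py.path"
    assert target.check(file=1)               # check() queries path properties
    tmp.remove()                              # recursive removal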
diff --git a/doc/announce/release-1.0.0.txt b/doc/announce/release-1.0.0.txt
index 7024255..96a56e3 100644
--- a/doc/announce/release-1.0.0.txt
+++ b/doc/announce/release-1.0.0.txt
@@ -1,63 +1,63 @@
-
-pylib 1.0.0 released: testing-with-python innovations continue
---------------------------------------------------------------------
-
-Took a few betas but finally I uploaded a `1.0.0 py lib release`_,
-featuring the mature and powerful py.test tool and "execnet-style"
-*elastic* distributed programming. With the new release, there are
-many new advanced automated testing features - here is a quick summary:
-
-* funcargs_ - pythonic zero-boilerplate fixtures for Python test functions:
-
- - totally separates test code, test configuration and test setup
- - ideal for integration and functional tests
- - allows for flexible and natural test parametrization schemes
-
-* new `plugin architecture`_, allowing easy-to-write project-specific and cross-project single-file plugins. The most notable new external plugin is `oejskit`_ which naturally enables **running and reporting of javascript-unittests in real-life browsers**.
-
-* many new features done in easy-to-improve `default plugins`_, highlights:
-
- * xfail: mark tests as "expected to fail" and report separately.
- * pastebin: automatically send tracebacks to pocoo paste service
- * capture: flexibly capture stdout/stderr of subprocesses, per-test ...
- * monkeypatch: safely monkeypatch modules/classes from within tests
- * unittest: run and integrate traditional unittest.py tests
- * figleaf: generate html coverage reports with the figleaf module
- * resultlog: generate buildbot-friendly reporting output
- * ...
-
-* `distributed testing`_ and `elastic distributed execution`_:
-
- - new unified "TX" URL scheme for specifying remote processes
- - new distribution modes "--dist=each" and "--dist=load"
- - new sync/async ways to handle 1:N communication
- - improved documentation
-
-The py lib continues to offer most of the functionality used by
-the testing tool in `independent namespaces`_.
-
-Some non-test related code, notably greenlets/co-routines and
-api-generation now live as their own projects which simplifies the
-installation procedure because no C-Extensions are required anymore.
-
-The whole package should work well with Linux, Win32 and OSX, on Python
-2.3, 2.4, 2.5 and 2.6. (Expect Python3 compatibility soon!)
-
-For more info, see the py.test and py lib documentation:
-
- http://pytest.org
-
- http://pylib.org
-
-have fun,
-holger
-
-.. _`independent namespaces`: http://pylib.org
-.. _`funcargs`: http://codespeak.net/py/dist/test/funcargs.html
-.. _`plugin architecture`: http://codespeak.net/py/dist/test/extend.html
-.. _`default plugins`: http://codespeak.net/py/dist/test/plugin/index.html
-.. _`distributed testing`: http://codespeak.net/py/dist/test/dist.html
-.. _`elastic distributed execution`: http://codespeak.net/py/dist/execnet.html
-.. _`1.0.0 py lib release`: http://pypi.python.org/pypi/py
-.. _`oejskit`: http://codespeak.net/py/dist/test/plugin/oejskit.html
-
+
+pylib 1.0.0 released: testing-with-python innovations continue
+--------------------------------------------------------------------
+
+Took a few betas but finally I uploaded a `1.0.0 py lib release`_,
+featuring the mature and powerful py.test tool and "execnet-style"
+*elastic* distributed programming. With the new release, there are
+many new advanced automated testing features - here is a quick summary:
+
+* funcargs_ - pythonic zero-boilerplate fixtures for Python test functions:
+
+ - totally separates test code, test configuration and test setup
+ - ideal for integration and functional tests
+ - allows for flexible and natural test parametrization schemes
+
+* new `plugin architecture`_, allowing easy-to-write project-specific and cross-project single-file plugins. The most notable new external plugin is `oejskit`_ which naturally enables **running and reporting of javascript-unittests in real-life browsers**.
+
+* many new features done in easy-to-improve `default plugins`_, highlights:
+
+ * xfail: mark tests as "expected to fail" and report separately.
+ * pastebin: automatically send tracebacks to pocoo paste service
+ * capture: flexibly capture stdout/stderr of subprocesses, per-test ...
+ * monkeypatch: safely monkeypatch modules/classes from within tests
+ * unittest: run and integrate traditional unittest.py tests
+ * figleaf: generate html coverage reports with the figleaf module
+ * resultlog: generate buildbot-friendly reporting output
+ * ...
+
+* `distributed testing`_ and `elastic distributed execution`_:
+
+ - new unified "TX" URL scheme for specifying remote processes
+ - new distribution modes "--dist=each" and "--dist=load"
+ - new sync/async ways to handle 1:N communication
+ - improved documentation
+
+The py lib continues to offer most of the functionality used by
+the testing tool in `independent namespaces`_.
+
+Some non-test related code, notably greenlets/co-routines and
+api-generation now live as their own projects which simplifies the
+installation procedure because no C-Extensions are required anymore.
+
+The whole package should work well with Linux, Win32 and OSX, on Python
+2.3, 2.4, 2.5 and 2.6. (Expect Python3 compatibility soon!)
+
+For more info, see the py.test and py lib documentation:
+
+ http://pytest.org
+
+ http://pylib.org
+
+have fun,
+holger
+
+.. _`independent namespaces`: http://pylib.org
+.. _`funcargs`: http://codespeak.net/py/dist/test/funcargs.html
+.. _`plugin architecture`: http://codespeak.net/py/dist/test/extend.html
+.. _`default plugins`: http://codespeak.net/py/dist/test/plugin/index.html
+.. _`distributed testing`: http://codespeak.net/py/dist/test/dist.html
+.. _`elastic distributed execution`: http://codespeak.net/py/dist/execnet.html
+.. _`1.0.0 py lib release`: http://pypi.python.org/pypi/py
+.. _`oejskit`: http://codespeak.net/py/dist/test/plugin/oejskit.html
+
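To make the funcargs bullet above concrete, here is a minimal sketch of the factory convention used in this release: a ``pytest_funcarg__NAME`` function in conftest.py is matched to a test argument of the same name. The ``accountdb`` name and values are invented for the example::

    # conftest.py
    def pytest_funcarg__accountdb(request):
        # setup code lives here, completely outside the test itself
        return {"alice": 100}

    # test_accounts.py
    def test_balance(accountdb):
        # the argument name alone triggers the factory above
        assert accountdb["alice"] == 100

Because the factory receives a ``request`` object it can also register finalizers or cache expensive setup, which is what makes the same mechanism usable for integration and functional tests.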
diff --git a/doc/announce/release-1.0.1.txt b/doc/announce/release-1.0.1.txt
index 0c9f876..46e564d 100644
--- a/doc/announce/release-1.0.1.txt
+++ b/doc/announce/release-1.0.1.txt
@@ -1,48 +1,48 @@
-1.0.1: improved reporting, nose/unittest.py support, bug fixes
------------------------------------------------------------------------
-
-This is a bugfix release of pylib/py.test also coming with:
-
-* improved documentation, improved navigation
-* test failure reporting improvements
-* support for directly running existing nose/unittest.py style tests
-
-visit here for more info, including quickstart and tutorials:
-
- http://pytest.org and http://pylib.org
-
-
-Changelog 1.0.0 to 1.0.1
-------------------------
-
-* added a default 'pytest_nose' plugin which handles nose.SkipTest,
- nose-style function/method/generator setup/teardown and
- tries to report functions correctly.
-
-* improved documentation, better navigation: see http://pytest.org
-
-* added a "--help-config" option to show conftest.py / ENV-var names for
- all longopt cmdline options, and some special conftest.py variables.
- renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
-
-* unicode fixes: capturing and unicode writes to sys.stdout
- (through e.g. a print statement) now work within tests,
- they are encoded as "utf8" by default, also terminalwriting
- was adapted and somewhat unified between windows and linux
-
-* fix issue #27: better reporting on non-collectable items given on commandline
- (e.g. pyc files)
-
-* fix issue #33: added --version flag (thanks Benjamin Peterson)
-
-* fix issue #32: adding support for "incomplete" paths to wcpath.status()
-
-* "Test" prefixed classes are *not* collected by default anymore if they
- have an __init__ method
-
-* monkeypatch setenv() now accepts a "prepend" parameter
-
-* improved reporting of collection error tracebacks
-
-* simplified multicall mechanism and plugin architecture,
- renamed some internal methods and argnames
+1.0.1: improved reporting, nose/unittest.py support, bug fixes
+-----------------------------------------------------------------------
+
+This is a bugfix release of pylib/py.test also coming with:
+
+* improved documentation, improved navigation
+* test failure reporting improvements
+* support for directly running existing nose/unittest.py style tests
+
+visit here for more info, including quickstart and tutorials:
+
+ http://pytest.org and http://pylib.org
+
+
+Changelog 1.0.0 to 1.0.1
+------------------------
+
+* added a default 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* improved documentation, better navigation: see http://pytest.org
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* unicode fixes: capturing and unicode writes to sys.stdout
+ (through e.g. a print statement) now work within tests,
+ they are encoded as "utf8" by default, also terminalwriting
+ was adapted and somewhat unified between windows and linux
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
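A short sketch of the monkeypatch change listed above (``setenv()`` accepting a ``prepend`` parameter); the variable and path are illustrative only::

    import os

    def test_tool_on_path(monkeypatch):
        # prepend=":" puts the new entry in front of the existing value,
        # joined with ":" as separator; the change is undone after the test
        monkeypatch.setenv("PATH", "/opt/example/bin", prepend=":")
        assert os.environ["PATH"].startswith("/opt/example/bin")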
diff --git a/doc/announce/release-1.0.2.txt b/doc/announce/release-1.0.2.txt
index 2354619..272b2a5 100644
--- a/doc/announce/release-1.0.2.txt
+++ b/doc/announce/release-1.0.2.txt
@@ -1,5 +1,5 @@
-1.0.2: packaging fixes
------------------------------------------------------------------------
-
-this release is purely a release for fixing packaging issues.
-
+1.0.2: packaging fixes
+-----------------------------------------------------------------------
+
+this release is purely a release for fixing packaging issues.
+
diff --git a/doc/announce/release-1.1.0.txt b/doc/announce/release-1.1.0.txt
index 0441c32..c9308aa 100644
--- a/doc/announce/release-1.1.0.txt
+++ b/doc/announce/release-1.1.0.txt
@@ -1,115 +1,115 @@
-py.test/pylib 1.1.0: Python3, Jython, advanced skipping, cleanups ...
---------------------------------------------------------------------------------
-
-Features:
-
-* compatible to Python3 (single py2/py3 source), `easy to install`_
-* conditional skipping_: skip/xfail based on platform/dependencies
-* generalized marking_: mark tests on a whole-class or whole-module basis
-
-Fixes:
-
-* code reduction and "de-magification" (e.g. 23 KLoc -> 11 KLOC)
-* distributed testing requires the now separately released execnet_ package
-* funcarg-setup/caching, "same-name" test modules now cause an explicit error
-* de-cluttered reporting options, --report for skipped/xfail details
-
-Compatibilities
-
-1.1.0 should allow running test code that already worked well with 1.0.2
-plus some more due to improved unittest/nose compatibility.
-
-More information: http://pytest.org
-
-thanks and have fun,
-
-holger (http://twitter.com/hpk42)
-
-.. _execnet: http://codespeak.net/execnet
-.. _`easy to install`: ../install.html
-.. _marking: ../test/plugin/mark.html
-.. _skipping: ../test/plugin/skipping.html
-
-
-Changelog 1.0.2 -> 1.1.0
------------------------------------------------------------------------
-
-* remove py.rest tool and internal namespace - it was
- never really advertised and can still be used with
- the old release if needed. If there is interest
- it could be revived into its own tool, I guess.
-
-* fix issue48 and issue59: raise an Error if the module
- from an imported test file does not seem to come from
- the filepath - avoids "same-name" confusion that has
- been reported repeatedly
-
-* merged Ronny's nose-compatibility hacks: now
- nose-style setup_module() and setup() functions are
- supported
-
-* introduce generalized py.test.mark function marking
-
-* reshuffle / refine command line grouping
-
-* deprecate parser.addgroup in favour of getgroup which creates option group
-
-* add --report command line option that allows to control showing of skipped/xfailed sections
-
-* generalized skipping: a new way to mark python functions with skipif or xfail
- at function, class and modules level based on platform or sys-module attributes.
-
-* extend py.test.mark decorator to allow for positional args
-
-* introduce and test "py.cleanup -d" to remove empty directories
-
-* fix issue #59 - robustify unittest test collection
-
-* make bpython/help interaction work by adding an __all__ attribute
- to ApiModule, cleanup initpkg
-
-* use MIT license for pylib, add some contributors
-
-* remove py.execnet code and substitute all usages with 'execnet' proper
-
-* fix issue50 - cached_setup now caches more to expectations
- for test functions with multiple arguments.
-
-* merge Jarko's fixes, issue #45 and #46
-
-* add the ability to specify a path for py.lookup to search in
-
-* fix a funcarg cached_setup bug probably only occurring
- in distributed testing and "module" scope with teardown.
-
-* many fixes and changes for making the code base python3 compatible,
- many thanks to Benjamin Peterson for helping with this.
-
-* consolidate builtins implementation to be compatible with >=2.3,
- add helpers to ease keeping 2 and 3k compatible code
-
-* deprecate py.compat.doctest|subprocess|textwrap|optparse
-
-* deprecate py.magic.autopath, remove py/magic directory
-
-* move pytest assertion handling to py/code and a pytest_assertion
- plugin, add "--no-assert" option, deprecate py.magic namespaces
- in favour of (less) py.code ones.
-
-* consolidate and cleanup py/code classes and files
-
-* cleanup py/misc, move tests to bin-for-dist
-
-* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
-
-* consolidate py.log implementation, remove old approach.
-
-* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
- text/unicode and byte-streams (uses underlying standard lib io.*
- if available)
-
-* make py.unittest_convert helper script available which converts "unittest.py"
- style files into the simpler assert/direct-test-classes py.test/nosetests
- style. The script was written by Laura Creighton.
-
-* simplified internal localpath implementation
+py.test/pylib 1.1.0: Python3, Jython, advanced skipping, cleanups ...
+--------------------------------------------------------------------------------
+
+Features:
+
+* compatible to Python3 (single py2/py3 source), `easy to install`_
+* conditional skipping_: skip/xfail based on platform/dependencies
+* generalized marking_: mark tests on a whole-class or whole-module basis
+
+Fixes:
+
+* code reduction and "de-magification" (e.g. 23 KLoc -> 11 KLOC)
+* distributed testing requires the now separately released execnet_ package
+* funcarg-setup/caching, "same-name" test modules now cause an explicit error
+* de-cluttered reporting options, --report for skipped/xfail details
+
+Compatibilities
+
+1.1.0 should allow running test code that already worked well with 1.0.2
+plus some more due to improved unittest/nose compatibility.
+
+More information: http://pytest.org
+
+thanks and have fun,
+
+holger (http://twitter.com/hpk42)
+
+.. _execnet: http://codespeak.net/execnet
+.. _`easy to install`: ../install.html
+.. _marking: ../test/plugin/mark.html
+.. _skipping: ../test/plugin/skipping.html
+
+
+Changelog 1.0.2 -> 1.1.0
+-----------------------------------------------------------------------
+
+* remove py.rest tool and internal namespace - it was
+ never really advertised and can still be used with
+ the old release if needed. If there is interest
+ it could be revived into its own tool, I guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup which creates option group
+
+* add --report command line option that allows to control showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+ at function, class and modules level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches more to expectations
+ for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
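To illustrate the skipping and marking features advertised above, a minimal sketch of function-level markers (the condition string is evaluated by py.test at collection time; the test bodies are placeholders)::

    import py

    @py.test.mark.skipif("sys.platform == 'win32'")
    def test_posix_only():
        # skipped on Windows, run everywhere else
        assert True

    @py.test.mark.xfail
    def test_known_broken():
        # reported as expected-to-fail instead of a plain failure
        assert 0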
diff --git a/doc/announce/release-1.1.1.txt b/doc/announce/release-1.1.1.txt
index 83e6a1f..4d5c9e5 100644
--- a/doc/announce/release-1.1.1.txt
+++ b/doc/announce/release-1.1.1.txt
@@ -1,48 +1,48 @@
-py.test/pylib 1.1.1: bugfix release, setuptools plugin registration
---------------------------------------------------------------------------------
-
-This is a compatibility fixing release of pylib/py.test to work
-better with previous 1.0.x test code bases. It also contains fixes
-and changes to work with `execnet>=1.0.0`_ to provide distributed
-testing and looponfailing testing modes. py-1.1.1 also introduces
-a new mechanism for registering plugins via setuptools.
-
-What is pylib/py.test?
------------------------
-
-py.test is an advanced automated testing tool working with
-Python2, Python3 and Jython versions on all major operating
-systems. It has an extensive plugin architecture and can run many
-existing common Python test suites without modification. Moreover,
-it offers some unique features not found in other
-testing tools. See http://pytest.org for more info.
-
-The pylib also contains a localpath and svnpath implementation
-and some developer-oriented command line tools. See
-http://pylib.org for more info.
-
-thanks to all who helped and gave feedback,
-have fun,
-
-holger (http://twitter.com/hpk42)
-
-.. _`execnet>=1.0.0`: http://codespeak.net/execnet
-
-Changes between 1.1.1 and 1.1.0
-=====================================
-
-- introduce automatic plugin registration via 'pytest11'
- entrypoints via setuptools' pkg_resources.iter_entry_points
-
-- fix py.test dist-testing to work with execnet >= 1.0.0b4
-
-- re-introduce py.test.cmdline.main() for better backward compatibility
-
-- svn paths: fix a bug with path.check(versioned=True) for svn paths,
- allow '%' in svn paths, make svnwc.update() default to interactive mode
- like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
-
-- refine distributed tarball to contain test and no pyc files
-
-- try harder to have deprecation warnings for py.compat.* accesses
- report a correct location
+py.test/pylib 1.1.1: bugfix release, setuptools plugin registration
+--------------------------------------------------------------------------------
+
+This is a compatibility fixing release of pylib/py.test to work
+better with previous 1.0.x test code bases. It also contains fixes
+and changes to work with `execnet>=1.0.0`_ to provide distributed
+testing and looponfailing testing modes. py-1.1.1 also introduces
+a new mechanism for registering plugins via setuptools.
+
+What is pylib/py.test?
+-----------------------
+
+py.test is an advanced automated testing tool working with
+Python2, Python3 and Jython versions on all major operating
+systems. It has an extensive plugin architecture and can run many
+existing common Python test suites without modification. Moreover,
+it offers some unique features not found in other
+testing tools. See http://pytest.org for more info.
+
+The pylib also contains a localpath and svnpath implementation
+and some developer-oriented command line tools. See
+http://pylib.org for more info.
+
+thanks to all who helped and gave feedback,
+have fun,
+
+holger (http://twitter.com/hpk42)
+
+.. _`execnet>=1.0.0`: http://codespeak.net/execnet
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+ entrypoints via setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain test and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
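The setuptools registration mentioned above comes down to exposing a module under the ``pytest11`` entry point group, which py.test scans via pkg_resources at startup. A minimal sketch of such a setup.py (project and module names are placeholders)::

    from setuptools import setup

    setup(
        name="pytest-example",
        py_modules=["pytest_example"],
        # py.test iterates the "pytest11" group and registers each module
        entry_points={"pytest11": ["example = pytest_example"]},
    )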
diff --git a/doc/announce/release-1.2.0.txt b/doc/announce/release-1.2.0.txt
index 4f6a561..85dd759 100644
--- a/doc/announce/release-1.2.0.txt
+++ b/doc/announce/release-1.2.0.txt
@@ -1,116 +1,116 @@
-py.test/pylib 1.2.0: junitxml, standalone test scripts, pluginization
---------------------------------------------------------------------------------
-
-py.test is an advanced automated testing tool working with
-Python2, Python3 and Jython versions on all major operating
-systems. It has a simple plugin architecture and can run many
-existing common Python test suites without modification. It offers
-some unique features not found in other testing tools.
-See http://pytest.org for more info.
-
-py.test 1.2.0 brings many bug fixes and interesting new abilities:
-
-* --junitxml=path will create an XML file for use with CI processing
-* --genscript=path creates a standalone py.test-equivalent test-script
-* --ignore=path prevents collection of anything below that path
-* --confcutdir=path only lookup conftest.py test configs below that path
-* a 'pytest_report_header' hook to add info to the terminal report header
-* a 'pytestconfig' function argument gives direct access to option values
-* 'pytest_generate_tests' can now be put into a class as well
-* on CPython py.test additionally installs as "py.test-VERSION", on
- Jython as py.test-jython and on PyPy as py.test-pypy-XYZ
-
-Apart from many bug fixes 1.2.0 also has better pluginization:
-Distributed testing and looponfailing testing now live in the
-separately installable 'pytest-xdist' plugin. The same is true for
-'pytest-figleaf' for doing coverage reporting. Those two plugins
-can serve well now as blueprints for doing your own.
-
-thanks to all who helped and gave feedback,
-have fun,
-
-holger krekel, January 2010
-
-Changes between 1.2.0 and 1.1.1
-=====================================
-
-- moved dist/looponfailing from py.test core into a new
- separately released pytest-xdist plugin.
-
-- new junitxml plugin: --junitxml=path will generate a junit style xml file
- which is processable e.g. by the Hudson CI system.
-
-- new option: --genscript=path will generate a standalone py.test script
- which will not need any libraries installed. thanks to Ralf Schmitt.
-
-- new option: --ignore will prevent specified path from collection.
- Can be specified multiple times.
-
-- new option: --confcutdir=dir will make py.test only consider conftest
- files that are relative to the specified dir.
-
-- new funcarg: "pytestconfig" is the pytest config object for access
- to command line args and can now be easily used in a test.
-
-- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
- disambiguate between Python3, python2.X, Jython and PyPy installed versions.
-
-- new "pytestconfig" funcarg allows access to test config object
-
-- new "pytest_report_header" hook can return additional lines
- to be displayed at the header of a test run.
-
-- (experimental) allow "py.test path::name1::name2::..." for pointing
- to a test within a test collection directly. This might eventually
- evolve as a full substitute to "-k" specifications.
-
-- streamlined plugin loading: order is now as documented in
- customize.html: setuptools, ENV, commandline, conftest.
- also setuptools entry point names are turned to canonical names ("pytest_*")
-
-- automatically skip tests that need 'capfd' but have no os.dup
-
-- allow pytest_generate_tests to be defined in classes as well
-
-- deprecate usage of 'disabled' attribute in favour of pytestmark
-- deprecate definition of Directory, Module, Class and Function nodes
- in conftest.py files. Use pytest collect hooks instead.
-
-- collection/item node specific runtest/collect hooks are only called exactly
- on matching conftest.py files, i.e. ones which are exactly below
- the filesystem path of an item
-
-- change: the first pytest_collect_directory hook to return something
- will now prevent further hooks from being called.
-
-- change: figleaf plugin now requires --figleaf to run. Also
- change its long command line options to be a bit shorter (see py.test -h).
-
-- change: pytest doctest plugin is now enabled by default and has a
- new option --doctest-glob to set a pattern for file matches.
-
-- change: remove internal py._* helper vars, only keep py._pydir
-
-- robustify capturing to survive if custom pytest_runtest_setup
- code failed and prevented the capturing setup code from running.
-
-- make py.test.* helpers provided by default plugins visible early -
- works transparently both for pydoc and for interactive sessions
- which will regularly see e.g. py.test.mark and py.test.importorskip.
-
-- simplify internal plugin manager machinery
-- simplify internal collection tree by introducing a RootCollector node
-
-- fix assert reinterpretation that sees a call containing "keyword=..."
-
-- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
- hooks on slaves during dist-testing, report module/session teardown
- hooks correctly.
-
-- fix issue65: properly handle dist-testing if no
- execnet/py lib installed remotely.
-
-- skip some install-tests if no execnet is available
-
-- fix docs, fix internal bin/ script generation
-
+py.test/pylib 1.2.0: junitxml, standalone test scripts, pluginization
+--------------------------------------------------------------------------------
+
+py.test is an advanced automated testing tool working with
+Python2, Python3 and Jython versions on all major operating
+systems. It has a simple plugin architecture and can run many
+existing common Python test suites without modification. It offers
+some unique features not found in other testing tools.
+See http://pytest.org for more info.
+
+py.test 1.2.0 brings many bug fixes and interesting new abilities:
+
+* --junitxml=path will create an XML file for use with CI processing
+* --genscript=path creates a standalone py.test-equivalent test-script
+* --ignore=path prevents collection of anything below that path
+* --confcutdir=path only lookup conftest.py test configs below that path
+* a 'pytest_report_header' hook to add info to the terminal report header
+* a 'pytestconfig' function argument gives direct access to option values
+* 'pytest_generate_tests' can now be put into a class as well
+* on CPython py.test additionally installs as "py.test-VERSION", on
+ Jython as py.test-jython and on PyPy as py.test-pypy-XYZ
+
+Apart from many bug fixes 1.2.0 also has better pluginization:
+Distributed testing and looponfailing testing now live in the
+separately installable 'pytest-xdist' plugin. The same is true for
+'pytest-figleaf' for doing coverage reporting. Those two plugins
+can serve well now as blue prints for doing your own.
+
+thanks to all who helped and gave feedback,
+have fun,
+
+holger krekel, January 2010
+
+Changes between 1.2.0 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+ evolve as a full substitute to "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+ also setuptools entry point names are turned to canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+ will now prevent further hooks from being called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
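Of the hooks listed above, ``pytest_report_header`` and the ``pytestconfig`` funcarg are the quickest to try; a minimal sketch (the header text is arbitrary and the test is purely illustrative)::

    # conftest.py
    def pytest_report_header(config):
        # returned text is shown at the top of the terminal report
        return "example project -- extra header line"

    # test_config.py
    def test_sees_config(pytestconfig):
        # pytestconfig gives direct access to the parsed command line options
        assert pytestconfig.option is not None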
diff --git a/doc/announce/release-1.2.1.txt b/doc/announce/release-1.2.1.txt
index 5bf8ba2..a00f7fe 100644
--- a/doc/announce/release-1.2.1.txt
+++ b/doc/announce/release-1.2.1.txt
@@ -1,66 +1,66 @@
-py.test/pylib 1.2.1: little fixes and improvements
---------------------------------------------------------------------------------
-
-py.test is an advanced automated testing tool working with
-Python2, Python3 and Jython versions on all major operating
-systems. It has a simple plugin architecture and can run many
-existing common Python test suites without modification. It offers
-some unique features not found in other testing tools.
-See http://pytest.org for more info.
-
-py.test 1.2.1 brings bug fixes and some new options and abilities triggered
-by user feedback:
-
-* --funcargs [testpath] will show available builtin- and project funcargs.
-* display a short and concise traceback if funcarg lookup fails.
-* early-load "conftest.py" files in non-dot first-level sub directories.
-* --tb=line will print a single line for each failing test (issue67)
-* py.cleanup has a number of new options, cleans up setup.py related files
-* fix issue78: always call python-level teardown functions even if the
- according setup failed.
-
-For more detailed information see the changelog below.
-
-cheers and have fun,
-
-holger
-
-
-Changes between 1.2.1 and 1.2.0
-=====================================
-
-- refined usage and options for "py.cleanup"::
-
- py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
- py.cleanup -e .swp -e .cache # also remove files with these extensions
- py.cleanup -s # remove "build" and "dist" directory next to setup.py files
- py.cleanup -d # also remove empty directories
- py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
- py.cleanup -n # dry run, only show what would be removed
-
-- add a new option "py.test --funcargs" which shows available funcargs
- and their help strings (docstrings on their respective factory function)
- for a given test path
-
-- display a short and concise traceback if a funcarg lookup fails
-
-- early-load "conftest.py" files in non-dot first-level sub directories.
- allows to conveniently keep and access test-related options in a ``test``
- subdir and still add command line options.
-
-- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
-
-- fix issue78: always call python-level teardown functions even if the
- according setup failed. This includes refinements for calling setup_module/class functions
- which will now only be called once instead of the previous behaviour where they'd be called
- multiple times if they raise an exception (including a Skipped exception). Any exception
- will be recorded and associated with all tests in the according module/class scope.
-
-- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
-
-- fix pdb debugging to be in the correct frame on raises-related errors
-
-- update apipkg.py to fix an issue where recursive imports might
- unnecessarily break importing
-
-- fix plugin links
+py.test/pylib 1.2.1: little fixes and improvements
+--------------------------------------------------------------------------------
+
+py.test is an advanced automated testing tool working with
+Python2, Python3 and Jython versions on all major operating
+systems. It has a simple plugin architecture and can run many
+existing common Python test suites without modification. It offers
+some unique features not found in other testing tools.
+See http://pytest.org for more info.
+
+py.test 1.2.1 brings bug fixes and some new options and abilities triggered
+by user feedback:
+
+* --funcargs [testpath] will show available builtin- and project funcargs.
+* display a short and concise traceback if funcarg lookup fails.
+* early-load "conftest.py" files in non-dot first-level sub directories.
+* --tb=line will print a single line for each failing test (issue67)
+* py.cleanup has a number of new options, cleans up setup.py related files
+* fix issue78: always call python-level teardown functions even if the
+ according setup failed.
+
+For more detailed information see the changelog below.
+
+cheers and have fun,
+
+holger
+
+
+Changes between 1.2.1 and 1.2.0
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+ allows to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+ according setup failed. This includes refinements for calling setup_module/class functions
+ which will now only be called once instead of the previous behaviour where they'd be called
+ multiple times if they raise an exception (including a Skipped exception). Any exception
+ will be recorded and associated with all tests in the according module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
diff --git a/doc/announce/release-1.3.0.txt b/doc/announce/release-1.3.0.txt
index cf97db0..1b6b3c7 100644
--- a/doc/announce/release-1.3.0.txt
+++ b/doc/announce/release-1.3.0.txt
@@ -1,580 +1,580 @@
-py.test/pylib 1.3.0: new options, per-plugin hooks, fixes ...
-===========================================================================
-
-The 1.3.0 release introduces new options, bug fixes and improved compatibility
-with Python3 and Jython-2.5.1 on Windows. If you already use py-1.2 chances
-are you can use py-1.3.0. See the below CHANGELOG for more details and
-http://pylib.org/install.html for installation instructions.
-
-py.test is an advanced automated testing tool working with Python2,
-Python3, Jython and PyPy versions on all major operating systems. It
-offers a no-boilerplate testing approach and has inspired other testing
-tools and enhancements in the standard Python library for more than five
-years. It has a simple and extensive plugin architecture, configurable
-reporting and provides unique ways to make it fit to your testing
-process and needs.
-
-See http://pytest.org for more info.
-
-cheers and have fun,
-
-holger krekel
-
-Changes between 1.2.1 and 1.3.0
-==================================================
-
-- deprecate --report option in favour of a new shorter and easier to
- remember -r option: it takes a string argument consisting of any
- combination of 'xfsX' characters. They relate to the single chars
- you see during the dotted progress printing and will print an extra line
- per test at the end of the test run. This extra line indicates the exact
- position or test ID that you directly paste to the py.test cmdline in order
- to re-run a particular test.
-
-- allow external plugins to register new hooks via the new
- pytest_addhooks(pluginmanager) hook. The new release of
- the pytest-xdist plugin for distributed and looponfailing
- testing requires this feature.
-
-- add a new pytest_ignore_collect(path, config) hook to allow projects and
- plugins to define exclusion behaviour for their directory structure -
- for example you may define in a conftest.py this method::
-
- def pytest_ignore_collect(path):
- return path.check(link=1)
-
- to prevent even collection of any tests in symlinked dirs.
-
-- new pytest_pycollect_makemodule(path, parent) hook for
- allowing customization of the Module collection object for a
- matching test module.
-
-- extend and refine xfail mechanism::
-
- @py.test.mark.xfail(run=False) do not run the decorated test
- @py.test.mark.xfail(reason="...") prints the reason string in xfail summaries
-
- specifying ``--runxfail`` on the command line ignores xfail markers to show
- you the underlying traceback.
-
-- expose (previously internal) commonly useful methods:
- py.io.get_terminal_width() -> return terminal width
- py.io.ansi_print(...) -> print colored/bold text on linux/win32
- py.io.saferepr(obj) -> return limited representation string
-
-- expose test outcome related exceptions as py.test.skip.Exception,
- py.test.raises.Exception etc., useful mostly for plugins
- doing special outcome interpretation/tweaking
-
-- (issue85) fix junitxml plugin to handle tests with non-ascii output
-
-- fix/refine python3 compatibility (thanks Benjamin Peterson)
-
-- fixes for making the jython/win32 combination work, note however:
- jython2.5.1/win32 does not provide a command line launcher, see
- http://bugs.jython.org/issue1491 . See pylib install documentation
- for how to work around.
-
-- fixes for handling of unicode exception values and unprintable objects
-
-- (issue87) fix unboundlocal error in assertionold code
-
-- (issue86) improve documentation for looponfailing
-
-- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
-
-- ship distribute_setup.py version 0.6.10
-
-- added links to the new capturelog and coverage plugins
-
-
-Changes between 1.2.1 and 1.2.0
-=====================================
-
-- refined usage and options for "py.cleanup"::
-
- py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
- py.cleanup -e .swp -e .cache # also remove files with these extensions
- py.cleanup -s # remove "build" and "dist" directory next to setup.py files
- py.cleanup -d # also remove empty directories
- py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
- py.cleanup -n # dry run, only show what would be removed
-
-- add a new option "py.test --funcargs" which shows available funcargs
- and their help strings (docstrings on their respective factory function)
- for a given test path
-
-- display a short and concise traceback if a funcarg lookup fails
-
-- early-load "conftest.py" files in non-dot first-level sub directories.
- allows to conveniently keep and access test-related options in a ``test``
- subdir and still add command line options.
-
-- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
-
-- fix issue78: always call python-level teardown functions even if the
- according setup failed. This includes refinements for calling setup_module/class functions
- which will now only be called once instead of the previous behaviour where they'd be called
- multiple times if they raise an exception (including a Skipped exception). Any exception
- will be recorded and associated with all tests in the according module/class scope.
-
-- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
-
-- fix pdb debugging to be in the correct frame on raises-related errors
-
-- update apipkg.py to fix an issue where recursive imports might
- unnecessarily break importing
-
-- fix plugin links
-
-Changes between 1.2 and 1.1.1
-=====================================
-
-- moved dist/looponfailing from py.test core into a new
- separately released pytest-xdist plugin.
-
-- new junitxml plugin: --junitxml=path will generate a junit style xml file
- which is processable e.g. by the Hudson CI system.
-
-- new option: --genscript=path will generate a standalone py.test script
- which will not need any libraries installed. thanks to Ralf Schmitt.
-
-- new option: --ignore will prevent specified path from collection.
- Can be specified multiple times.
-
-- new option: --confcutdir=dir will make py.test only consider conftest
- files that are relative to the specified dir.
-
-- new funcarg: "pytestconfig" is the pytest config object for access
- to command line args and can now be easily used in a test.
-
-- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
- disambiguate between Python3, python2.X, Jython and PyPy installed versions.
-
-- new "pytestconfig" funcarg allows access to test config object
-
-- new "pytest_report_header" hook can return additional lines
- to be displayed at the header of a test run.
-
-- (experimental) allow "py.test path::name1::name2::..." for pointing
- to a test within a test collection directly. This might eventually
- evolve as a full substitute to "-k" specifications.
-
-- streamlined plugin loading: order is now as documented in
- customize.html: setuptools, ENV, commandline, conftest.
- also setuptools entry point names are turned to canonical names ("pytest_*")
-
-- automatically skip tests that need 'capfd' but have no os.dup
-
-- allow pytest_generate_tests to be defined in classes as well
-
-- deprecate usage of 'disabled' attribute in favour of pytestmark
-- deprecate definition of Directory, Module, Class and Function nodes
- in conftest.py files. Use pytest collect hooks instead.
-
-- collection/item node specific runtest/collect hooks are only called exactly
- on matching conftest.py files, i.e. ones which are exactly below
- the filesystem path of an item
-
-- change: the first pytest_collect_directory hook to return something
- will now prevent further hooks from being called.
-
-- change: figleaf plugin now requires --figleaf to run. Also
- change its long command line options to be a bit shorter (see py.test -h).
-
-- change: pytest doctest plugin is now enabled by default and has a
- new option --doctest-glob to set a pattern for file matches.
-
-- change: remove internal py._* helper vars, only keep py._pydir
-
-- robustify capturing to survive if custom pytest_runtest_setup
- code failed and prevented the capturing setup code from running.
-
-- make py.test.* helpers provided by default plugins visible early -
- works transparently both for pydoc and for interactive sessions
- which will regularly see e.g. py.test.mark and py.test.importorskip.
-
-- simplify internal plugin manager machinery
-- simplify internal collection tree by introducing a RootCollector node
-
-- fix assert reinterpretation that sees a call containing "keyword=..."
-
-- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
- hooks on slaves during dist-testing, report module/session teardown
- hooks correctly.
-
-- fix issue65: properly handle dist-testing if no
- execnet/py lib installed remotely.
-
-- skip some install-tests if no execnet is available
-
-- fix docs, fix internal bin/ script generation
-
-
-Changes between 1.1.1 and 1.1.0
-=====================================
-
-- introduce automatic plugin registration via 'pytest11'
-  entry points, using setuptools' pkg_resources.iter_entry_points
-
-- fix py.test dist-testing to work with execnet >= 1.0.0b4
-
-- re-introduce py.test.cmdline.main() for better backward compatibility
-
-- svn paths: fix a bug with path.check(versioned=True) for svn paths,
- allow '%' in svn paths, make svnwc.update() default to interactive mode
- like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
-
-- refine distributed tarball to contain test and no pyc files
-
-- try harder to have deprecation warnings for py.compat.* accesses
- report a correct location
-
-Changes between 1.1.0 and 1.0.2
-=====================================
-
-* adjust and improve docs
-
-* remove py.rest tool and internal namespace - it was
-  never really advertised and can still be used with
-  the old release if needed. If there is interest
-  it could be revived into its own tool, I guess.
-
-* fix issue48 and issue59: raise an Error if the module
- from an imported test file does not seem to come from
- the filepath - avoids "same-name" confusion that has
- been reported repeatedly
-
-* merged Ronny's nose-compatibility hacks: now
- nose-style setup_module() and setup() functions are
- supported
-
-* introduce generalized py.test.mark function marking
-
-* reshuffle / refine command line grouping
-
-* deprecate parser.addgroup in favour of getgroup, which creates an option group
-
-* add --report command line option that allows controlling the display of skipped/xfailed sections
-
-* generalized skipping: a new way to mark python functions with skipif or xfail
-  at function, class and module level based on platform or sys-module attributes.
-
-* extend py.test.mark decorator to allow for positional args
-
-* introduce and test "py.cleanup -d" to remove empty directories
-
-* fix issue #59 - robustify unittest test collection
-
-* make bpython/help interaction work by adding an __all__ attribute
- to ApiModule, cleanup initpkg
-
-* use MIT license for pylib, add some contributors
-
-* remove py.execnet code and substitute all usages with 'execnet' proper
-
-* fix issue50 - cached_setup now caches closer to expectations
-  for test functions with multiple arguments.
-
-* merge Jarko's fixes, issue #45 and #46
-
-* add the ability to specify a path for py.lookup to search in
-
-* fix a funcarg cached_setup bug probably only occurring
- in distributed testing and "module" scope with teardown.
-
-* many fixes and changes for making the code base python3 compatible,
- many thanks to Benjamin Peterson for helping with this.
-
-* consolidate builtins implementation to be compatible with >=2.3,
- add helpers to ease keeping 2 and 3k compatible code
-
-* deprecate py.compat.doctest|subprocess|textwrap|optparse
-
-* deprecate py.magic.autopath, remove py/magic directory
-
-* move pytest assertion handling to py/code and a pytest_assertion
- plugin, add "--no-assert" option, deprecate py.magic namespaces
- in favour of (less) py.code ones.
-
-* consolidate and cleanup py/code classes and files
-
-* cleanup py/misc, move tests to bin-for-dist
-
-* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
-
-* consolidate py.log implementation, remove old approach.
-
-* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
- text/unicode and byte-streams (uses underlying standard lib io.*
- if available)
-
-* make py.unittest_convert helper script available which converts "unittest.py"
- style files into the simpler assert/direct-test-classes py.test/nosetests
- style. The script was written by Laura Creighton.
-
-* simplified internal localpath implementation
-
-Changes between 1.0.1 and 1.0.2
-=====================================
-
-* fixing packaging issues, triggered by fedora redhat packaging,
- also added doc, examples and contrib dirs to the tarball.
-
-* added a documentation link to the new django plugin.
-
-Changes between 1.0.0 and 1.0.1
-=====================================
-
-* added a 'pytest_nose' plugin which handles nose.SkipTest,
- nose-style function/method/generator setup/teardown and
- tries to report functions correctly.
-
-* capturing of unicode writes or encoded strings to sys.stdout/err
-  works better; also terminal writing was adapted and somewhat
-  unified between windows and linux.
-
-* improved documentation layout and content a lot
-
-* added a "--help-config" option to show conftest.py / ENV-var names for
- all longopt cmdline options, and some special conftest.py variables.
- renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
-
-* fix issue #27: better reporting on non-collectable items given on commandline
- (e.g. pyc files)
-
-* fix issue #33: added --version flag (thanks Benjamin Peterson)
-
-* fix issue #32: adding support for "incomplete" paths to wcpath.status()
-
-* "Test" prefixed classes are *not* collected by default anymore if they
- have an __init__ method
-
-* monkeypatch setenv() now accepts a "prepend" parameter
-
-* improved reporting of collection error tracebacks
-
-* simplified multicall mechanism and plugin architecture,
- renamed some internal methods and argnames
-
-Changes between 1.0.0b9 and 1.0.0
-=====================================
-
-* more terse reporting: try to show filesystem paths relative to the current dir
-* improve xfail output a bit
-
-Changes between 1.0.0b8 and 1.0.0b9
-=====================================
-
-* cleanly handle and report final teardown of test setup
-
-* fix svn-1.6 compat issue with py.path.svnwc().versioned()
- (thanks Wouter Vanden Hove)
-
-* setup/teardown or collection problems now show as ERRORs
- or with big "E"'s in the progress lines. they are reported
- and counted separately.
-
-* dist-testing: properly handle test items that get locally
- collected but cannot be collected on the remote side - often
- due to platform/dependency reasons
-
-* simplified py.test.mark API - see keyword plugin documentation
-
-* integrate better with logging: capturing now by default captures
- test functions and their immediate setup/teardown in a single stream
-
-* capsys and capfd funcargs now have a readouterr() and a close() method
-  (under the hood py.io.StdCapture/FD objects are used, which grew a
-  readouterr() method as well to return snapshots of captured out/err)
-
-* make assert-reinterpretation work better with comparisons not
-  returning bools (reported with numpy, thanks Maciej Fijalkowski)
-
-* reworked per-test output capturing into the pytest_iocapture.py plugin
- and thus removed capturing code from config object
-
-* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
-
-
-Changes between 1.0.0b7 and 1.0.0b8
-=====================================
-
-* pytest_unittest-plugin is now enabled by default
-
-* introduced pytest_keyboardinterrupt hook and
-  refined the pytest_sessionfinish hook, added tests.
-
-* work around a buggy logging module interaction ("closing already closed
- files"). Thanks to Sridhar Ratnakumar for triggering.
-
-* if plugins use "py.test.importorskip" for importing
-  a dependency, only a warning will be issued instead
-  of exiting the testing process.
-
-* many improvements to docs:
-  - refined funcargs doc, use the term "factory" instead of "provider"
- - added a new talk/tutorial doc page
- - better download page
- - better plugin docstrings
- - added new plugins page and automatic doc generation script
-
-* fixed teardown problem related to partially failing funcarg setups
- (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
- always invoked even if the "pytest_runtest_setup" failed.
-
-* tweaked doctest output for docstrings in py modules,
- thanks Radomir.
-
-Changes between 1.0.0b3 and 1.0.0b7
-=============================================
-
-* renamed py.test.xfail back to py.test.mark.xfail to avoid
- two ways to decorate for xfail
-
-* re-added py.test.mark decorator for setting keywords on functions
- (it was actually documented so removing it was not nice)
-
-* remove scope-argument from request.addfinalizer() because
- request.cached_setup has the scope arg. TOOWTDI.
-
-* perform setup finalization before reporting failures
-
-* apply modified patches from Andreas Kloeckner to allow
- test functions to have no func_code (#22) and to make
- "-k" and function keywords work (#20)
-
-* apply patch from Daniel Peolzleithner (issue #23)
-
-* resolve issue #18, multiprocessing.Manager() and
- redirection clash
-
-* make __name__ == "__channelexec__" for remote_exec code
-
-Changes between 1.0.0b1 and 1.0.0b3
-=============================================
-
-* plugin classes are removed: one now defines
- hooks directly in conftest.py or global pytest_*.py
- files.
-
-* added new pytest_namespace(config) hook that allows
-  injecting helpers directly into the py.test.* namespace.
-
-* documented and refined many hooks
-
-* added new style of generative tests via
- pytest_generate_tests hook that integrates
- well with function arguments.
-
-
-Changes between 0.9.2 and 1.0.0b1
-=============================================
-
-* introduced new "funcarg" setup method,
- see doc/test/funcarg.txt
-
-* introduced plugin architecture and many
- new py.test plugins, see
- doc/test/plugins.txt
-
-* teardown_method is now guaranteed to get
- called after a test method has run.
-
-* new method: py.test.importorskip(mod,minversion)
- will either import or call py.test.skip()
-
-* completely revised internal py.test architecture
-
-* new py.process.ForkedFunc object allowing you to
-  fork execution of a function to a subprocess
-  and get a result back.
-
-XXX lots of things missing here XXX
-
-Changes between 0.9.1 and 0.9.2
-===============================
-
-* refined installation and metadata, created new setup.py,
- now based on setuptools/ez_setup (thanks to Ralf Schmitt
- for his support).
-
-* improved the way of making py.* scripts available in
- windows environments, they are now added to the
- Scripts directory as ".cmd" files.
-
-* py.path.svnwc.status() now is more complete and
- uses xml output from the 'svn' command if available
- (Guido Wesdorp)
-
-* fix for py.path.svn* to work with svn 1.5
- (Chris Lamb)
-
-* fix path.relto(otherpath) method on windows to
- use normcase for checking if a path is relative.
-
-* py.test's traceback is better parseable from editors
- (follows the filenames:LINENO: MSG convention)
- (thanks to Osmo Salomaa)
-
-* fix to javascript-generation, "py.test --runbrowser"
- should work more reliably now
-
-* removed previously accidentally added
- py.test.broken and py.test.notimplemented helpers.
-
-* there now is a py.__version__ attribute
-
-Changes between 0.9.0 and 0.9.1
-===============================
-
-This is a fairly complete list of changes between 0.9 and 0.9.1, which can
-serve as a reference for developers.
-
-* allowing + signs in py.path.svn urls [39106]
-* fixed support for Failed exceptions without excinfo in py.test [39340]
-* added support for killing processes for Windows (as well as platforms that
- support os.kill) in py.misc.killproc [39655]
-* added setup/teardown for generative tests to py.test [40702]
-* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
-* fixed problem with calling .remove() on wcpaths of non-versioned files in
- py.path [44248]
-* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
-* fail to run greenlet tests when pypy is available, but without stackless
- [45294]
-* small fixes in rsession tests [45295]
-* fixed issue with 2.5 type representations in py.test [45483, 45484]
-* made internal reporting-issue display happen atomically in py.test
-  [45518]
-* made the py.lookup script ignore non-existing files [45519]
-* improved exception name creation in py.test [45535]
-* made execnet use fewer threads [merge in 45539]
-* removed the lock previously required for atomic reporting-issue display in py.test
-  [45545]
-* removed globals from execnet [45541, 45547]
-* refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit
- get called in 2.5 (py.execnet) [45548]
-* fixed bug in joining threads in py.execnet's servemain [45549]
-* refactored py.test.rsession tests to not rely on exact output format anymore
- [45646]
-* using repr() on test outcome [45647]
-* added 'Reason' classes for py.test.skip() [45648, 45649]
-* killed some unnecessary sanity check in py.test.collect [45655]
-* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
- usable by Administrators [45901]
-* added support for locking and non-recursive commits to py.path.svnwc [45994]
-* locking files in py.execnet to prevent CPython from segfaulting [46010]
-* added export() method to py.path.svnurl
-* fixed -d -x in py.test [47277]
-* fixed argument concatenation problem in py.path.svnwc [49423]
-* restore py.test behaviour that it exits with code 1 when there are failures
- [49974]
-* don't fail on html files that don't have an accompanying .txt file [50606]
-* fixed 'utestconvert.py < input' [50645]
-* small fix for code indentation in py.code.source [50755]
-* fix _docgen.py documentation building [51285]
-* improved checks for source representation of code blocks in py.test [51292]
-* added support for passing authentication to py.path.svn* objects [52000,
- 52001]
-* removed sorted() call for py.apigen tests in favour of [].sort() to support
- Python 2.3 [52481]
+py.test/pylib 1.3.0: new options, per-plugin hooks, fixes ...
+===========================================================================
+
+The 1.3.0 release introduces new options, bug fixes and improved compatibility
+with Python3 and Jython-2.5.1 on Windows. If you already use py-1.2, chances
+are you can use py-1.3.0. See the CHANGELOG below for more details and
+http://pylib.org/install.html for installation instructions.
+
+py.test is an advanced automated testing tool working with Python2,
+Python3, Jython and PyPy versions on all major operating systems. It
+offers a no-boilerplate testing approach and has inspired other testing
+tools and enhancements in the standard Python library for more than five
+years. It has a simple and extensive plugin architecture, configurable
+reporting and provides unique ways to make it fit to your testing
+process and needs.
+
+See http://pytest.org for more info.
+
+cheers and have fun,
+
+holger krekel
+
+Changes between 1.2.1 and 1.3.0
+==================================================
+
+- deprecate --report option in favour of a new shorter and easier to
+ remember -r option: it takes a string argument consisting of any
+ combination of 'xfsX' characters. They relate to the single chars
+ you see during the dotted progress printing and will print an extra line
+ per test at the end of the test run. This extra line indicates the exact
+ position or test ID that you directly paste to the py.test cmdline in order
+ to re-run a particular test.
+
+- allow external plugins to register new hooks via the new
+ pytest_addhooks(pluginmanager) hook. The new release of
+ the pytest-xdist plugin for distributed and looponfailing
+ testing requires this feature.
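+  A rough sketch of how a plugin might use this (the hook-definition module
+  name and the ``addhooks`` method shown here are assumptions, not taken
+  from this entry)::
+
+      # conftest.py or a setuptools-registered plugin module
+      def pytest_addhooks(pluginmanager):
+          import myhooks  # module with plain hook functions, e.g.
+                          # def pytest_myplugin_event(item): ...
+          pluginmanager.addhooks(myhooks)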
+
+- add a new pytest_ignore_collect(path, config) hook to allow projects and
+ plugins to define exclusion behaviour for their directory structure -
+  for example you may define this method in a conftest.py::
+
+ def pytest_ignore_collect(path):
+ return path.check(link=1)
+
+ to prevent even collection of any tests in symlinked dirs.
+
+- new pytest_pycollect_makemodule(path, parent) hook for
+ allowing customization of the Module collection object for a
+ matching test module.
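+  A hypothetical conftest.py sketch (the base class and constructor
+  arguments are assumptions; only the hook name and signature are from this
+  entry)::
+
+      class DocModule(py.test.collect.Module):
+          # e.g. add extra collection or reporting behaviour here
+          pass
+
+      def pytest_pycollect_makemodule(path, parent):
+          if path.basename.startswith("doc_"):
+              return DocModule(path, parent=parent)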
+
+- extend and refine xfail mechanism::
+
+ @py.test.mark.xfail(run=False) do not run the decorated test
+ @py.test.mark.xfail(reason="...") prints the reason string in xfail summaries
+
+  specifying ``--runxfail`` on the command line ignores xfail markers to show
+ you the underlying traceback.
+
+- expose (previously internal) commonly useful methods:
+  py.io.get_terminal_width() -> return terminal width
+ py.io.ansi_print(...) -> print colored/bold text on linux/win32
+ py.io.saferepr(obj) -> return limited representation string
+
+- expose test outcome related exceptions as py.test.skip.Exception,
+ py.test.raises.Exception etc., useful mostly for plugins
+ doing special outcome interpretation/tweaking
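+  For instance, a plugin could roughly distinguish outcomes like this
+  (``py.test.fail.Exception`` is assumed by analogy with the names above)::
+
+      def classify(func):
+          try:
+              func()
+          except py.test.skip.Exception as exc:
+              return "skipped: %s" % (exc,)
+          except py.test.fail.Exception as exc:
+              return "failed: %s" % (exc,)
+          return "passed"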
+
+- (issue85) fix junitxml plugin to handle tests with non-ascii output
+
+- fix/refine python3 compatibility (thanks Benjamin Peterson)
+
+- fixes for making the jython/win32 combination work, note however:
+ jython2.5.1/win32 does not provide a command line launcher, see
+ http://bugs.jython.org/issue1491 . See pylib install documentation
+ for how to work around.
+
+- fixes for handling of unicode exception values and unprintable objects
+
+- (issue87) fix unboundlocal error in assertionold code
+
+- (issue86) improve documentation for looponfailing
+
+- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
+
+- ship distribute_setup.py version 0.6.10
+
+- added links to the new capturelog and coverage plugins
+
+
+Changes between 1.2.1 and 1.2.0
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+ allows to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+  corresponding setup failed. This includes refinements for calling setup_module/class functions
+  which will now only be called once instead of the previous behaviour where they'd be called
+  multiple times if they raise an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the corresponding module/class scope.
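+  For illustration, module-level setup/teardown functions look like this
+  (a generic sketch, not from this entry)::
+
+      def setup_module(module):
+          module.resources = ["db-connection"]   # stands in for real setup work
+
+      def teardown_module(module):
+          # per this entry, still called even if setup_module failed
+          for name in getattr(module, "resources", []):
+              print("releasing %s" % name)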
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
+
+Changes between 1.2 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
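+  A minimal usage sketch (access to parsed options via ``.option`` is an
+  assumption here)::
+
+      def test_verbosity(pytestconfig):
+          # pytestconfig is the same config object that plugins receive
+          assert pytestconfig.option.verbose >= 0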
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
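+  For example (illustrative only)::
+
+      # conftest.py
+      def pytest_report_header(config):
+          return "example project -- extra header line"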
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+  to a test within a test collection directly. This might eventually
+  evolve into a full substitute for "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+  also setuptools entry point names are turned into canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
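+  A sketch of such a generator hook (shown module-level; per this entry the
+  same function may now also live on a test class; the ``metafunc.addcall``
+  usage is assumed from the funcarg mechanism of this series)::
+
+      def pytest_generate_tests(metafunc):
+          if "number" in metafunc.funcargnames:
+              for i in (0, 1, 2):
+                  metafunc.addcall(funcargs=dict(number=i))
+
+      def test_square_non_negative(number):
+          assert number * number >= 0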
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+  will now prevent further hooks from being called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+  entry points, using setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain test and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
+
+Changes between 1.1.0 and 1.0.2
+=====================================
+
+* adjust and improve docs
+
+* remove py.rest tool and internal namespace - it was
+  never really advertised and can still be used with
+  the old release if needed. If there is interest
+  it could be revived into its own tool, I guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup, which creates an option group
+
+* add --report command line option that allows controlling the display of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+  at function, class and module level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches closer to expectations
+  for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
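+  A small usage sketch::
+
+      import os
+
+      def test_env_cleanup(monkeypatch):
+          monkeypatch.setenv("EXAMPLE_FLAG", "1")
+          assert os.environ["EXAMPLE_FLAG"] == "1"
+          monkeypatch.delenv("EXAMPLE_FLAG")
+          assert "EXAMPLE_FLAG" not in os.environ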
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
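+  Roughly (assuming the usual in-memory file interface)::
+
+      import py
+
+      t = py.io.TextIO()
+      t.write(u"some text")
+      assert t.getvalue() == u"some text"
+
+      b = py.io.BytesIO()
+      b.write("raw bytes".encode("ascii"))
+      assert b.getvalue() == "raw bytes".encode("ascii")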
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
+
+Changes between 1.0.1 and 1.0.2
+=====================================
+
+* fixing packaging issues, triggered by fedora redhat packaging,
+ also added doc, examples and contrib dirs to the tarball.
+
+* added a documentation link to the new django plugin.
+
+Changes between 1.0.0 and 1.0.1
+=====================================
+
+* added a 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* capturing of unicode writes or encoded strings to sys.stdout/err
+  works better; also terminal writing was adapted and somewhat
+  unified between windows and linux.
+
+* improved documentation layout and content a lot
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
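+  Presumably usable like this (the separator semantics of "prepend" are an
+  interpretation of this entry)::
+
+      import os
+
+      def test_tool_on_path(monkeypatch):
+          monkeypatch.setenv("PATH", "/opt/example/bin", prepend=os.pathsep)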
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
+
+Changes between 1.0.0b9 and 1.0.0
+=====================================
+
+* more terse reporting: try to show filesystem paths relative to the current dir
+* improve xfail output a bit
+
+Changes between 1.0.0b8 and 1.0.0b9
+=====================================
+
+* cleanly handle and report final teardown of test setup
+
+* fix svn-1.6 compat issue with py.path.svnwc().versioned()
+ (thanks Wouter Vanden Hove)
+
+* setup/teardown or collection problems now show as ERRORs
+ or with big "E"'s in the progress lines. they are reported
+ and counted separately.
+
+* dist-testing: properly handle test items that get locally
+ collected but cannot be collected on the remote side - often
+ due to platform/dependency reasons
+
+* simplified py.test.mark API - see keyword plugin documentation
+
+* integrate better with logging: capturing now by default captures
+ test functions and their immediate setup/teardown in a single stream
+
+* capsys and capfd funcargs now have a readouterr() and a close() method
+  (under the hood py.io.StdCapture/FD objects are used, which grew a
+  readouterr() method as well to return snapshots of captured out/err)
+
+* make assert-reinterpretation work better with comparisons not
+  returning bools (reported with numpy, thanks Maciej Fijalkowski)
+
+* reworked per-test output capturing into the pytest_iocapture.py plugin
+ and thus removed capturing code from config object
+
+* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
+
+
+Changes between 1.0.0b7 and 1.0.0b8
+=====================================
+
+* pytest_unittest-plugin is now enabled by default
+
+* introduced pytest_keyboardinterrupt hook and
+  refined the pytest_sessionfinish hook, added tests.
+
+* work around a buggy logging module interaction ("closing already closed
+ files"). Thanks to Sridhar Ratnakumar for triggering.
+
+* if plugins use "py.test.importorskip" for importing
+  a dependency, only a warning will be issued instead
+  of exiting the testing process.
+
+* many improvements to docs:
+  - refined funcargs doc, use the term "factory" instead of "provider"
+ - added a new talk/tutorial doc page
+ - better download page
+ - better plugin docstrings
+ - added new plugins page and automatic doc generation script
+
+* fixed teardown problem related to partially failing funcarg setups
+ (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
+ always invoked even if the "pytest_runtest_setup" failed.
+
+* tweaked doctest output for docstrings in py modules,
+ thanks Radomir.
+
+Changes between 1.0.0b3 and 1.0.0b7
+=============================================
+
+* renamed py.test.xfail back to py.test.mark.xfail to avoid
+ two ways to decorate for xfail
+
+* re-added py.test.mark decorator for setting keywords on functions
+ (it was actually documented so removing it was not nice)
+
+* remove scope-argument from request.addfinalizer() because
+ request.cached_setup has the scope arg. TOOWTDI.
+
+* perform setup finalization before reporting failures
+
+* apply modified patches from Andreas Kloeckner to allow
+ test functions to have no func_code (#22) and to make
+ "-k" and function keywords work (#20)
+
+* apply patch from Daniel Peolzleithner (issue #23)
+
+* resolve issue #18, multiprocessing.Manager() and
+ redirection clash
+
+* make __name__ == "__channelexec__" for remote_exec code
+
+Changes between 1.0.0b1 and 1.0.0b3
+=============================================
+
+* plugin classes are removed: one now defines
+ hooks directly in conftest.py or global pytest_*.py
+ files.
+
+* added new pytest_namespace(config) hook that allows
+  injecting helpers directly into the py.test.* namespace.
+
+* documented and refined many hooks
+
+* added new style of generative tests via
+ pytest_generate_tests hook that integrates
+ well with function arguments.
+
+
+Changes between 0.9.2 and 1.0.0b1
+=============================================
+
+* introduced new "funcarg" setup method,
+ see doc/test/funcarg.txt
+
+* introduced plugin architecture and many
+ new py.test plugins, see
+ doc/test/plugins.txt
+
+* teardown_method is now guaranteed to get
+ called after a test method has run.
+
+* new method: py.test.importorskip(mod,minversion)
+ will either import or call py.test.skip()
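+  Typical use (module name and minimum version chosen arbitrarily)::
+
+      docutils = py.test.importorskip("docutils", minversion="0.5")
+
+      def test_uses_docutils():
+          assert docutils is not None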
+
+* completely revised internal py.test architecture
+
+* new py.process.ForkedFunc object allowing you to
+  fork execution of a function to a subprocess
+  and get a result back.
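+  A small sketch (the result attributes shown are assumptions)::
+
+      import py
+
+      def compute():
+          return 6 * 7
+
+      result = py.process.ForkedFunc(compute).waitfinish()
+      assert result.exitstatus == 0
+      assert result.retval == 42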
+
+XXX lots of things missing here XXX
+
+Changes between 0.9.1 and 0.9.2
+===============================
+
+* refined installation and metadata, created new setup.py,
+ now based on setuptools/ez_setup (thanks to Ralf Schmitt
+ for his support).
+
+* improved the way of making py.* scripts available in
+ windows environments, they are now added to the
+ Scripts directory as ".cmd" files.
+
+* py.path.svnwc.status() now is more complete and
+ uses xml output from the 'svn' command if available
+ (Guido Wesdorp)
+
+* fix for py.path.svn* to work with svn 1.5
+ (Chris Lamb)
+
+* fix path.relto(otherpath) method on windows to
+ use normcase for checking if a path is relative.
+
+* py.test's traceback is better parseable from editors
+ (follows the filenames:LINENO: MSG convention)
+ (thanks to Osmo Salomaa)
+
+* fix to javascript-generation, "py.test --runbrowser"
+ should work more reliably now
+
+* removed previously accidentally added
+ py.test.broken and py.test.notimplemented helpers.
+
+* there now is a py.__version__ attribute
+
+Changes between 0.9.0 and 0.9.1
+===============================
+
+This is a fairly complete list of changes between 0.9 and 0.9.1, which can
+serve as a reference for developers.
+
+* allowing + signs in py.path.svn urls [39106]
+* fixed support for Failed exceptions without excinfo in py.test [39340]
+* added support for killing processes for Windows (as well as platforms that
+ support os.kill) in py.misc.killproc [39655]
+* added setup/teardown for generative tests to py.test [40702]
+* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
+* fixed problem with calling .remove() on wcpaths of non-versioned files in
+ py.path [44248]
+* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
+* fail to run greenlet tests when pypy is available, but without stackless
+ [45294]
+* small fixes in rsession tests [45295]
+* fixed issue with 2.5 type representations in py.test [45483, 45484]
+* made internal reporting-issue display happen atomically in py.test
+  [45518]
+* made the py.lookup script ignore non-existing files [45519]
+* improved exception name creation in py.test [45535]
+* made execnet use fewer threads [merge in 45539]
+* removed the lock previously required for atomic reporting-issue display in py.test
+  [45545]
+* removed globals from execnet [45541, 45547]
+* refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit
+ get called in 2.5 (py.execnet) [45548]
+* fixed bug in joining threads in py.execnet's servemain [45549]
+* refactored py.test.rsession tests to not rely on exact output format anymore
+ [45646]
+* using repr() on test outcome [45647]
+* added 'Reason' classes for py.test.skip() [45648, 45649]
+* killed some unnecessary sanity check in py.test.collect [45655]
+* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
+ usable by Administrators [45901]
+* added support for locking and non-recursive commits to py.path.svnwc [45994]
+* locking files in py.execnet to prevent CPython from segfaulting [46010]
+* added export() method to py.path.svnurl
+* fixed -d -x in py.test [47277]
+* fixed argument concatenation problem in py.path.svnwc [49423]
+* restore py.test behaviour that it exits with code 1 when there are failures
+ [49974]
+* don't fail on html files that don't have an accompanying .txt file [50606]
+* fixed 'utestconvert.py < input' [50645]
+* small fix for code indentation in py.code.source [50755]
+* fix _docgen.py documentation building [51285]
+* improved checks for source representation of code blocks in py.test [51292]
+* added support for passing authentication to py.path.svn* objects [52000,
+ 52001]
+* removed sorted() call for py.apigen tests in favour of [].sort() to support
+ Python 2.3 [52481]
diff --git a/doc/announce/release-1.3.1.txt b/doc/announce/release-1.3.1.txt
index 471de40..aa1bad4 100644
--- a/doc/announce/release-1.3.1.txt
+++ b/doc/announce/release-1.3.1.txt
@@ -1,104 +1,104 @@
-py.test/pylib 1.3.1: new py.test.xfail, --maxfail, better reporting
-===========================================================================
-
-The pylib/py.test 1.3.1 release brings:
-
-- the new imperative ``py.test.xfail()`` helper in order to have a test or
- setup function result in an "expected failure"
-- a new option ``--maxfail=NUM`` to stop the test run after some failures
-- markers/decorators are now applicable to test classes (>=Python2.6)
-- improved reporting, shorter tracebacks in several cases
-- some simplified internals, more compatibility with Jython and PyPy
-- bug fixes and various refinements
-
-See the CHANGELOG entry below for more details and
-http://pylib.org/install.html for installation instructions.
-
-If you used older versions of py.test you should be able to upgrade
-to 1.3.1 without changes to your test source code.
-
-py.test is an automated testing tool working with Python2,
-Python3, Jython and PyPy versions on all major operating systems. It
-offers a no-boilerplate testing approach and has inspired other testing
-tools and enhancements in the standard Python library for more than five
-years. It has a simple and extensive plugin architecture, configurable
-reporting and provides unique ways to make it fit to your testing
-process and needs.
-
-See http://pytest.org for more info.
-
-cheers and have fun,
-
-holger krekel
-
-Changes between 1.3.0 and 1.3.1
-==================================================
-
-New features
-++++++++++++++++++
-
-- issue91: introduce new py.test.xfail(reason) helper
- to imperatively mark a test as expected to fail. Can
- be used from within setup and test functions. This is
- useful especially for parametrized tests when certain
- configurations are expected-to-fail. In this case the
- declarative approach with the @py.test.mark.xfail cannot
- be used as it would mark all configurations as xfail.
-
-- issue102: introduce new --maxfail=NUM option to stop
- test runs after NUM failures. This is a generalization
- of the '-x' or '--exitfirst' option which is now equivalent
- to '--maxfail=1'. Both '-x' and '--maxfail' will
- now also print a line near the end indicating the Interruption.
-
-- issue89: allow py.test.mark decorators to be used on classes
- (class decorators were introduced with python2.6) and
- also allow to have multiple markers applied at class/module level
- by specifying a list.
-
-- improve and refine letter reporting in the progress bar:
- . pass
- f failed test
- s skipped tests (reminder: use for dependency/platform mismatch only)
- x xfailed test (test that was expected to fail)
- X xpassed test (test that was expected to fail but passed)
-
- You can use any combination of 'fsxX' with the '-r' extended
- reporting option. The xfail/xpass results will show up as
- skipped tests in the junitxml output - which also fixes
- issue99.
-
-- make py.test.cmdline.main() return the exitstatus instead of raising
- SystemExit and also allow it to be called multiple times. This of
-  course requires that your application and tests are properly torn
-  down and don't have global state.
-
-Fixes / Maintenance
-++++++++++++++++++++++
-
-- improved traceback presentation:
- - improved and unified reporting for "--tb=short" option
-  - Errors during test module imports are much shorter (using --tb=short style)
- - raises shows shorter more relevant tracebacks
- - --fulltrace now more systematically makes traces longer / inhibits cutting
-
-- improve support for raises and other dynamically compiled code by
- manipulating python's linecache.cache instead of the previous
- rather hacky way of creating custom code objects. This makes
-  it work seamlessly on Jython and PyPy where it previously didn't.
-
-- fix issue96: make capturing more resilient against Control-C
- interruptions (involved somewhat substantial refactoring
- to the underlying capturing functionality to avoid race
- conditions).
-
-- fix chaining of conditional skipif/xfail decorators - so it works now
- as expected to use multiple @py.test.mark.skipif(condition) decorators,
-  including specific reporting of which of the conditions led to skipping.
-
-- fix issue95: late-import zlib so that it's not required
- for general py.test startup.
-
-- fix issue94: make reporting more robust against bogus source code
- (and internally be more careful when presenting unexpected byte sequences)
-
+py.test/pylib 1.3.1: new py.test.xfail, --maxfail, better reporting
+===========================================================================
+
+The pylib/py.test 1.3.1 release brings:
+
+- the new imperative ``py.test.xfail()`` helper in order to have a test or
+ setup function result in an "expected failure"
+- a new option ``--maxfail=NUM`` to stop the test run after some failures
+- markers/decorators are now applicable to test classes (>=Python2.6)
+- improved reporting, shorter tracebacks in several cases
+- some simplified internals, more compatibility with Jython and PyPy
+- bug fixes and various refinements
+
+See the CHANGELOG entry below for more details and
+http://pylib.org/install.html for installation instructions.
+
+If you used older versions of py.test you should be able to upgrade
+to 1.3.1 without changes to your test source code.
+
+py.test is an automated testing tool working with Python2,
+Python3, Jython and PyPy versions on all major operating systems. It
+offers a no-boilerplate testing approach and has inspired other testing
+tools and enhancements in the standard Python library for more than five
+years. It has a simple and extensive plugin architecture, configurable
+reporting and provides unique ways to make it fit to your testing
+process and needs.
+
+See http://pytest.org for more info.
+
+cheers and have fun,
+
+holger krekel
+
+Changes between 1.3.0 and 1.3.1
+==================================================
+
+New features
+++++++++++++++++++
+
+- issue91: introduce new py.test.xfail(reason) helper
+ to imperatively mark a test as expected to fail. Can
+ be used from within setup and test functions. This is
+ useful especially for parametrized tests when certain
+ configurations are expected-to-fail. In this case the
+ declarative approach with the @py.test.mark.xfail cannot
+ be used as it would mark all configurations as xfail.
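+  For example (condition chosen arbitrarily)::
+
+      import sys
+      import py
+
+      def test_optional_feature():
+          if sys.platform == "win32":
+              py.test.xfail("expected to fail on win32")
+          assert 2 + 2 == 4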
+
+- issue102: introduce new --maxfail=NUM option to stop
+ test runs after NUM failures. This is a generalization
+ of the '-x' or '--exitfirst' option which is now equivalent
+ to '--maxfail=1'. Both '-x' and '--maxfail' will
+ now also print a line near the end indicating the Interruption.
+
+- issue89: allow py.test.mark decorators to be used on classes
+ (class decorators were introduced with python2.6) and
+ also allow to have multiple markers applied at class/module level
+ by specifying a list.
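+  For example (marker names are arbitrary)::
+
+      @py.test.mark.webtest
+      class TestSomeAPI:
+          # a list applies several markers at class (or module) level
+          pytestmark = [py.test.mark.slow, py.test.mark.flaky]
+
+          def test_request(self):
+              assert True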
+
+- improve and refine letter reporting in the progress bar:
+ . pass
+ f failed test
+ s skipped tests (reminder: use for dependency/platform mismatch only)
+ x xfailed test (test that was expected to fail)
+ X xpassed test (test that was expected to fail but passed)
+
+ You can use any combination of 'fsxX' with the '-r' extended
+ reporting option. The xfail/xpass results will show up as
+ skipped tests in the junitxml output - which also fixes
+ issue99.
+
+- make py.test.cmdline.main() return the exitstatus instead of raising
+ SystemExit and also allow it to be called multiple times. This of
+  course requires that your application and tests are properly torn
+  down and don't have global state.
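+  So a programmatic invocation can look roughly like this (the test path is
+  a placeholder)::
+
+      import py
+
+      exitstatus = py.test.cmdline.main(["-x", "mytests/"])
+      assert exitstatus in (0, 1)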
+
+Fixes / Maintenance
+++++++++++++++++++++++
+
+- improved traceback presentation:
+ - improved and unified reporting for "--tb=short" option
+  - Errors during test module imports are much shorter (using --tb=short style)
+ - raises shows shorter more relevant tracebacks
+ - --fulltrace now more systematically makes traces longer / inhibits cutting
+
+- improve support for raises and other dynamically compiled code by
+ manipulating python's linecache.cache instead of the previous
+ rather hacky way of creating custom code objects. This makes
+  it work seamlessly on Jython and PyPy where it previously didn't.
+
+- fix issue96: make capturing more resilient against Control-C
+ interruptions (involved somewhat substantial refactoring
+ to the underlying capturing functionality to avoid race
+ conditions).
+
+- fix chaining of conditional skipif/xfail decorators - so it works now
+ as expected to use multiple @py.test.mark.skipif(condition) decorators,
+  including specific reporting of which of the conditions led to skipping.
+
+- fix issue95: late-import zlib so that it's not required
+ for general py.test startup.
+
+- fix issue94: make reporting more robust against bogus source code
+ (and internally be more careful when presenting unexpected byte sequences)
+
diff --git a/doc/announce/release-1.3.2.txt b/doc/announce/release-1.3.2.txt
index 599dfbe..a3eb70e 100644
--- a/doc/announce/release-1.3.2.txt
+++ b/doc/announce/release-1.3.2.txt
@@ -1,720 +1,720 @@
-py.test/pylib 1.3.2: API and reporting refinements, many fixes
-===========================================================================
-
-The pylib/py.test 1.3.2 release brings many bug fixes and some new
-features. It was refined for and tested against the recently released
-Python2.7 and remains compatible with the usual armada of interpreters
-(Python2.4 through to Python3.1.2, Jython and PyPy). Note that for using
-distributed testing features you'll need to upgrade to the jointly released
-pytest-xdist-1.4 because of some internal refactorings.
-
-See http://pytest.org for general documentation and below for
-a detailed CHANGELOG.
-
-cheers & particular thanks to Benjamin Peterson, Ronny Pfannschmidt
-and all issue and patch contributors,
-
-holger krekel
-
-Changes between 1.3.1 and 1.3.2
-==================================================
-
-New features
-++++++++++++++++++
-
-- fix issue103: introduce py.test.raises as context manager, examples::
-
- with py.test.raises(ZeroDivisionError):
- x = 0
- 1 / x
-
- with py.test.raises(RuntimeError) as excinfo:
- call_something()
-
- # you may do extra checks on excinfo.value|type|traceback here
-
- (thanks Ronny Pfannschmidt)
-
-- Funcarg factories can now dynamically apply a marker to a
- test invocation. This is for example useful if a factory
- provides parameters to a test which are expected-to-fail::
-
- def pytest_funcarg__arg(request):
- request.applymarker(py.test.mark.xfail(reason="flaky config"))
- ...
-
- def test_function(arg):
- ...
-
-- improved error reporting on collection and import errors. This makes
- use of a more general mechanism, namely that for custom test item/collect
- nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
- override it to return a string error representation of your choice
- which is going to be reported as a (red) string.
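-  A hypothetical custom item could therefore do (base class and other
-  details are assumptions; only the ``repr_failure`` override is from this
-  entry)::
-
-      class UsecaseItem(py.test.collect.Item):
-          def repr_failure(self, excinfo):
-              # whatever string is returned here is shown (in red)
-              # in the failure report for this item
-              return "usecase failed: %s" % (excinfo.value,)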
-
-- introduce '--junitprefix=STR' option to prepend a prefix
- to all reports in the junitxml file.
-
-Bug fixes / Maintenance
-++++++++++++++++++++++++++
-
-- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
-  with Python2.7 (if you use the ``recwarn`` funcarg, warnings will be enabled so that
- you can properly check for their existence in a cross-python manner).
-- refine --pdb: ignore xfailed tests, unify its TB-reporting and
- don't display failures again at the end.
-- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
-- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
-- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
-- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
-- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
-- fix py.code.compile(source) to generate unique filenames
-- fix assertion re-interp problems on PyPy, by deferring code
- compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
-- fix py.path.local.pyimport() to work with directories
-- streamline py.path.local.mkdtemp implementation and usage
-- don't print empty lines when showing junitxml-filename
-- add optional boolean ignore_errors parameter to py.path.local.remove
-- fix terminal writing on win32/python2.4
-- py.process.cmdexec() now tries harder to return properly encoded unicode objects
- on all python versions
-- install plain py.test/py.which scripts also for Jython, this helps to
- get canonical script paths in virtualenv situations
-- make path.bestrelpath(path) return ".", note that when calling
- X.bestrelpath the assumption is that X is a directory.
-- make initial conftest discovery ignore "--" prefixed arguments
-- fix resultlog plugin when used in a multicpu/multihost xdist situation
- (thanks Jakub Gustak)
-- perform distributed testing related reporting in the xdist-plugin
- rather than having dist-related code in the generic py.test
- distribution
-- fix homedir detection on Windows
-- ship distribute_setup.py version 0.6.13
-
-Changes between 1.3.0 and 1.3.1
-==================================================
-
-New features
-++++++++++++++++++
-
-- issue91: introduce new py.test.xfail(reason) helper
- to imperatively mark a test as expected to fail. Can
- be used from within setup and test functions. This is
- useful especially for parametrized tests when certain
- configurations are expected-to-fail. In this case the
- declarative approach with the @py.test.mark.xfail cannot
- be used as it would mark all configurations as xfail.
-
-- issue102: introduce new --maxfail=NUM option to stop
- test runs after NUM failures. This is a generalization
- of the '-x' or '--exitfirst' option which is now equivalent
- to '--maxfail=1'. Both '-x' and '--maxfail' will
- now also print a line near the end indicating the Interruption.
-
-- issue89: allow py.test.mark decorators to be used on classes
- (class decorators were introduced with python2.6) and
- also allow to have multiple markers applied at class/module level
- by specifying a list.
-
-- improve and refine letter reporting in the progress bar:
- . pass
- f failed test
- s skipped tests (reminder: use for dependency/platform mismatch only)
- x xfailed test (test that was expected to fail)
- X xpassed test (test that was expected to fail but passed)
-
- You can use any combination of 'fsxX' with the '-r' extended
- reporting option. The xfail/xpass results will show up as
- skipped tests in the junitxml output - which also fixes
- issue99.
-
-- make py.test.cmdline.main() return the exitstatus instead of raising
- SystemExit and also allow it to be called multiple times. This of
-  course requires that your application and tests are properly torn
-  down and don't have global state.
-
-Fixes / Maintenance
-++++++++++++++++++++++
-
-- improved traceback presentation:
- - improved and unified reporting for "--tb=short" option
-  - Errors during test module imports are much shorter (using --tb=short style)
- - raises shows shorter more relevant tracebacks
- - --fulltrace now more systematically makes traces longer / inhibits cutting
-
-- improve support for raises and other dynamically compiled code by
- manipulating python's linecache.cache instead of the previous
- rather hacky way of creating custom code objects. This makes
-  it work seamlessly on Jython and PyPy where it previously didn't.
-
-- fix issue96: make capturing more resilient against Control-C
- interruptions (involved somewhat substantial refactoring
- to the underlying capturing functionality to avoid race
- conditions).
-
-- fix chaining of conditional skipif/xfail decorators - so it works now
- as expected to use multiple @py.test.mark.skipif(condition) decorators,
-  including specific reporting of which of the conditions led to skipping.
-
-- fix issue95: late-import zlib so that it's not required
- for general py.test startup.
-
-- fix issue94: make reporting more robust against bogus source code
- (and internally be more careful when presenting unexpected byte sequences)
-
-
-Changes between 1.2.1 and 1.3.0
-==================================================
-
-- deprecate --report option in favour of a new shorter and easier to
- remember -r option: it takes a string argument consisting of any
- combination of 'xfsX' characters. They relate to the single chars
- you see during the dotted progress printing and will print an extra line
- per test at the end of the test run. This extra line indicates the exact
- position or test ID that you directly paste to the py.test cmdline in order
- to re-run a particular test.
-
-- allow external plugins to register new hooks via the new
- pytest_addhooks(pluginmanager) hook. The new release of
- the pytest-xdist plugin for distributed and looponfailing
- testing requires this feature.
-
-- add a new pytest_ignore_collect(path, config) hook to allow projects and
- plugins to define exclusion behaviour for their directory structure -
-  for example you may define this method in a conftest.py::
-
- def pytest_ignore_collect(path):
- return path.check(link=1)
-
-  to prevent even attempting to collect any tests in symlinked dirs.
-
-- new pytest_pycollect_makemodule(path, parent) hook for
- allowing customization of the Module collection object for a
- matching test module.
-
-- extend and refine xfail mechanism:
- ``@py.test.mark.xfail(run=False)`` do not run the decorated test
- ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
-  specifying ``--runxfail`` on the command line virtually ignores xfail markers
-
-- expose (previously internal) commonly useful methods:
-  py.io.get_terminal_width() -> return terminal width
- py.io.ansi_print(...) -> print colored/bold text on linux/win32
- py.io.saferepr(obj) -> return limited representation string
-
-- expose test outcome related exceptions as py.test.skip.Exception,
- py.test.raises.Exception etc., useful mostly for plugins
- doing special outcome interpretation/tweaking
-
-- (issue85) fix junitxml plugin to handle tests with non-ascii output
-
-- fix/refine python3 compatibility (thanks Benjamin Peterson)
-
-- fixes for making the jython/win32 combination work, note however:
- jython2.5.1/win32 does not provide a command line launcher, see
- http://bugs.jython.org/issue1491 . See pylib install documentation
- for how to work around.
-
-- fixes for handling of unicode exception values and unprintable objects
-
-- (issue87) fix unboundlocal error in assertionold code
-
-- (issue86) improve documentation for looponfailing
-
-- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
-
-- ship distribute_setup.py version 0.6.10
-
-- added links to the new capturelog and coverage plugins
-
-
-Changes between 1.2.1 and 1.2.0
-=====================================
-
-- refined usage and options for "py.cleanup"::
-
- py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
- py.cleanup -e .swp -e .cache # also remove files with these extensions
- py.cleanup -s # remove "build" and "dist" directory next to setup.py files
- py.cleanup -d # also remove empty directories
- py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
- py.cleanup -n # dry run, only show what would be removed
-
-- add a new option "py.test --funcargs" which shows available funcargs
- and their help strings (docstrings on their respective factory function)
- for a given test path
-
-- display a short and concise traceback if a funcarg lookup fails
-
-- early-load "conftest.py" files in non-dot first-level sub directories.
- allows to conveniently keep and access test-related options in a ``test``
- subdir and still add command line options.
-
-- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
-
-- fix issue78: always call python-level teardown functions even if the
- according setup failed. This includes refinements for calling setup_module/class functions
- which will now only be called once instead of the previous behaviour where they'd be called
- multiple times if they raise an exception (including a Skipped exception). Any exception
-  will be recorded and associated with all tests in the corresponding module/class scope.
-
-- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
-
-- fix pdb debugging to be in the correct frame on raises-related errors
-
-- update apipkg.py to fix an issue where recursive imports might
- unnecessarily break importing
-
-- fix plugin links
-
-Changes between 1.2 and 1.1.1
-=====================================
-
-- moved dist/looponfailing from py.test core into a new
- separately released pytest-xdist plugin.
-
-- new junitxml plugin: --junitxml=path will generate a junit style xml file
- which is processable e.g. by the Hudson CI system.
-
-- new option: --genscript=path will generate a standalone py.test script
- which will not need any libraries installed. thanks to Ralf Schmitt.
-
-- new option: --ignore will prevent specified path from collection.
- Can be specified multiple times.
-
-- new option: --confcutdir=dir will make py.test only consider conftest
- files that are relative to the specified dir.
-
-- new funcarg: "pytestconfig" is the pytest config object for access
- to command line args and can now be easily used in a test.
-
-- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
- disambiguate between Python3, python2.X, Jython and PyPy installed versions.
-
-- new "pytestconfig" funcarg allows access to test config object
-
-- new "pytest_report_header" hook can return additional lines
- to be displayed at the header of a test run.
-
-- (experimental) allow "py.test path::name1::name2::..." for pointing
- to a test within a test collection directly. This might eventually
- evolve as a full substitute to "-k" specifications.
-
-- streamlined plugin loading: order is now as documented in
- customize.html: setuptools, ENV, commandline, conftest.
-  also setuptools entry point names are turned into canonical names ("pytest_*")
-
-- automatically skip tests that need 'capfd' but have no os.dup
-
-- allow pytest_generate_tests to be defined in classes as well
-
-- deprecate usage of 'disabled' attribute in favour of pytestmark
-- deprecate definition of Directory, Module, Class and Function nodes
- in conftest.py files. Use pytest collect hooks instead.
-
-- collection/item node specific runtest/collect hooks are only called exactly
- on matching conftest.py files, i.e. ones which are exactly below
- the filesystem path of an item
-
-- change: the first pytest_collect_directory hook to return something
- will now prevent further hooks to be called.
-
-- change: figleaf plugin now requires --figleaf to run. Also
- change its long command line options to be a bit shorter (see py.test -h).
-
-- change: pytest doctest plugin is now enabled by default and has a
- new option --doctest-glob to set a pattern for file matches.
-
-- change: remove internal py._* helper vars, only keep py._pydir
-
-- robustify capturing to survive if custom pytest_runtest_setup
- code failed and prevented the capturing setup code from running.
-
-- make py.test.* helpers provided by default plugins visible early -
- works transparently both for pydoc and for interactive sessions
- which will regularly see e.g. py.test.mark and py.test.importorskip.
-
-- simplify internal plugin manager machinery
-- simplify internal collection tree by introducing a RootCollector node
-
-- fix assert reinterpretation that sees a call containing "keyword=..."
-
-- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
- hooks on slaves during dist-testing, report module/session teardown
- hooks correctly.
-
-- fix issue65: properly handle dist-testing if no
- execnet/py lib installed remotely.
-
-- skip some install-tests if no execnet is available
-
-- fix docs, fix internal bin/ script generation
-
-
-Changes between 1.1.1 and 1.1.0
-=====================================
-
-- introduce automatic plugin registration via 'pytest11'
- entrypoints via setuptools' pkg_resources.iter_entry_points
-
-- fix py.test dist-testing to work with execnet >= 1.0.0b4
-
-- re-introduce py.test.cmdline.main() for better backward compatibility
-
-- svn paths: fix a bug with path.check(versioned=True) for svn paths,
- allow '%' in svn paths, make svnwc.update() default to interactive mode
- like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
-
-- refine distributed tarball to contain test and no pyc files
-
-- try harder to have deprecation warnings for py.compat.* accesses
- report a correct location
-
-Changes between 1.1.0 and 1.0.2
-=====================================
-
-* adjust and improve docs
-
-* remove py.rest tool and internal namespace - it was
- never really advertised and can still be used with
- the old release if needed. If there is interest
- it could be revived into its own tool i guess.
-
-* fix issue48 and issue59: raise an Error if the module
- from an imported test file does not seem to come from
- the filepath - avoids "same-name" confusion that has
- been reported repeatedly
-
-* merged Ronny's nose-compatibility hacks: now
- nose-style setup_module() and setup() functions are
- supported
-
-* introduce generalized py.test.mark function marking
-
-* reshuffle / refine command line grouping
-
-* deprecate parser.addgroup in favour of getgroup which creates option group
-
-* add --report command line option that allows to control showing of skipped/xfailed sections
-
-* generalized skipping: a new way to mark python functions with skipif or xfail
- at function, class and modules level based on platform or sys-module attributes.
-
-* extend py.test.mark decorator to allow for positional args
-
-* introduce and test "py.cleanup -d" to remove empty directories
-
-* fix issue #59 - robustify unittest test collection
-
-* make bpython/help interaction work by adding an __all__ attribute
- to ApiModule, cleanup initpkg
-
-* use MIT license for pylib, add some contributors
-
-* remove py.execnet code and substitute all usages with 'execnet' proper
-
-* fix issue50 - cached_setup now caches more to expectations
- for test functions with multiple arguments.
-
-* merge Jarko's fixes, issue #45 and #46
-
-* add the ability to specify a path for py.lookup to search in
-
-* fix a funcarg cached_setup bug probably only occurring
- in distributed testing and "module" scope with teardown.
-
-* many fixes and changes for making the code base python3 compatible,
- many thanks to Benjamin Peterson for helping with this.
-
-* consolidate builtins implementation to be compatible with >=2.3,
- add helpers to ease keeping 2 and 3k compatible code
-
-* deprecate py.compat.doctest|subprocess|textwrap|optparse
-
-* deprecate py.magic.autopath, remove py/magic directory
-
-* move pytest assertion handling to py/code and a pytest_assertion
- plugin, add "--no-assert" option, deprecate py.magic namespaces
- in favour of (less) py.code ones.
-
-* consolidate and cleanup py/code classes and files
-
-* cleanup py/misc, move tests to bin-for-dist
-
-* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
-
-* consolidate py.log implementation, remove old approach.
-
-* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
- text/unicode and byte-streams (uses underlying standard lib io.*
- if available)
-
-* make py.unittest_convert helper script available which converts "unittest.py"
- style files into the simpler assert/direct-test-classes py.test/nosetests
- style. The script was written by Laura Creighton.
-
-* simplified internal localpath implementation
-
-Changes between 1.0.1 and 1.0.2
-=====================================
-
-* fixing packaging issues, triggered by fedora redhat packaging,
- also added doc, examples and contrib dirs to the tarball.
-
-* added a documentation link to the new django plugin.
-
-Changes between 1.0.0 and 1.0.1
-=====================================
-
-* added a 'pytest_nose' plugin which handles nose.SkipTest,
- nose-style function/method/generator setup/teardown and
- tries to report functions correctly.
-
-* capturing of unicode writes or encoded strings to sys.stdout/err
- work better, also terminalwriting was adapted and somewhat
- unified between windows and linux.
-
-* improved documentation layout and content a lot
-
-* added a "--help-config" option to show conftest.py / ENV-var names for
- all longopt cmdline options, and some special conftest.py variables.
- renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
-
-* fix issue #27: better reporting on non-collectable items given on commandline
- (e.g. pyc files)
-
-* fix issue #33: added --version flag (thanks Benjamin Peterson)
-
-* fix issue #32: adding support for "incomplete" paths to wcpath.status()
-
-* "Test" prefixed classes are *not* collected by default anymore if they
- have an __init__ method
-
-* monkeypatch setenv() now accepts a "prepend" parameter
-
-* improved reporting of collection error tracebacks
-
-* simplified multicall mechanism and plugin architecture,
- renamed some internal methods and argnames
-
-Changes between 1.0.0b9 and 1.0.0
-=====================================
-
-* more terse reporting: try to show filesystem paths relative to the current dir
-* improve xfail output a bit
-
-Changes between 1.0.0b8 and 1.0.0b9
-=====================================
-
-* cleanly handle and report final teardown of test setup
-
-* fix svn-1.6 compat issue with py.path.svnwc().versioned()
- (thanks Wouter Vanden Hove)
-
-* setup/teardown or collection problems now show as ERRORs
- or with big "E"'s in the progress lines. they are reported
- and counted separately.
-
-* dist-testing: properly handle test items that get locally
- collected but cannot be collected on the remote side - often
- due to platform/dependency reasons
-
-* simplified py.test.mark API - see keyword plugin documentation
-
-* integrate better with logging: capturing now by default captures
- test functions and their immediate setup/teardown in a single stream
-
-* capsys and capfd funcargs now have a readouterr() and a close() method
- (underlyingly py.io.StdCapture/FD objects are used which grew a
- readouterr() method as well to return snapshots of captured out/err)
-
-* make assert-reinterpretation work better with comparisons not
-  returning bools (reported with numpy, thanks Maciej Fijalkowski)
-
-* reworked per-test output capturing into the pytest_iocapture.py plugin
- and thus removed capturing code from config object
-
-* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
-
-
-Changes between 1.0.0b7 and 1.0.0b8
-=====================================
-
-* pytest_unittest-plugin is now enabled by default
-
-* introduced pytest_keyboardinterrupt hook and
-  refined the pytest_sessionfinish hook, added tests.
-
-* workaround a buggy logging module interaction ("closing already closed
- files"). Thanks to Sridhar Ratnakumar for triggering.
-
-* if plugins use "py.test.importorskip" for importing
-  a dependency, only a warning will be issued instead
- of exiting the testing process.
-
-* many improvements to docs:
- - refined funcargs doc , use the term "factory" instead of "provider"
- - added a new talk/tutorial doc page
- - better download page
- - better plugin docstrings
- - added new plugins page and automatic doc generation script
-
-* fixed teardown problem related to partially failing funcarg setups
- (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
- always invoked even if the "pytest_runtest_setup" failed.
-
-* tweaked doctest output for docstrings in py modules,
- thanks Radomir.
-
-Changes between 1.0.0b3 and 1.0.0b7
-=============================================
-
-* renamed py.test.xfail back to py.test.mark.xfail to avoid
- two ways to decorate for xfail
-
-* re-added py.test.mark decorator for setting keywords on functions
- (it was actually documented so removing it was not nice)
-
-* remove scope-argument from request.addfinalizer() because
- request.cached_setup has the scope arg. TOOWTDI.
-
-* perform setup finalization before reporting failures
-
-* apply modified patches from Andreas Kloeckner to allow
- test functions to have no func_code (#22) and to make
- "-k" and function keywords work (#20)
-
-* apply patch from Daniel Peolzleithner (issue #23)
-
-* resolve issue #18, multiprocessing.Manager() and
- redirection clash
-
-* make __name__ == "__channelexec__" for remote_exec code
-
-Changes between 1.0.0b1 and 1.0.0b3
-=============================================
-
-* plugin classes are removed: one now defines
- hooks directly in conftest.py or global pytest_*.py
- files.
-
-* added a new pytest_namespace(config) hook that allows
-  injecting helpers directly into the py.test.* namespace.
-
-* documented and refined many hooks
-
-* added new style of generative tests via
- pytest_generate_tests hook that integrates
- well with function arguments.
-
-
-Changes between 0.9.2 and 1.0.0b1
-=============================================
-
-* introduced new "funcarg" setup method,
- see doc/test/funcarg.txt
-
-* introduced plugin architecture and many
- new py.test plugins, see
- doc/test/plugins.txt
-
-* teardown_method is now guaranteed to get
- called after a test method has run.
-
-* new method: py.test.importorskip(mod,minversion)
- will either import or call py.test.skip()
-
-* completely revised internal py.test architecture
-
-* new py.process.ForkedFunc object allowing to
- fork execution of a function to a sub process
- and getting a result back.
-
-XXX lots of things missing here XXX
-
-Changes between 0.9.1 and 0.9.2
-===============================
-
-* refined installation and metadata, created new setup.py,
- now based on setuptools/ez_setup (thanks to Ralf Schmitt
- for his support).
-
-* improved the way of making py.* scripts available in
- windows environments, they are now added to the
- Scripts directory as ".cmd" files.
-
-* py.path.svnwc.status() now is more complete and
- uses xml output from the 'svn' command if available
- (Guido Wesdorp)
-
-* fix for py.path.svn* to work with svn 1.5
- (Chris Lamb)
-
-* fix path.relto(otherpath) method on windows to
- use normcase for checking if a path is relative.
-
-* py.test's traceback is better parseable from editors
- (follows the filenames:LINENO: MSG convention)
- (thanks to Osmo Salomaa)
-
-* fix to javascript-generation, "py.test --runbrowser"
- should work more reliably now
-
-* removed previously accidentally added
- py.test.broken and py.test.notimplemented helpers.
-
-* there now is a py.__version__ attribute
-
-Changes between 0.9.0 and 0.9.1
-===============================
-
-This is a fairly complete list of changes between 0.9 and 0.9.1, which can
-serve as a reference for developers.
-
-* allowing + signs in py.path.svn urls [39106]
-* fixed support for Failed exceptions without excinfo in py.test [39340]
-* added support for killing processes for Windows (as well as platforms that
- support os.kill) in py.misc.killproc [39655]
-* added setup/teardown for generative tests to py.test [40702]
-* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
-* fixed problem with calling .remove() on wcpaths of non-versioned files in
- py.path [44248]
-* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
-* fail to run greenlet tests when pypy is available, but without stackless
- [45294]
-* small fixes in rsession tests [45295]
-* fixed issue with 2.5 type representations in py.test [45483, 45484]
-* made the display of internal reporting issues atomic in py.test
- [45518]
-* made that non-existing files are ignored by the py.lookup script [45519]
-* improved exception name creation in py.test [45535]
-* made that less threads are used in execnet [merge in 45539]
-* removed the lock required for atomic display of reporting issues in py.test
- [45545]
-* removed globals from execnet [45541, 45547]
-* refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit
- get called in 2.5 (py.execnet) [45548]
-* fixed bug in joining threads in py.execnet's servemain [45549]
-* refactored py.test.rsession tests to not rely on exact output format anymore
- [45646]
-* using repr() on test outcome [45647]
-* added 'Reason' classes for py.test.skip() [45648, 45649]
-* killed some unnecessary sanity check in py.test.collect [45655]
-* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
- usable by Administrators [45901]
-* added support for locking and non-recursive commits to py.path.svnwc [45994]
-* locking files in py.execnet to prevent CPython from segfaulting [46010]
-* added export() method to py.path.svnurl
-* fixed -d -x in py.test [47277]
-* fixed argument concatenation problem in py.path.svnwc [49423]
-* restore py.test behaviour that it exits with code 1 when there are failures
- [49974]
-* don't fail on html files that don't have an accompanying .txt file [50606]
-* fixed 'utestconvert.py < input' [50645]
-* small fix for code indentation in py.code.source [50755]
-* fix _docgen.py documentation building [51285]
-* improved checks for source representation of code blocks in py.test [51292]
-* added support for passing authentication to py.path.svn* objects [52000,
- 52001]
-* removed sorted() call for py.apigen tests in favour of [].sort() to support
- Python 2.3 [52481]
+py.test/pylib 1.3.2: API and reporting refinements, many fixes
+===========================================================================
+
+The pylib/py.test 1.3.2 release brings many bug fixes and some new
+features. It was refined for and tested against the recently released
+Python2.7 and remains compatible with the usual armada of interpreters
+(Python2.4 through to Python3.1.2, Jython and PyPy). Note that for using
+distributed testing features you'll need to upgrade to the jointly released
+pytest-xdist-1.4 because of some internal refactorings.
+
+See http://pytest.org for general documentation and below for
+a detailed CHANGELOG.
+
+cheers & particular thanks to Benjamin Peterson, Ronny Pfannschmidt
+and all issue and patch contributors,
+
+holger krekel
+
+Changes between 1.3.1 and 1.3.2
+==================================================
+
+New features
+++++++++++++++++++
+
+- fix issue103: introduce py.test.raises as context manager, examples::
+
+ with py.test.raises(ZeroDivisionError):
+ x = 0
+ 1 / x
+
+ with py.test.raises(RuntimeError) as excinfo:
+ call_something()
+
+ # you may do extra checks on excinfo.value|type|traceback here
+
+ (thanks Ronny Pfannschmidt)
+
+- Funcarg factories can now dynamically apply a marker to a
+ test invocation. This is for example useful if a factory
+ provides parameters to a test which are expected-to-fail::
+
+ def pytest_funcarg__arg(request):
+ request.applymarker(py.test.mark.xfail(reason="flaky config"))
+ ...
+
+ def test_function(arg):
+ ...
+
+- improved error reporting on collection and import errors. This makes
+ use of a more general mechanism, namely that for custom test item/collect
+ nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
+ override it to return a string error representation of your choice
+ which is going to be reported as a (red) string.
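+
+  For example, an illustrative sketch of such an override for a custom item
+  class (the ``YamlItem`` name and its error handling are made up here)::
+
+      class YamlItem(py.test.collect.Item):
+          def repr_failure(self, excinfo):
+              # show a short plain-string message for expected error types
+              if excinfo.errisinstance(ValueError):
+                  return "yaml item error: %s" % (excinfo.value,)
+              return py.test.collect.Item.repr_failure(self, excinfo)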
+
+- introduce '--junitprefix=STR' option to prepend a prefix
+ to all reports in the junitxml file.
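+
+  For example (file name and prefix are arbitrary)::
+
+      py.test --junitxml=report.xml --junitprefix=linux32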
+
+Bug fixes / Maintenance
+++++++++++++++++++++++++++
+
+- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
+  with Python2.7 (if you use the ``recwarn`` funcarg, warnings will be enabled so that
+ you can properly check for their existence in a cross-python manner).
+- refine --pdb: ignore xfailed tests, unify its TB-reporting and
+ don't display failures again at the end.
+- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
+- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
+- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
+- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
+- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
+- fix py.code.compile(source) to generate unique filenames
+- fix assertion re-interp problems on PyPy, by deferring code
+ compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
+- fix py.path.local.pyimport() to work with directories
+- streamline py.path.local.mkdtemp implementation and usage
+- don't print empty lines when showing junitxml-filename
+- add optional boolean ignore_errors parameter to py.path.local.remove
+- fix terminal writing on win32/python2.4
+- py.process.cmdexec() now tries harder to return properly encoded unicode objects
+ on all python versions
+- install plain py.test/py.which scripts also for Jython, this helps to
+ get canonical script paths in virtualenv situations
+- make path.bestrelpath(path) return ".", note that when calling
+ X.bestrelpath the assumption is that X is a directory.
+- make initial conftest discovery ignore "--" prefixed arguments
+- fix resultlog plugin when used in a multicpu/multihost xdist situation
+ (thanks Jakub Gustak)
+- perform distributed testing related reporting in the xdist-plugin
+ rather than having dist-related code in the generic py.test
+ distribution
+- fix homedir detection on Windows
+- ship distribute_setup.py version 0.6.13
+
+Changes between 1.3.0 and 1.3.1
+==================================================
+
+New features
+++++++++++++++++++
+
+- issue91: introduce new py.test.xfail(reason) helper
+ to imperatively mark a test as expected to fail. Can
+ be used from within setup and test functions. This is
+ useful especially for parametrized tests when certain
+ configurations are expected-to-fail. In this case the
+ declarative approach with the @py.test.mark.xfail cannot
+ be used as it would mark all configurations as xfail.
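+
+  A minimal sketch (the funcarg and attribute names are hypothetical)::
+
+      def test_feature(serverconfig):
+          if serverconfig.version < 2:
+              py.test.xfail("not supported on protocol version 1")
+          ...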
+
+- issue102: introduce new --maxfail=NUM option to stop
+ test runs after NUM failures. This is a generalization
+ of the '-x' or '--exitfirst' option which is now equivalent
+ to '--maxfail=1'. Both '-x' and '--maxfail' will
+ now also print a line near the end indicating the Interruption.
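+
+  For example::
+
+      py.test --maxfail=2    # stop after the second failure
+      py.test -x             # same as --maxfail=1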
+
+- issue89: allow py.test.mark decorators to be used on classes
+ (class decorators were introduced with python2.6) and
+  also allow multiple markers to be applied at class/module level
+ by specifying a list.
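+
+  For example (marker names are illustrative; the module-level list uses the
+  usual ``pytestmark`` attribute)::
+
+      @py.test.mark.webtest
+      class TestWebApp:
+          def test_index(self):
+              ...
+
+      # apply several markers to a whole module:
+      pytestmark = [py.test.mark.slow, py.test.mark.webtest]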
+
+- improve and refine letter reporting in the progress bar:
+ . pass
+ f failed test
+ s skipped tests (reminder: use for dependency/platform mismatch only)
+ x xfailed test (test that was expected to fail)
+ X xpassed test (test that was expected to fail but passed)
+
+ You can use any combination of 'fsxX' with the '-r' extended
+ reporting option. The xfail/xpass results will show up as
+ skipped tests in the junitxml output - which also fixes
+ issue99.
+
+- make py.test.cmdline.main() return the exitstatus instead of raising
+ SystemExit and also allow it to be called multiple times. This of
+  course requires that your application and tests are properly torn
+ down and don't have global state.
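+
+  A small sketch (passing an explicit argument list; the path is arbitrary)::
+
+      import py
+      exitstatus = py.test.cmdline.main(["-x", "testing/"])
+      print("py.test finished with exit status %d" % exitstatus)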
+
+Fixes / Maintenance
+++++++++++++++++++++++
+
+- improved traceback presentation:
+ - improved and unified reporting for "--tb=short" option
+  - Errors during test module imports are much shorter (using --tb=short style)
+ - raises shows shorter more relevant tracebacks
+ - --fulltrace now more systematically makes traces longer / inhibits cutting
+
+- improve support for raises and other dynamically compiled code by
+ manipulating python's linecache.cache instead of the previous
+ rather hacky way of creating custom code objects. This makes
+  it work seamlessly on Jython and PyPy where it previously didn't.
+
+- fix issue96: make capturing more resilient against Control-C
+ interruptions (involved somewhat substantial refactoring
+ to the underlying capturing functionality to avoid race
+ conditions).
+
+- fix chaining of conditional skipif/xfail decorators - so it works now
+ as expected to use multiple @py.test.mark.skipif(condition) decorators,
+  including specific reporting of which condition led to skipping.
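+
+  For example (a sketch using the string conditions this release expects)::
+
+      @py.test.mark.skipif("sys.platform == 'win32'")
+      @py.test.mark.skipif("sys.version_info < (2, 6)")
+      def test_posix_feature():
+          ...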
+
+- fix issue95: late-import zlib so that it's not required
+ for general py.test startup.
+
+- fix issue94: make reporting more robust against bogus source code
+ (and internally be more careful when presenting unexpected byte sequences)
+
+
+Changes between 1.2.1 and 1.3.0
+==================================================
+
+- deprecate --report option in favour of a new shorter and easier to
+ remember -r option: it takes a string argument consisting of any
+ combination of 'xfsX' characters. They relate to the single chars
+ you see during the dotted progress printing and will print an extra line
+ per test at the end of the test run. This extra line indicates the exact
+  position or test ID that you can directly paste to the py.test cmdline in order
+ to re-run a particular test.
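+
+  For example::
+
+      py.test -rxs    # extra summary line per xfailed and skipped test
+      py.test -rfsxX  # report failed, skipped, xfailed and xpassed tests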
+
+- allow external plugins to register new hooks via the new
+ pytest_addhooks(pluginmanager) hook. The new release of
+ the pytest-xdist plugin for distributed and looponfailing
+ testing requires this feature.
+
+- add a new pytest_ignore_collect(path, config) hook to allow projects and
+ plugins to define exclusion behaviour for their directory structure -
+ for example you may define in a conftest.py this method::
+
+ def pytest_ignore_collect(path):
+ return path.check(link=1)
+
+  to prevent even attempting to collect any tests in symlinked dirs.
+
+- new pytest_pycollect_makemodule(path, parent) hook for
+ allowing customization of the Module collection object for a
+ matching test module.
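+
+  For example, an illustrative conftest.py sketch (``MyModule`` stands for a
+  project-specific Module subclass)::
+
+      def pytest_pycollect_makemodule(path, parent):
+          if path.basename == "test_legacy.py":
+              return MyModule(path, parent)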
+
+- extend and refine xfail mechanism:
+ ``@py.test.mark.xfail(run=False)`` do not run the decorated test
+ ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on the command line virtually ignores xfail markers
+
+- expose (previously internal) commonly useful methods:
+  py.io.get_terminal_width() -> return terminal width
+ py.io.ansi_print(...) -> print colored/bold text on linux/win32
+ py.io.saferepr(obj) -> return limited representation string
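+
+  A minimal usage sketch::
+
+      import py
+      width = py.io.get_terminal_width()           # e.g. 80
+      print(py.io.saferepr(list(range(10000))))    # length-limited repr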
+
+- expose test outcome related exceptions as py.test.skip.Exception,
+ py.test.raises.Exception etc., useful mostly for plugins
+ doing special outcome interpretation/tweaking
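+
+  For example, a plugin hook might special-case skip outcomes (an illustrative
+  sketch only)::
+
+      def pytest_runtest_makereport(item, call):
+          if call.excinfo is not None:
+              if call.excinfo.errisinstance(py.test.skip.Exception):
+                  pass  # custom interpretation of the skip outcome here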
+
+- (issue85) fix junitxml plugin to handle tests with non-ascii output
+
+- fix/refine python3 compatibility (thanks Benjamin Peterson)
+
+- fixes for making the jython/win32 combination work, note however:
+ jython2.5.1/win32 does not provide a command line launcher, see
+ http://bugs.jython.org/issue1491 . See pylib install documentation
+ for how to work around.
+
+- fixes for handling of unicode exception values and unprintable objects
+
+- (issue87) fix unboundlocal error in assertionold code
+
+- (issue86) improve documentation for looponfailing
+
+- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
+
+- ship distribute_setup.py version 0.6.10
+
+- added links to the new capturelog and coverage plugins
+
+
+Changes between 1.2.1 and 1.2.0
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+  allows you to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+  corresponding setup failed. This includes refinements for calling setup_module/class functions
+ which will now only be called once instead of the previous behaviour where they'd be called
+ multiple times if they raise an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the corresponding module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
+
+Changes between 1.2 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
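+
+  For example (directory names are arbitrary)::
+
+      py.test --ignore=doc --ignore=attic testing/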
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
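+
+  For example (a sketch)::
+
+      def test_something(pytestconfig):
+          if pytestconfig.getvalue("verbose"):
+              print("extra diagnostics enabled")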
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
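+
+  For example (conftest.py sketch with a made-up message)::
+
+      def pytest_report_header(config):
+          return "example project: running against bundled test data"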
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+ evolve as a full substitute to "-k" specifications.
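+
+  For example (a hypothetical test id)::
+
+      py.test testing/test_config.py::TestConfig::test_basic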
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+  also setuptools entry point names are turned into canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
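+
+  For example (an illustrative sketch using the metafunc.addcall() API)::
+
+      class TestSquares:
+          def pytest_generate_tests(self, metafunc):
+              if "n" in metafunc.funcargnames:
+                  for i in (1, 2, 3):
+                      metafunc.addcall(funcargs=dict(n=i))
+
+          def test_square_is_positive(self, n):
+              assert n * n > 0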
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+ will now prevent further hooks to be called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+ entrypoints via setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain test and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
+
+Changes between 1.1.0 and 1.0.2
+=====================================
+
+* adjust and improve docs
+
+* remove py.rest tool and internal namespace - it was
+ never really advertised and can still be used with
+ the old release if needed. If there is interest
+ it could be revived into its own tool i guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup which creates option group
+
+* add --report command line option that allows to control showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+ at function, class and modules level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches more to expectations
+ for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
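+
+  A minimal sketch, assuming ``delenv`` accepts a ``raising`` flag to ignore
+  variables that are not set::
+
+      def test_without_home(monkeypatch):
+          monkeypatch.delenv("HOME", raising=False)
+          ...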
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
+
+Changes between 1.0.1 and 1.0.2
+=====================================
+
+* fixing packaging issues, triggered by fedora redhat packaging,
+ also added doc, examples and contrib dirs to the tarball.
+
+* added a documentation link to the new django plugin.
+
+Changes between 1.0.0 and 1.0.1
+=====================================
+
+* added a 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* capturing of unicode writes or encoded strings to sys.stdout/err
+ work better, also terminalwriting was adapted and somewhat
+ unified between windows and linux.
+
+* improved documentation layout and content a lot
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
+
+Changes between 1.0.0b9 and 1.0.0
+=====================================
+
+* more terse reporting: try to show filesystem paths relative to the current dir
+* improve xfail output a bit
+
+Changes between 1.0.0b8 and 1.0.0b9
+=====================================
+
+* cleanly handle and report final teardown of test setup
+
+* fix svn-1.6 compat issue with py.path.svnwc().versioned()
+ (thanks Wouter Vanden Hove)
+
+* setup/teardown or collection problems now show as ERRORs
+ or with big "E"'s in the progress lines. they are reported
+ and counted separately.
+
+* dist-testing: properly handle test items that get locally
+ collected but cannot be collected on the remote side - often
+ due to platform/dependency reasons
+
+* simplified py.test.mark API - see keyword plugin documentation
+
+* integrate better with logging: capturing now by default captures
+ test functions and their immediate setup/teardown in a single stream
+
+* capsys and capfd funcargs now have a readouterr() and a close() method
+ (underlyingly py.io.StdCapture/FD objects are used which grew a
+ readouterr() method as well to return snapshots of captured out/err)
+
+* make assert-reinterpretation work better with comparisons not
+  returning bools (reported with numpy, thanks Maciej Fijalkowski)
+
+* reworked per-test output capturing into the pytest_iocapture.py plugin
+ and thus removed capturing code from config object
+
+* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
+
+
+Changes between 1.0.0b7 and 1.0.0b8
+=====================================
+
+* pytest_unittest-plugin is now enabled by default
+
+* introduced pytest_keyboardinterrupt hook and
+  refined the pytest_sessionfinish hook, added tests.
+
+* workaround a buggy logging module interaction ("closing already closed
+ files"). Thanks to Sridhar Ratnakumar for triggering.
+
+* if plugins use "py.test.importorskip" for importing
+  a dependency, only a warning will be issued instead
+ of exiting the testing process.
+
+* many improvements to docs:
+ - refined funcargs doc , use the term "factory" instead of "provider"
+ - added a new talk/tutorial doc page
+ - better download page
+ - better plugin docstrings
+ - added new plugins page and automatic doc generation script
+
+* fixed teardown problem related to partially failing funcarg setups
+ (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
+ always invoked even if the "pytest_runtest_setup" failed.
+
+* tweaked doctest output for docstrings in py modules,
+ thanks Radomir.
+
+Changes between 1.0.0b3 and 1.0.0b7
+=============================================
+
+* renamed py.test.xfail back to py.test.mark.xfail to avoid
+ two ways to decorate for xfail
+
+* re-added py.test.mark decorator for setting keywords on functions
+ (it was actually documented so removing it was not nice)
+
+* remove scope-argument from request.addfinalizer() because
+ request.cached_setup has the scope arg. TOOWTDI.
+
+* perform setup finalization before reporting failures
+
+* apply modified patches from Andreas Kloeckner to allow
+ test functions to have no func_code (#22) and to make
+ "-k" and function keywords work (#20)
+
+* apply patch from Daniel Peolzleithner (issue #23)
+
+* resolve issue #18, multiprocessing.Manager() and
+ redirection clash
+
+* make __name__ == "__channelexec__" for remote_exec code
+
+Changes between 1.0.0b1 and 1.0.0b3
+=============================================
+
+* plugin classes are removed: one now defines
+ hooks directly in conftest.py or global pytest_*.py
+ files.
+
+* added a new pytest_namespace(config) hook that allows
+  injecting helpers directly into the py.test.* namespace.
+
+* documented and refined many hooks
+
+* added new style of generative tests via
+ pytest_generate_tests hook that integrates
+ well with function arguments.
+
+
+Changes between 0.9.2 and 1.0.0b1
+=============================================
+
+* introduced new "funcarg" setup method,
+ see doc/test/funcarg.txt
+
+* introduced plugin architecture and many
+ new py.test plugins, see
+ doc/test/plugins.txt
+
+* teardown_method is now guaranteed to get
+ called after a test method has run.
+
+* new method: py.test.importorskip(mod,minversion)
+ will either import or call py.test.skip()
+
+* completely revised internal py.test architecture
+
+* new py.process.ForkedFunc object allowing to
+ fork execution of a function to a sub process
+ and getting a result back.
+
+XXX lots of things missing here XXX
+
+Changes between 0.9.1 and 0.9.2
+===============================
+
+* refined installation and metadata, created new setup.py,
+ now based on setuptools/ez_setup (thanks to Ralf Schmitt
+ for his support).
+
+* improved the way of making py.* scripts available in
+ windows environments, they are now added to the
+ Scripts directory as ".cmd" files.
+
+* py.path.svnwc.status() now is more complete and
+ uses xml output from the 'svn' command if available
+ (Guido Wesdorp)
+
+* fix for py.path.svn* to work with svn 1.5
+ (Chris Lamb)
+
+* fix path.relto(otherpath) method on windows to
+ use normcase for checking if a path is relative.
+
+* py.test's traceback is better parseable from editors
+ (follows the filenames:LINENO: MSG convention)
+ (thanks to Osmo Salomaa)
+
+* fix to javascript-generation, "py.test --runbrowser"
+ should work more reliably now
+
+* removed previously accidentally added
+ py.test.broken and py.test.notimplemented helpers.
+
+* there now is a py.__version__ attribute
+
+Changes between 0.9.0 and 0.9.1
+===============================
+
+This is a fairly complete list of changes between 0.9 and 0.9.1, which can
+serve as a reference for developers.
+
+* allowing + signs in py.path.svn urls [39106]
+* fixed support for Failed exceptions without excinfo in py.test [39340]
+* added support for killing processes for Windows (as well as platforms that
+ support os.kill) in py.misc.killproc [39655]
+* added setup/teardown for generative tests to py.test [40702]
+* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
+* fixed problem with calling .remove() on wcpaths of non-versioned files in
+ py.path [44248]
+* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
+* fail to run greenlet tests when pypy is available, but without stackless
+ [45294]
+* small fixes in rsession tests [45295]
+* fixed issue with 2.5 type representations in py.test [45483, 45484]
+* made the display of internal reporting issues atomic in py.test
+ [45518]
+* made that non-existing files are ignored by the py.lookup script [45519]
+* improved exception name creation in py.test [45535]
+* made that less threads are used in execnet [merge in 45539]
+* removed the lock required for atomic display of reporting issues in py.test
+ [45545]
+* removed globals from execnet [45541, 45547]
+* refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit
+ get called in 2.5 (py.execnet) [45548]
+* fixed bug in joining threads in py.execnet's servemain [45549]
+* refactored py.test.rsession tests to not rely on exact output format anymore
+ [45646]
+* using repr() on test outcome [45647]
+* added 'Reason' classes for py.test.skip() [45648, 45649]
+* killed some unnecessary sanity check in py.test.collect [45655]
+* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
+ usable by Administrators [45901]
+* added support for locking and non-recursive commits to py.path.svnwc [45994]
+* locking files in py.execnet to prevent CPython from segfaulting [46010]
+* added export() method to py.path.svnurl
+* fixed -d -x in py.test [47277]
+* fixed argument concatenation problem in py.path.svnwc [49423]
+* restore py.test behaviour that it exits with code 1 when there are failures
+ [49974]
+* don't fail on html files that don't have an accompanying .txt file [50606]
+* fixed 'utestconvert.py < input' [50645]
+* small fix for code indentation in py.code.source [50755]
+* fix _docgen.py documentation building [51285]
+* improved checks for source representation of code blocks in py.test [51292]
+* added support for passing authentication to py.path.svn* objects [52000,
+ 52001]
+* removed sorted() call for py.apigen tests in favour of [].sort() to support
+ Python 2.3 [52481]
diff --git a/doc/announce/release-1.3.3.txt b/doc/announce/release-1.3.3.txt
index c62cb85..cb9b0d7 100644
--- a/doc/announce/release-1.3.3.txt
+++ b/doc/announce/release-1.3.3.txt
@@ -1,26 +1,26 @@
-py.test/pylib 1.3.3: windows and other fixes
-===========================================================================
-
-pylib/py.test 1.3.3 is a minor bugfix release featuring some improvements
-and fixes. See changelog_ for full history.
-
-have fun,
-holger krekel
-
-.. _changelog: ../changelog.html
-
-Changes between 1.3.2 and 1.3.3
-==================================================
-
-- fix issue113: assertion representation problem with triple-quoted strings
- (and possibly other cases)
-- make conftest loading detect that a conftest file with the same
- content was already loaded, avoids surprises in nested directory structures
- which can be produced e.g. by Hudson. It probably removes the need to use
- --confcutdir in most cases.
-- fix terminal coloring for win32
- (thanks Michael Foord for reporting)
-- fix weirdness: make terminal width detection work on stdout instead of stdin
- (thanks Armin Ronacher for reporting)
-- remove trailing whitespace in all py/text distribution files
-
+py.test/pylib 1.3.3: windows and other fixes
+===========================================================================
+
+pylib/py.test 1.3.3 is a minor bugfix release featuring some improvements
+and fixes. See changelog_ for full history.
+
+have fun,
+holger krekel
+
+.. _changelog: ../changelog.html
+
+Changes between 1.3.2 and 1.3.3
+==================================================
+
+- fix issue113: assertion representation problem with triple-quoted strings
+ (and possibly other cases)
+- make conftest loading detect that a conftest file with the same
+ content was already loaded, avoids surprises in nested directory structures
+ which can be produced e.g. by Hudson. It probably removes the need to use
+ --confcutdir in most cases.
+- fix terminal coloring for win32
+ (thanks Michael Foord for reporting)
+- fix weirdness: make terminal width detection work on stdout instead of stdin
+ (thanks Armin Ronacher for reporting)
+- remove trailing whitespace in all py/text distribution files
+
diff --git a/doc/announce/release-1.3.4.txt b/doc/announce/release-1.3.4.txt
index c156c8b..1e0c605 100644
--- a/doc/announce/release-1.3.4.txt
+++ b/doc/announce/release-1.3.4.txt
@@ -1,22 +1,22 @@
-py.test/pylib 1.3.4: fixes and new native traceback option
-===========================================================================
-
-pylib/py.test 1.3.4 is a minor maintenance release mostly containing bug fixes
-and a new "--tb=native" traceback option to show "normal" Python standard
-tracebacks instead of the py.test enhanced tracebacks. See below for more
-change info and http://pytest.org for more general information on features
-and configuration of the testing tool.
-
-Thanks to the issue reporters and generally to Ronny Pfannschmidt for help.
-
-cheers,
-holger krekel
-
-Changes between 1.3.3 and 1.3.4
-==================================================
-
-- fix issue111: improve install documentation for windows
-- fix issue119: fix custom collectability of __init__.py as a module
-- fix issue116: --doctestmodules work with __init__.py files as well
-- fix issue115: unify internal exception passthrough/catching/GeneratorExit
-- fix issue118: new --tb=native for presenting cpython-standard exceptions
+py.test/pylib 1.3.4: fixes and new native traceback option
+===========================================================================
+
+pylib/py.test 1.3.4 is a minor maintenance release mostly containing bug fixes
+and a new "--tb=native" traceback option to show "normal" Python standard
+tracebacks instead of the py.test enhanced tracebacks. See below for more
+change info and http://pytest.org for more general information on features
+and configuration of the testing tool.
+
+Thanks to the issue reporters and generally to Ronny Pfannschmidt for help.
+
+cheers,
+holger krekel
+
+Changes between 1.3.3 and 1.3.4
+==================================================
+
+- fix issue111: improve install documentation for windows
+- fix issue119: fix custom collectability of __init__.py as a module
+- fix issue116: --doctestmodules work with __init__.py files as well
+- fix issue115: unify internal exception passthrough/catching/GeneratorExit
+- fix issue118: new --tb=native for presenting cpython-standard exceptions
diff --git a/doc/announce/release-1.4.0.txt b/doc/announce/release-1.4.0.txt
index 6f9a771..fe31113 100644
--- a/doc/announce/release-1.4.0.txt
+++ b/doc/announce/release-1.4.0.txt
@@ -1,47 +1,47 @@
-
-.. _`release-1.4.0`:
-
-py-1.4.0: cross-python lib for path, code, io, ... manipulations
-===========================================================================
-
-"py" is a small library comprising APIs for filesystem and svn path
-manipulations, dynamic code construction and introspection, a Py2/Py3
-compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
-(on windows and linux), ini-file parsing and a lazy import mechanism.
-It runs unmodified on all Python interpreters compatible to Python2.4 up
-until Python 3.2. The general goal with "py" is to provide stable APIs
-for some common tasks that are continuously tested against many Python
-interpreters and thus also to help transition. Here are some docs:
-
- http://pylib.org
-
-NOTE: The prior py-1.3.X versions contained "py.test" which now comes
-as its own separate "pytest" distribution and was just released
-as "pytest-2.0.0", see here for the revamped docs:
-
- http://pytest.org
-
-And "py.cleanup|py.lookup|py.countloc" etc. helpers are now part of
-the pycmd distribution, see http://pypi.python.org/pypi/pycmd
-
-This makes "py-1.4.0" a simple library which does not install
-any command line utilities anymore.
-
-cheers,
-holger
-
-Changes between 1.3.4 and 1.4.0
--------------------------------------
-
-- py.test was moved to a separate "pytest" package. What remains is
- a stub hook which will proxy ``import py.test`` to ``pytest``.
-- all command line tools ("py.cleanup/lookup/countloc/..." moved
- to "pycmd" package)
-- removed the old and deprecated "py.magic" namespace
-- use apipkg-1.1 and make py.apipkg.initpkg|ApiModule available
-- add py.iniconfig module for brain-dead easy ini-config file parsing
-- introduce py.builtin.any()
-- path objects have a .dirname attribute now (equivalent to
- os.path.dirname(path))
-- path.visit() accepts breadthfirst (bf) and sort options
-- remove deprecated py.compat namespace
+
+.. _`release-1.4.0`:
+
+py-1.4.0: cross-python lib for path, code, io, ... manipulations
+===========================================================================
+
+"py" is a small library comprising APIs for filesystem and svn path
+manipulations, dynamic code construction and introspection, a Py2/Py3
+compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
+(on windows and linux), ini-file parsing and a lazy import mechanism.
+It runs unmodified on all Python interpreters compatible to Python2.4 up
+until Python 3.2. The general goal with "py" is to provide stable APIs
+for some common tasks that are continuously tested against many Python
+interpreters and thus also to help transition. Here are some docs:
+
+ http://pylib.org
+
+NOTE: The prior py-1.3.X versions contained "py.test" which now comes
+as its own separate "pytest" distribution and was just released
+as "pytest-2.0.0", see here for the revamped docs:
+
+ http://pytest.org
+
+And "py.cleanup|py.lookup|py.countloc" etc. helpers are now part of
+the pycmd distribution, see http://pypi.python.org/pypi/pycmd
+
+This makes "py-1.4.0" a simple library which does not install
+any command line utilities anymore.
+
+cheers,
+holger
+
+Changes between 1.3.4 and 1.4.0
+-------------------------------------
+
+- py.test was moved to a separate "pytest" package. What remains is
+ a stub hook which will proxy ``import py.test`` to ``pytest``.
+- all command line tools ("py.cleanup/lookup/countloc/..." moved
+ to "pycmd" package)
+- removed the old and deprecated "py.magic" namespace
+- use apipkg-1.1 and make py.apipkg.initpkg|ApiModule available
+- add py.iniconfig module for brain-dead easy ini-config file parsing
+- introduce py.builtin.any()
+- path objects have a .dirname attribute now (equivalent to
+ os.path.dirname(path))
+- path.visit() accepts breadthfirst (bf) and sort options
+- remove deprecated py.compat namespace
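+
+A small usage sketch of some of the new bits (illustrative only; assumes a
+``setup.py`` and ``setup.cfg`` exist in the current directory)::
+
+    import py
+    p = py.path.local("setup.py")
+    print(p.dirname)                           # like os.path.dirname(str(p))
+    for x in py.path.local(".").visit(fil="*.py", bf=True, sort=True):
+        pass                                   # breadth-first, sorted traversal
+    cfg = py.iniconfig.IniConfig("setup.cfg")  # simple ini-file parsing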
diff --git a/doc/announce/release-1.4.1.txt b/doc/announce/release-1.4.1.txt
index a5aa76b..4fafb6e 100644
--- a/doc/announce/release-1.4.1.txt
+++ b/doc/announce/release-1.4.1.txt
@@ -1,47 +1,47 @@
-
-.. _`release-1.4.1`:
-
-py-1.4.1: cross-python lib for fs path, code, io, ... manipulations
-===========================================================================
-
-This is a bug fix release of the "py" lib, see below for detailed changes.
-The py lib is a small library comprising APIs for filesystem and svn path
-manipulations, dynamic code construction and introspection, a Py2/Py3
-compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
-(on windows and linux), ini-file parsing and a lazy import mechanism.
-It runs unmodified on all Python interpreters compatible to Python2.4 up
-until Python 3.2, PyPy and Jython. The general goal with "py" is to
-provide stable APIs for some common tasks that are continuously tested
-against many Python interpreters and thus also to help transition. Here
-are some docs:
-
- http://pylib.org
-
-NOTE: The prior py-1.3.X versions contained "py.test" which since py-1.4.0
-comes as its own separate "pytest" distribution, see:
-
- http://pytest.org
-
-Also, the "py.cleanup|py.lookup|py.countloc" helpers are now part of
-the pycmd distribution, see http://pypi.python.org/pypi/pycmd
-
-
-Changes between 1.4.0 and 1.4.1
-==================================================
-
-- fix issue1 - py.error.* classes to be pickleable
-
-- fix issue2 - on windows32 use PATHEXT as the list of potential
-  extensions to find binaries with py.path.local.sysfind(commandname)
-
-- fix (pytest-) issue10 and refine assertion reinterpretation
- to avoid breaking if the __nonzero__ of an object fails
-
-- fix (pytest-) issue17 where python3 does not like star-imports,
- leading to misrepresentation of import-errors in test modules
-
-- fix ``py.error.*`` attribute pypy access
-
-- allow path.samefile(arg) to succeed when arg is a relative filename
-
-- fix (pytest-) issue20 path.samefile(relpath) works as expected now
+
+.. _`release-1.4.1`:
+
+py-1.4.1: cross-python lib for fs path, code, io, ... manipulations
+===========================================================================
+
+This is a bug fix release of the "py" lib, see below for detailed changes.
+The py lib is a small library comprising APIs for filesystem and svn path
+manipulations, dynamic code construction and introspection, a Py2/Py3
+compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
+(on windows and linux), ini-file parsing and a lazy import mechanism.
+It runs unmodified on all Python interpreters compatible with Python 2.4
+up to Python 3.2, PyPy and Jython. The general goal with "py" is to
+provide stable APIs for some common tasks that are continuously tested
+against many Python interpreters and thus also to help transition. Here
+are some docs:
+
+ http://pylib.org
+
+NOTE: The prior py-1.3.X versions contained "py.test" which since py-1.4.0
+comes as its own separate "pytest" distribution, see:
+
+ http://pytest.org
+
+Also, the "py.cleanup|py.lookup|py.countloc" helpers are now part of
+the pycmd distribution, see http://pypi.python.org/pypi/pycmd
+
+
+Changes between 1.4.0 and 1.4.1
+==================================================
+
+- fix issue1 - py.error.* classes to be pickleable
+
+- fix issue2 - on windows32 use PATHEXT as the list of potential
+  extensions to find binaries with py.path.local.sysfind(commandname)
+
+- fix (pytest-) issue10 and refine assertion reinterpretation
+ to avoid breaking if the __nonzero__ of an object fails
+
+- fix (pytest-) issue17 where python3 does not like star-imports,
+ leading to misrepresentation of import-errors in test modules
+
+- fix ``py.error.*`` attribute access on pypy
+
+- allow path.samefile(arg) to succeed when arg is a relative filename
+
+- fix (pytest-) issue20: path.samefile(relpath) works as expected now
diff --git a/doc/announce/releases.txt b/doc/announce/releases.txt
index 309c29b..539fb29 100644
--- a/doc/announce/releases.txt
+++ b/doc/announce/releases.txt
@@ -1,16 +1,16 @@
-=============
-Release notes
-=============
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
-.. include: release-1.1.0
-.. include: release-1.0.2
-
- release-1.0.1
- release-1.0.0
- release-0.9.2
- release-0.9.0
+=============
+Release notes
+=============
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+.. include: release-1.1.0
+.. include: release-1.0.2
+
+ release-1.0.1
+ release-1.0.0
+ release-0.9.2
+ release-0.9.0
diff --git a/doc/changelog.txt b/doc/changelog.txt
index 237daca..1acc2b7 100644
--- a/doc/changelog.txt
+++ b/doc/changelog.txt
@@ -1,3 +1,3 @@
-.. _`changelog`:
-
-.. include:: ../CHANGELOG
+.. _`changelog`:
+
+.. include:: ../CHANGELOG
diff --git a/doc/code.txt b/doc/code.txt
index bdd8691..fa545a2 100644
--- a/doc/code.txt
+++ b/doc/code.txt
@@ -1,150 +1,150 @@
-================================================================================
-py.code: higher level python code and introspection objects
-================================================================================
-
-``py.code`` provides higher level APIs and objects for Code, Frame, Traceback,
-ExceptionInfo and source code construction. The ``py.code`` library
-tries to simplify accessing the code objects as well as creating them.
-There is a small set of interfaces a user needs to deal with, all nicely
-bundled together, and with a rich set of 'Pythonic' functionality.
-
-Contents of the library
-=======================
-
-Every object in the ``py.code`` library wraps a code Python object related
-to code objects, source code, frames and tracebacks: the ``py.code.Code``
-class wraps code objects, ``py.code.Source`` source snippets,
-``py.code.Traceback` exception tracebacks, ``py.code.Frame`` frame
-objects (as found in e.g. tracebacks) and ``py.code.ExceptionInfo`` the
-tuple provided by sys.exc_info() (containing exception and traceback
-information when an exception occurs). Also in the library is a helper function
-``py.code.compile()`` that provides the same functionality as Python's
-built-in 'compile()' function, but returns a wrapped code object.
-
-The wrappers
-============
-
-``py.code.Code``
--------------------
-
-Code objects are instantiated with a code object or a callable as argument,
-and provide functionality to compare themselves with other Code objects, get to
-the source file or its contents, create new Code objects from scratch, etc.
-
-A quick example::
-
- >>> import py
- >>> c = py.code.Code(py.path.local.read)
- >>> c.path.basename
- 'common.py'
- >>> isinstance(c.source(), py.code.Source)
- True
- >>> str(c.source()).split('\n')[0]
- "def read(self, mode='r'):"
-
-.. autoclass:: py.code.Code
- :members:
- :inherited-members:
-
-
-``py.code.Source``
----------------------
-
-Source objects wrap snippets of Python source code, providing a simple yet
-powerful interface to read, deindent, slice, compare, compile and manipulate
-them, things that are not so easy in core Python.
-
-Example::
-
- >>> s = py.code.Source("""\
- ... def foo():
- ... print "foo"
- ... """)
- >>> str(s).startswith('def') # automatic de-indentation!
- True
- >>> s.isparseable()
- True
- >>> sub = s.getstatement(1) # get the statement starting at line 1
- >>> str(sub).strip() # XXX why is the strip() required?!?
- 'print "foo"'
-
-.. autoclass:: py.code.Source
- :members:
-
-
-``py.code.Traceback``
-------------------------
-
-Tracebacks are usually not very easy to examine, you need to access certain
-somewhat hidden attributes of the traceback's items (resulting in expressions
-such as 'fname = tb.tb_next.tb_frame.f_code.co_filename'). The Traceback
-interface (and its TracebackItem children) tries to improve this.
-
-Example::
-
- >>> import sys
- >>> try:
- ... py.path.local(100) # illegal argument
- ... except:
- ... exc, e, tb = sys.exc_info()
- >>> t = py.code.Traceback(tb)
- >>> first = t[1] # get the second entry (first is in this doc)
- >>> first.path.basename # second is in py/path/local.py
- 'local.py'
- >>> isinstance(first.statement, py.code.Source)
- True
- >>> str(first.statement).strip().startswith('raise ValueError')
- True
-
-.. autoclass:: py.code.Traceback
- :members:
-
-``py.code.Frame``
---------------------
-
-Frame wrappers are used in ``py.code.Traceback`` items, and will usually not
-directly be instantiated. They provide some nice methods to evaluate code
-'inside' the frame (using the frame's local variables), get to the underlying
-code (frames have a code attribute that points to a ``py.code.Code`` object)
-and examine the arguments.
-
-Example (using the 'first' TracebackItem instance created above)::
-
- >>> frame = first.frame
- >>> isinstance(frame.code, py.code.Code)
- True
- >>> isinstance(frame.eval('self'), py.path.local)
- True
- >>> [namevalue[0] for namevalue in frame.getargs()]
- ['cls', 'path']
-
-.. autoclass:: py.code.Frame
- :members:
-
-``py.code.ExceptionInfo``
-----------------------------
-
-A wrapper around the tuple returned by sys.exc_info() (will call sys.exc_info()
-itself if the tuple is not provided as an argument), provides some handy
-attributes to easily access the traceback and exception string.
-
-Example::
-
- >>> import sys
- >>> try:
- ... foobar()
- ... except:
- ... excinfo = py.code.ExceptionInfo()
- >>> excinfo.typename
- 'NameError'
- >>> isinstance(excinfo.traceback, py.code.Traceback)
- True
- >>> excinfo.exconly()
- "NameError: name 'foobar' is not defined"
-
-.. autoclass:: py.code.ExceptionInfo
- :members:
-
-.. autoclass:: py.code.Traceback
- :members:
-
+================================================================================
+py.code: higher level python code and introspection objects
+================================================================================
+
+``py.code`` provides higher level APIs and objects for Code, Frame, Traceback,
+ExceptionInfo and source code construction. The ``py.code`` library
+tries to simplify accessing the code objects as well as creating them.
+There is a small set of interfaces a user needs to deal with, all nicely
+bundled together, and with a rich set of 'Pythonic' functionality.
+
+Contents of the library
+=======================
+
+Every object in the ``py.code`` library wraps a Python object related
+to code: code objects, source code, frames and tracebacks. The ``py.code.Code``
+class wraps code objects, ``py.code.Source`` source snippets,
+``py.code.Traceback`` exception tracebacks, ``py.code.Frame`` frame
+objects (as found in e.g. tracebacks) and ``py.code.ExceptionInfo`` the
+tuple provided by sys.exc_info() (containing exception and traceback
+information when an exception occurs). Also in the library is a helper function
+``py.code.compile()`` that provides the same functionality as Python's
+built-in 'compile()' function, but returns a wrapped code object.
+
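+A quick illustration of the latter (a minimal sketch; the doctest uses the
+Python 2 syntax of the surrounding examples)::
+
+    >>> import py
+    >>> co = py.code.compile("x = 3")
+    >>> exec co
+    >>> x
+    3
+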
+The wrappers
+============
+
+``py.code.Code``
+-------------------
+
+Code objects are instantiated with a code object or a callable as argument,
+and provide functionality to compare themselves with other Code objects, get to
+the source file or its contents, create new Code objects from scratch, etc.
+
+A quick example::
+
+ >>> import py
+ >>> c = py.code.Code(py.path.local.read)
+ >>> c.path.basename
+ 'common.py'
+ >>> isinstance(c.source(), py.code.Source)
+ True
+ >>> str(c.source()).split('\n')[0]
+ "def read(self, mode='r'):"
+
+.. autoclass:: py.code.Code
+ :members:
+ :inherited-members:
+
+
+``py.code.Source``
+---------------------
+
+Source objects wrap snippets of Python source code, providing a simple yet
+powerful interface to read, deindent, slice, compare, compile and manipulate
+them, things that are not so easy in core Python.
+
+Example::
+
+ >>> s = py.code.Source("""\
+ ... def foo():
+ ... print "foo"
+ ... """)
+ >>> str(s).startswith('def') # automatic de-indentation!
+ True
+ >>> s.isparseable()
+ True
+ >>> sub = s.getstatement(1) # get the statement starting at line 1
+ >>> str(sub).strip() # XXX why is the strip() required?!?
+ 'print "foo"'
+
+.. autoclass:: py.code.Source
+ :members:
+
+
+``py.code.Traceback``
+------------------------
+
+Tracebacks are usually not very easy to examine: you need to access certain
+somewhat hidden attributes of the traceback's items (resulting in expressions
+such as 'fname = tb.tb_next.tb_frame.f_code.co_filename'). The Traceback
+interface (and its TracebackItem children) tries to improve this.
+
+Example::
+
+ >>> import sys
+ >>> try:
+ ... py.path.local(100) # illegal argument
+ ... except:
+ ... exc, e, tb = sys.exc_info()
+ >>> t = py.code.Traceback(tb)
+ >>> first = t[1] # get the second entry (first is in this doc)
+ >>> first.path.basename # second is in py/path/local.py
+ 'local.py'
+ >>> isinstance(first.statement, py.code.Source)
+ True
+ >>> str(first.statement).strip().startswith('raise ValueError')
+ True
+
+.. autoclass:: py.code.Traceback
+ :members:
+
+``py.code.Frame``
+--------------------
+
+Frame wrappers are used in ``py.code.Traceback`` items, and will usually not
+directly be instantiated. They provide some nice methods to evaluate code
+'inside' the frame (using the frame's local variables), get to the underlying
+code (frames have a code attribute that points to a ``py.code.Code`` object)
+and examine the arguments.
+
+Example (using the 'first' TracebackItem instance created above)::
+
+ >>> frame = first.frame
+ >>> isinstance(frame.code, py.code.Code)
+ True
+ >>> isinstance(frame.eval('self'), py.path.local)
+ True
+ >>> [namevalue[0] for namevalue in frame.getargs()]
+ ['cls', 'path']
+
+.. autoclass:: py.code.Frame
+ :members:
+
+``py.code.ExceptionInfo``
+----------------------------
+
+A wrapper around the tuple returned by sys.exc_info() (it calls sys.exc_info()
+itself if the tuple is not provided as an argument); it provides some handy
+attributes to easily access the traceback and exception string.
+
+Example::
+
+ >>> import sys
+ >>> try:
+ ... foobar()
+ ... except:
+ ... excinfo = py.code.ExceptionInfo()
+ >>> excinfo.typename
+ 'NameError'
+ >>> isinstance(excinfo.traceback, py.code.Traceback)
+ True
+ >>> excinfo.exconly()
+ "NameError: name 'foobar' is not defined"
+
+.. autoclass:: py.code.ExceptionInfo
+ :members:
+
diff --git a/doc/conf.py b/doc/conf.py
index de4cbf8..979d3e2 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,263 +1,263 @@
-# -*- coding: utf-8 -*-
-#
-# py documentation build configuration file, created by
-# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.txt'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'py'
-copyright = u'2010, holger krekel et. al.'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-# The full version, including alpha/beta/rc tags.
-import py
-release = py.__version__
-version = ".".join(release.split(".")[:2])
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'py'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'py.tex', u'py Documentation',
- u'holger krekel et. al.', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'py', u'py Documentation',
- [u'holger krekel et. al.'], 1)
-]
-
-autodoc_member_order = "bysource"
-autodoc_default_flags = "inherited-members"
-
-# -- Options for Epub output ---------------------------------------------------
-
-# Bibliographic Dublin Core info.
-epub_title = u'py'
-epub_author = u'holger krekel et. al.'
-epub_publisher = u'holger krekel et. al.'
-epub_copyright = u'2010, holger krekel et. al.'
-
-# The language of the text. It defaults to the language option
-# or en if the language is not set.
-#epub_language = ''
-
-# The scheme of the identifier. Typical schemes are ISBN or URL.
-#epub_scheme = ''
-
-# The unique identifier of the text. This can be a ISBN number
-# or the project homepage.
-#epub_identifier = ''
-
-# A unique identification for the text.
-#epub_uid = ''
-
-# HTML files that should be inserted before the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_pre_files = []
-
-# HTML files shat should be inserted after the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_post_files = []
-
-# A list of files that should not be packed into the epub file.
-#epub_exclude_files = []
-
-# The depth of the table of contents in toc.ncx.
-#epub_tocdepth = 3
-
-# Allow duplicate toc entries.
-#epub_tocdup = True
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
+# -*- coding: utf-8 -*-
+#
+# py documentation build configuration file, created by
+# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.txt'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'py'
+copyright = u'2010, holger krekel et. al.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+# The full version, including alpha/beta/rc tags.
+import py
+release = py.__version__
+version = ".".join(release.split(".")[:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'py'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'py.tex', u'py Documentation',
+ u'holger krekel et. al.', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'py', u'py Documentation',
+ [u'holger krekel et. al.'], 1)
+]
+
+autodoc_member_order = "bysource"
+autodoc_default_flags = "inherited-members"
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'py'
+epub_author = u'holger krekel et. al.'
+epub_publisher = u'holger krekel et. al.'
+epub_copyright = u'2010, holger krekel et. al.'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be an ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/download.html b/doc/download.html
index 5f4c466..5339f29 100644
--- a/doc/download.html
+++ b/doc/download.html
@@ -1,18 +1,18 @@
-<html>
- <head>
- <meta http-equiv="refresh" content=" 1 ; URL=install.html" />
- </head>
-
- <body>
-<script type="text/javascript">
-var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
-document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
-</script>
-<script type="text/javascript">
-try {
-var pageTracker = _gat._getTracker("UA-7597274-3");
-pageTracker._trackPageview();
-} catch(err) {}</script>
-</body>
-</html>
-
+<html>
+ <head>
+ <meta http-equiv="refresh" content=" 1 ; URL=install.html" />
+ </head>
+
+ <body>
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-7597274-3");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+</body>
+</html>
+
diff --git a/doc/example/genhtml.py b/doc/example/genhtml.py
index b5c8f52..f562b05 100644
--- a/doc/example/genhtml.py
+++ b/doc/example/genhtml.py
@@ -1,13 +1,13 @@
-from py.xml import html
-
-paras = "First Para", "Second para"
-
-doc = html.html(
- html.head(
- html.meta(name="Content-Type", value="text/html; charset=latin1")),
- html.body(
- [html.p(p) for p in paras]))
-
-print unicode(doc).encode('latin1')
-
-
+from py.xml import html
+
+paras = "First Para", "Second para"
+
+doc = html.html(
+ html.head(
+ html.meta(name="Content-Type", value="text/html; charset=latin1")),
+ html.body(
+ [html.p(p) for p in paras]))
+
+print unicode(doc).encode('latin1')
+
+
diff --git a/doc/example/genhtmlcss.py b/doc/example/genhtmlcss.py
index 3e6d0af..d1e2e4e 100644
--- a/doc/example/genhtmlcss.py
+++ b/doc/example/genhtmlcss.py
@@ -1,23 +1,23 @@
-import py
-html = py.xml.html
-
-class my(html):
- "a custom style"
- class body(html.body):
- style = html.Style(font_size = "120%")
-
- class h2(html.h2):
- style = html.Style(background = "grey")
-
- class p(html.p):
- style = html.Style(font_weight="bold")
-
-doc = my.html(
- my.head(),
- my.body(
- my.h2("hello world"),
- my.p("bold as bold can")
- )
-)
-
-print doc.unicode(indent=2)
+import py
+html = py.xml.html
+
+class my(html):
+ "a custom style"
+ class body(html.body):
+ style = html.Style(font_size = "120%")
+
+ class h2(html.h2):
+ style = html.Style(background = "grey")
+
+ class p(html.p):
+ style = html.Style(font_weight="bold")
+
+doc = my.html(
+ my.head(),
+ my.body(
+ my.h2("hello world"),
+ my.p("bold as bold can")
+ )
+)
+
+print doc.unicode(indent=2)
diff --git a/doc/example/genxml.py b/doc/example/genxml.py
index 5f754e8..7cf6a01 100644
--- a/doc/example/genxml.py
+++ b/doc/example/genxml.py
@@ -1,17 +1,17 @@
-
-import py
-class ns(py.xml.Namespace):
- pass
-
-doc = ns.books(
- ns.book(
- ns.author("May Day"),
- ns.title("python for java programmers"),),
- ns.book(
- ns.author("why", class_="somecssclass"),
- ns.title("Java for Python programmers"),),
- publisher="N.N",
- )
-print doc.unicode(indent=2).encode('utf8')
-
-
+
+import py
+class ns(py.xml.Namespace):
+ pass
+
+doc = ns.books(
+ ns.book(
+ ns.author("May Day"),
+ ns.title("python for java programmers"),),
+ ns.book(
+ ns.author("why", class_="somecssclass"),
+ ns.title("Java for Python programmers"),),
+ publisher="N.N",
+ )
+print doc.unicode(indent=2).encode('utf8')
+
+
diff --git a/doc/faq.txt b/doc/faq.txt
index 52cb4b3..d52fc5d 100644
--- a/doc/faq.txt
+++ b/doc/faq.txt
@@ -1,172 +1,172 @@
-==================================
-Frequently Asked Questions
-==================================
-
-.. contents::
- :local:
- :depth: 2
-
-
-On naming, nosetests, licensing and magic
-===========================================
-
-Why the ``py`` naming? Why not ``pytest``?
-----------------------------------------------------
-
-This mostly has historic reasons - the aim is
-to get away from the somewhat questionable 'py' name
-at some point. These days (2010) the 'py' library
-almost completely comprises APIs that are used
-by the ``py.test`` tool. There also are some
-other uses, e.g. of the ``py.path.local()`` and
-other path implementations. So it requires some
-work to factor them out and do the shift.
-
-Why the ``py.test`` naming?
-------------------------------------
-
-because of TAB-completion under Bash/Shells. If you hit
-``py.<TAB>`` you'll get a list of available development
-tools that all share the ``py.`` prefix. Another motivation
-was to unify the package ("py.test") and tool filename.
-
-What's py.test's relation to ``nosetests``?
----------------------------------------------
-
-py.test and nose_ share basic philosophy when it comes
-to running Python tests. In fact,
-with py.test-1.1.0 it is ever easier to run many test suites
-that currently work with ``nosetests``. nose_ was created
-as a clone of ``py.test`` when py.test was in the ``0.8`` release
-cycle so some of the newer features_ introduced with py.test-1.0
-and py.test-1.1 have no counterpart in nose_.
-
-.. _nose: http://somethingaboutorange.com/mrl/projects/nose/0.11.1/
-.. _features: test/features.html
-.. _apipkg: http://pypi.python.org/pypi/apipkg
-
-
-What's this "magic" with py.test?
-----------------------------------------
-
-issues where people have used the term "magic" in the past:
-
-* `py/__init__.py`_ uses the apipkg_ mechanism for lazy-importing
- and full control on what API you get when importing "import py".
-
-* when an ``assert`` statement fails, py.test re-interprets the expression
- to show intermediate values if a test fails. If your expression
- has side effects the intermediate values may not be the same, obfuscating
- the initial error (this is also explained at the command line if it happens).
- ``py.test --no-assert`` turns off assert re-intepretation.
- Sidenote: it is good practise to avoid asserts with side effects.
-
-
-.. _`py namespaces`: index.html
-.. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py
-
-Where does my ``py.test`` come/import from?
-----------------------------------------------
-
-You can issue::
-
- py.test --version
-
-which tells you both version and import location of the tool.
-
-
-function arguments, parametrized tests and setup
-====================================================
-
-.. _funcargs: test/funcargs.html
-
-Is using funcarg- versus xUnit-based setup a style question?
----------------------------------------------------------------
-
-It depends. For simple applications or for people experienced
-with nose_ or unittest-style test setup using `xUnit style setup`_
-make some sense. For larger test suites, parametrized testing
-or setup of complex test resources using funcargs_ is recommended.
-Moreover, funcargs are ideal for writing advanced test support
-code (like e.g. the monkeypatch_, the tmpdir_ or capture_ funcargs)
-because the support code can register setup/teardown functions
-in a managed class/module/function scope.
-
-.. _monkeypatch: test/plugin/monkeypatch.html
-.. _tmpdir: test/plugin/tmpdir.html
-.. _capture: test/plugin/capture.html
-.. _`xUnit style setup`: test/xunit_setup.html
-.. _`pytest_nose`: test/plugin/nose.html
-
-.. _`why pytest_pyfuncarg__ methods?`:
-
-Why the ``pytest_funcarg__*`` name for funcarg factories?
----------------------------------------------------------------
-
-When experimenting with funcargs an explicit registration mechanism
-was considered. But lacking a good use case for this indirection and
-flexibility we decided to go for `Convention over Configuration`_ and
-allow to directly specify the factory. Besides removing the need
-for an indirection it allows to "grep" for ``pytest_funcarg__MYARG``
-and will safely find all factory functions for the ``MYARG`` function
-argument. It helps to alleviate the de-coupling of function
-argument usage and creation.
-
-.. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
-
-Can I yield multiple values from a factory function?
------------------------------------------------------
-
-There are two conceptual reasons why yielding from a factory function
-is not possible:
-
-* Calling factories for obtaining test function arguments
- is part of setting up and running a test. At that
- point it is not possible to add new test calls to
- the test collection anymore.
-
-* If multiple factories yielded values there would
- be no natural place to determine the combination
- policy - in real-world examples some combinations
- often should not run.
-
-Use the `pytest_generate_tests`_ hook to solve both issues
-and implement the `parametrization scheme of your choice`_.
-
-.. _`pytest_generate_tests`: test/funcargs.html#parametrizing-tests
-.. _`parametrization scheme of your choice`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
-
-
-py.test interaction with other packages
-===============================================
-
-Issues with py.test, multiprocess and setuptools?
-------------------------------------------------------------
-
-On windows the multiprocess package will instantiate sub processes
-by pickling and thus implicitely re-import a lot of local modules.
-Unfortuantely, setuptools-0.6.11 does not ``if __name__=='__main__'``
-protect its generated command line script. This leads to infinite
-recursion when running a test that instantiates Processes.
-There are these workarounds:
-
-* `install Distribute`_ as a drop-in replacement for setuptools
- and install py.test
-
-* `directly use a checkout`_ which avoids all setuptools/Distribute
- installation
-
-If those options are not available to you, you may also manually
-fix the script that is created by setuptools by inserting an
-``if __name__ == '__main__'``. Or you can create a "pytest.py"
-script with this content and invoke that with the python version::
-
- import py
- if __name__ == '__main__':
- py.cmdline.pytest()
-
-.. _`directly use a checkout`: install.html#directly-use-a-checkout
-
-.. _`install distribute`: http://pypi.python.org/pypi/distribute#installation-instructions
-
-
+==================================
+Frequently Asked Questions
+==================================
+
+.. contents::
+ :local:
+ :depth: 2
+
+
+On naming, nosetests, licensing and magic
+===========================================
+
+Why the ``py`` naming? Why not ``pytest``?
+----------------------------------------------------
+
+This mostly has historical reasons - the aim is
+to get away from the somewhat questionable 'py' name
+at some point. These days (2010) the 'py' library
+almost completely comprises APIs that are used
+by the ``py.test`` tool. There are also some
+other uses, e.g. of ``py.path.local()`` and
+other path implementations. So it requires some
+work to factor them out and do the shift.
+
+Why the ``py.test`` naming?
+------------------------------------
+
+because of TAB-completion under Bash/Shells. If you hit
+``py.<TAB>`` you'll get a list of available development
+tools that all share the ``py.`` prefix. Another motivation
+was to unify the package ("py.test") and tool filename.
+
+What's py.test's relation to ``nosetests``?
+---------------------------------------------
+
+py.test and nose_ share basic philosophy when it comes
+to running Python tests. In fact,
+with py.test-1.1.0 it is even easier to run many test suites
+that currently work with ``nosetests``. nose_ was created
+as a clone of ``py.test`` when py.test was in the ``0.8`` release
+cycle so some of the newer features_ introduced with py.test-1.0
+and py.test-1.1 have no counterpart in nose_.
+
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose/0.11.1/
+.. _features: test/features.html
+.. _apipkg: http://pypi.python.org/pypi/apipkg
+
+
+What's this "magic" with py.test?
+----------------------------------------
+
+Some issues where people have used the term "magic" in the past:
+
+* `py/__init__.py`_ uses the apipkg_ mechanism for lazy-importing
+ and full control on what API you get when importing "import py".
+
+* when an ``assert`` statement fails, py.test re-interprets the expression
+  to show the intermediate values. If your expression
+  has side effects the intermediate values may not be the same, obfuscating
+  the initial error (this is also explained at the command line if it happens).
+  ``py.test --no-assert`` turns off assert re-interpretation.
+  Sidenote: it is good practice to avoid asserts with side effects.
+
+
+.. _`py namespaces`: index.html
+.. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py
+
+Where does my ``py.test`` come/import from?
+----------------------------------------------
+
+You can issue::
+
+ py.test --version
+
+which tells you both version and import location of the tool.
+
+
+function arguments, parametrized tests and setup
+====================================================
+
+.. _funcargs: test/funcargs.html
+
+Is using funcarg- versus xUnit-based setup a style question?
+---------------------------------------------------------------
+
+It depends. For simple applications or for people experienced
+with nose_ or unittest-style test setup, using `xUnit style setup`_
+makes some sense. For larger test suites, parametrized testing
+or setup of complex test resources using funcargs_ is recommended.
+Moreover, funcargs are ideal for writing advanced test support
+code (like e.g. the monkeypatch_, the tmpdir_ or capture_ funcargs)
+because the support code can register setup/teardown functions
+in a managed class/module/function scope.
+
+.. _monkeypatch: test/plugin/monkeypatch.html
+.. _tmpdir: test/plugin/tmpdir.html
+.. _capture: test/plugin/capture.html
+.. _`xUnit style setup`: test/xunit_setup.html
+.. _`pytest_nose`: test/plugin/nose.html
+
+.. _`why pytest_pyfuncarg__ methods?`:
+
+Why the ``pytest_funcarg__*`` name for funcarg factories?
+---------------------------------------------------------------
+
+When experimenting with funcargs an explicit registration mechanism
+was considered. But lacking a good use case for this indirection and
+flexibility we decided to go for `Convention over Configuration`_ and
+allow directly specifying the factory. Besides removing the need
+for an indirection it allows one to "grep" for ``pytest_funcarg__MYARG``
+and will safely find all factory functions for the ``MYARG`` function
+argument. It helps to cope with the de-coupling of function
+argument usage and creation.
+
+.. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
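+
+For illustration, a factory following this convention might look roughly
+like this (a minimal sketch with hypothetical names, typically placed in a
+``conftest.py``)::
+
+    # conftest.py -- provides the "myresource" funcarg to tests
+    def pytest_funcarg__myresource(request):
+        return {"db": "sqlite://"}      # build and return the resource
+
+    # test_example.py -- a test asking for "myresource" gets the value above
+    def test_uses_resource(myresource):
+        assert myresource["db"].startswith("sqlite")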
+
+Can I yield multiple values from a factory function?
+-----------------------------------------------------
+
+There are two conceptual reasons why yielding from a factory function
+is not possible:
+
+* Calling factories for obtaining test function arguments
+ is part of setting up and running a test. At that
+ point it is not possible to add new test calls to
+ the test collection anymore.
+
+* If multiple factories yielded values there would
+ be no natural place to determine the combination
+ policy - in real-world examples some combinations
+ often should not run.
+
+Use the `pytest_generate_tests`_ hook to solve both issues
+and implement the `parametrization scheme of your choice`_.
+
+.. _`pytest_generate_tests`: test/funcargs.html#parametrizing-tests
+.. _`parametrization scheme of your choice`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
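+
+A rough sketch of such a hook, using the ``metafunc.addcall()`` API of the
+py.test 1.x/2.0 era (the argument name ``numiter`` is just an example)::
+
+    # conftest.py -- generate three calls for every test that uses "numiter"
+    def pytest_generate_tests(metafunc):
+        if "numiter" in metafunc.funcargnames:
+            for i in range(3):
+                metafunc.addcall(funcargs=dict(numiter=i))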
+
+
+py.test interaction with other packages
+===============================================
+
+Issues with py.test, multiprocess and setuptools?
+------------------------------------------------------------
+
+On Windows the multiprocessing package will instantiate subprocesses
+by pickling and thus implicitly re-import a lot of local modules.
+Unfortunately, setuptools-0.6.11 does not protect its generated command
+line script with ``if __name__=='__main__'``. This leads to infinite
+recursion when running a test that instantiates Processes.
+There are these workarounds:
+
+* `install Distribute`_ as a drop-in replacement for setuptools
+ and install py.test
+
+* `directly use a checkout`_ which avoids all setuptools/Distribute
+ installation
+
+If those options are not available to you, you may also manually
+fix the script that is created by setuptools by inserting an
+``if __name__ == '__main__'`` guard. Or you can create a "pytest.py"
+script with this content and invoke that with your Python interpreter::
+
+ import py
+ if __name__ == '__main__':
+ py.cmdline.pytest()
+
+.. _`directly use a checkout`: install.html#directly-use-a-checkout
+
+.. _`install distribute`: http://pypi.python.org/pypi/distribute#installation-instructions
+
+
diff --git a/doc/index.txt b/doc/index.txt
index 7eb5c63..751a0e6 100644
--- a/doc/index.txt
+++ b/doc/index.txt
@@ -1,43 +1,39 @@
-.. py documentation master file, created by
- sphinx-quickstart on Thu Oct 21 08:30:10 2010.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-Welcome to py's documentation!
-=================================
-
-see :ref:`CHANGELOG <changelog>` for latest changes.
-
-.. note::
-
- Since version 1.4, the testing tool "py.test" is part of its own `pytest distribution`_.
-
-.. _`pytest distribution`: http://pytest.org
-
-Contents:
-
-.. toctree::
-
- install
- path
- code
- io
- log
- xml
- misc
-
- :maxdepth: 2
-
-.. toctree::
- :hidden:
-
- announce/release-2.0.0
- changelog
- announce/*
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`search`
-
+.. py documentation master file, created by
+ sphinx-quickstart on Thu Oct 21 08:30:10 2010.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to py's documentation!
+=================================
+
+See :ref:`CHANGELOG <changelog>` for the latest changes.
+
+.. _`pytest distribution`: http://pytest.org
+
+Contents:
+
+.. toctree::
+
+ install
+ path
+ code
+ io
+ log
+ xml
+ misc
+
+ :maxdepth: 2
+
+.. toctree::
+ :hidden:
+
+ announce/release-2.0.0
+ changelog
+ announce/*
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
+
diff --git a/doc/install.txt b/doc/install.txt
index d0e981d..569b3e7 100644
--- a/doc/install.txt
+++ b/doc/install.txt
@@ -1,88 +1,88 @@
-
-.. _`py`:
-.. _`index page`: http://pypi.python.org/pypi/py/
-
-installation info in a nutshell
-===================================================
-
-**PyPI name**: py_
-
-**Pythons**: CPython 2.6, 2.7, 3.3, 3.4, PyPy-2.3
-
-**Operating systems**: Linux, Windows, OSX, Unix
-
-**Requirements**: setuptools_ or Distribute_
-
-**Installers**: ``easy_install`` and ``pip``
-
-**hg repository**: https://bitbucket.org/hpk42/py
-
-easy install or pip ``py``
------------------------------
-
-Both `Distribute`_ and setuptools_ provide the ``easy_install``
-installation tool with which you can type into a command line window::
-
- easy_install -U py
-
-to install the latest release of the py lib. The ``-U`` switch
-will trigger an upgrade if you already have an older version installed.
-
-.. note::
-
- As of version 1.4 py does not contain py.test anymore - you
- need to install the new `pytest`_ distribution.
-
-.. _pytest: http://pytest.org
-
-Working from version control or a tarball
------------------------------------------------
-
-To follow development or start experiments, checkout the
-complete code and documentation source with mercurial_::
-
- hg clone https://bitbucket.org/hpk42/py
-
-Development takes place on the 'trunk' branch.
-
-You can also go to the python package index and
-download and unpack a TAR file::
-
- http://pypi.python.org/pypi/py/
-
-activating a checkout with setuptools
---------------------------------------------
-
-With a working `Distribute`_ or setuptools_ installation you can type::
-
- python setup.py develop
-
-in order to work inline with the tools and the lib of your checkout.
-
-.. _`no-setuptools`:
-
-.. _`directly use a checkout`:
-
-.. _`setuptools`: http://pypi.python.org/pypi/setuptools
-
-
-Mailing list and issue tracker
---------------------------------------
-
-- `py-dev developers list`_ and `commit mailing list`_.
-
-- #pylib on irc.freenode.net IRC channel for random questions.
-
-- `bitbucket issue tracker`_ use this bitbucket issue tracker to report
- bugs or request features.
-
-.. _`bitbucket issue tracker`: http://bitbucket.org/hpk42/py/issues/
-
-.. _codespeak: http://codespeak.net/
-.. _`py-dev`:
-.. _`development mailing list`:
-.. _`py-dev developers list`: http://codespeak.net/mailman/listinfo/py-dev
-.. _`py-svn`:
-.. _`commit mailing list`: http://codespeak.net/mailman/listinfo/py-svn
-
-.. include:: links.inc
+
+.. _`py`:
+.. _`index page`: http://pypi.python.org/pypi/py/
+
+installation info in a nutshell
+===================================================
+
+**PyPI name**: py_
+
+**Pythons**: CPython 2.6, 2.7, 3.3, 3.4, PyPy-2.3
+
+**Operating systems**: Linux, Windows, OSX, Unix
+
+**Requirements**: setuptools_ or Distribute_
+
+**Installers**: ``easy_install`` and ``pip``
+
+**hg repository**: https://bitbucket.org/hpk42/py
+
+easy install or pip ``py``
+-----------------------------
+
+Both `Distribute`_ and setuptools_ provide the ``easy_install``
+installation tool with which you can type into a command line window::
+
+ easy_install -U py
+
+to install the latest release of the py lib. The ``-U`` switch
+will trigger an upgrade if you already have an older version installed.
+
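+If you prefer pip, the equivalent invocation should be::
+
+    pip install -U py
+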
+.. note::
+
+ As of version 1.4 py does not contain py.test anymore - you
+ need to install the new `pytest`_ distribution.
+
+.. _pytest: http://pytest.org
+
+Working from version control or a tarball
+-----------------------------------------------
+
+To follow development or start experiments, check out the
+complete code and documentation source with mercurial_::
+
+ hg clone https://bitbucket.org/hpk42/py
+
+Development takes place on the 'trunk' branch.
+
+You can also go to the Python Package Index to
+download and unpack a TAR file::
+
+ http://pypi.python.org/pypi/py/
+
+activating a checkout with setuptools
+--------------------------------------------
+
+With a working `Distribute`_ or setuptools_ installation you can type::
+
+ python setup.py develop
+
+in order to work directly with the tools and the lib from your checkout.
+
+.. _`no-setuptools`:
+
+.. _`directly use a checkout`:
+
+.. _`setuptools`: http://pypi.python.org/pypi/setuptools
+
+
+Mailing list and issue tracker
+--------------------------------------
+
+- `py-dev developers list`_ and `commit mailing list`_.
+
+- #pylib on irc.freenode.net IRC channel for random questions.
+
+- `bitbucket issue tracker`_ - use it to report
+  bugs or request features.
+
+.. _`bitbucket issue tracker`: http://bitbucket.org/hpk42/py/issues/
+
+.. _codespeak: http://codespeak.net/
+.. _`py-dev`:
+.. _`development mailing list`:
+.. _`py-dev developers list`: http://codespeak.net/mailman/listinfo/py-dev
+.. _`py-svn`:
+.. _`commit mailing list`: http://codespeak.net/mailman/listinfo/py-svn
+
+.. include:: links.inc
diff --git a/doc/io.txt b/doc/io.txt
index c11308a..903e290 100644
--- a/doc/io.txt
+++ b/doc/io.txt
@@ -1,59 +1,59 @@
-=======
-py.io
-=======
-
-
-The 'py' lib provides helper classes for capturing IO during
-execution of a program.
-
-IO Capturing examples
-===============================================
-
-``py.io.StdCapture``
----------------------------
-
-Basic Example::
-
- >>> import py
- >>> capture = py.io.StdCapture()
- >>> print "hello"
- >>> out,err = capture.reset()
- >>> out.strip() == "hello"
- True
-
-For calling functions you may use a shortcut::
-
- >>> import py
- >>> def f(): print "hello"
- >>> res, out, err = py.io.StdCapture.call(f)
- >>> out.strip() == "hello"
- True
-
-``py.io.StdCaptureFD``
----------------------------
-
-If you also want to capture writes to the stdout/stderr
-filedescriptors you may invoke::
-
- >>> import py, sys
- >>> capture = py.io.StdCaptureFD(out=False, in_=False)
- >>> sys.stderr.write("world")
- >>> out,err = capture.reset()
- >>> err
- 'world'
-
-py.io object reference
-============================
-
-.. autoclass:: py.io.StdCaptureFD
- :members:
- :inherited-members:
-
-.. autoclass:: py.io.StdCapture
- :members:
- :inherited-members:
-
-.. autoclass:: py.io.TerminalWriter
- :members:
- :inherited-members:
-
+=======
+py.io
+=======
+
+
+The 'py' lib provides helper classes for capturing IO during
+execution of a program.
+
+IO Capturing examples
+===============================================
+
+``py.io.StdCapture``
+---------------------------
+
+Basic Example::
+
+ >>> import py
+ >>> capture = py.io.StdCapture()
+ >>> print "hello"
+ >>> out,err = capture.reset()
+ >>> out.strip() == "hello"
+ True
+
+For calling functions you may use a shortcut::
+
+ >>> import py
+ >>> def f(): print "hello"
+ >>> res, out, err = py.io.StdCapture.call(f)
+ >>> out.strip() == "hello"
+ True
+
+``py.io.StdCaptureFD``
+---------------------------
+
+If you also want to capture writes to the stdout/stderr
+filedescriptors you may invoke::
+
+ >>> import py, sys
+ >>> capture = py.io.StdCaptureFD(out=False, in_=False)
+ >>> sys.stderr.write("world")
+ >>> out,err = capture.reset()
+ >>> err
+ 'world'
+
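+``py.io.TerminalWriter``
+---------------------------
+
+For colored terminal output a ``py.io.TerminalWriter`` can be used. A
+minimal sketch (the ``bold`` markup keyword is one of several; on terminals
+without color support the output stays plain)::
+
+    >>> tw = py.io.TerminalWriter()
+    >>> tw.line("hello world", bold=True)   # doctest: +SKIP
+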
+py.io object reference
+============================
+
+.. autoclass:: py.io.StdCaptureFD
+ :members:
+ :inherited-members:
+
+.. autoclass:: py.io.StdCapture
+ :members:
+ :inherited-members:
+
+.. autoclass:: py.io.TerminalWriter
+ :members:
+ :inherited-members:
+
diff --git a/doc/links.inc b/doc/links.inc
index 9bcfe5c..3e1d5b8 100644
--- a/doc/links.inc
+++ b/doc/links.inc
@@ -1,16 +1,16 @@
-
-.. _`skipping plugin`: plugin/skipping.html
-.. _`funcargs mechanism`: funcargs.html
-.. _`doctest.py`: http://docs.python.org/library/doctest.html
-.. _`xUnit style setup`: xunit_setup.html
-.. _`pytest_nose`: plugin/nose.html
-.. _`reStructured Text`: http://docutils.sourceforge.net
-.. _`Python debugger`: http://docs.python.org/lib/module-pdb.html
-.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
-.. _pytest: http://pypi.python.org/pypi/pytest
-.. _mercurial: http://mercurial.selenic.com/wiki/
-.. _`setuptools`: http://pypi.python.org/pypi/setuptools
-.. _`distribute`: http://pypi.python.org/pypi/distribute
-.. _`pip`: http://pypi.python.org/pypi/pip
-.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
-.. _hudson: http://hudson-ci.org/
+
+.. _`skipping plugin`: plugin/skipping.html
+.. _`funcargs mechanism`: funcargs.html
+.. _`doctest.py`: http://docs.python.org/library/doctest.html
+.. _`xUnit style setup`: xunit_setup.html
+.. _`pytest_nose`: plugin/nose.html
+.. _`reStructured Text`: http://docutils.sourceforge.net
+.. _`Python debugger`: http://docs.python.org/lib/module-pdb.html
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
+.. _pytest: http://pypi.python.org/pypi/pytest
+.. _mercurial: http://mercurial.selenic.com/wiki/
+.. _`setuptools`: http://pypi.python.org/pypi/setuptools
+.. _`distribute`: http://pypi.python.org/pypi/distribute
+.. _`pip`: http://pypi.python.org/pypi/pip
+.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
+.. _hudson: http://hudson-ci.org/
diff --git a/doc/log.txt b/doc/log.txt
index ca60fca..7727d75 100644
--- a/doc/log.txt
+++ b/doc/log.txt
@@ -1,208 +1,208 @@
-.. role:: code(literal)
-.. role:: file(literal)
-
-.. XXX figure out how the code literals should be dealt with in sphinx. There is probably something builtin.
-
-========================================
-py.log documentation and musings
-========================================
-
-
-Foreword
-========
-
-This document is an attempt to briefly state the actual specification of the
-:code:`py.log` module. It was written by Francois Pinard and also contains
-some ideas for enhancing the py.log facilities.
-
-NOTE that :code:`py.log` is subject to refactorings, it may change with
-the next release.
-
-This document is meant to trigger or facilitate discussions. It shamelessly
-steals from the `Agile Testing`__ comments, and from other sources as well,
-without really trying to sort them out.
-
-__ http://agiletesting.blogspot.com/2005/06/keyword-based-logging-with-py-library.html
-
-
-Logging organisation
-====================
-
-The :code:`py.log` module aims a niche comparable to the one of the
-`logging module`__ found within the standard Python distributions, yet
-with much simpler paradigms for configuration and usage.
-
-__ http://www.python.org/doc/2.4.2/lib/module-logging.html
-
-Holger Krekel, the main :code:`py` library developer, introduced
-the idea of keyword-based logging and the idea of logging *producers* and
-*consumers*. A log producer is an object used by the application code
-to send messages to various log consumers. When you create a log
-producer, you define a set of keywords that are then used to both route
-the logging messages to consumers, and to prefix those messages.
-
-In fact, each log producer has a few keywords associated with it for
-identification purposes. These keywords form a tuple of strings, and
-may be used to later retrieve a particular log producer.
-
-A log producer may (or may not) be associated with a log consumer, meant
-to handle log messages in particular ways. The log consumers can be
-``STDOUT``, ``STDERR``, log files, syslog, the Windows Event Log, user
-defined functions, etc. (Yet, logging to syslog or to the Windows Event
-Log is only future plans for now). A log producer has never more than
-one consumer at a given time, but it is possible to dynamically switch
-a producer to use another consumer. On the other hand, a single log
-consumer may be associated with many producers.
-
-Note that creating and associating a producer and a consumer is done
-automatically when not otherwise overriden, so using :code:`py` logging
-is quite comfortable even in the smallest programs. More typically,
-the application programmer will likely design a hierarchy of producers,
-and will select keywords appropriately for marking the hierarchy tree.
-If a node of the hierarchical tree of producers has to be divided in
-sub-trees, all producers in the sub-trees share, as a common prefix, the
-keywords of the node being divided. In other words, we go further down
-in the hierarchy of producers merely by adding keywords.
-
-Using the py.log library
-================================
-
-To use the :code:`py.log` library, the user must import it into a Python
-application, create at least one log producer and one log consumer, have
-producers and consumers associated, and finally call the log producers
-as needed, giving them log messages.
-
-Importing
----------
-
-Once the :code:`py` library is installed on your system, a mere::
-
- import py
-
-holds enough magic for lazily importing the various facilities of the
-:code:`py` library when they are first needed. This is really how
-:code:`py.log` is made available to the application. For example, after
-the above ``import py``, one may directly write ``py.log.Producer(...)``
-and everything should work fine, the user does not have to worry about
-specifically importing more modules.
-
-Creating a producer
--------------------
-
-There are three ways for creating a log producer instance:
-
- + As soon as ``py.log`` is first evaluated within an application
- program, a default log producer is created, and made available under
- the name ``py.log.default``. The keyword ``default`` is associated
- with that producer.
-
- + The ``py.log.Producer()`` constructor may be explicitly called
- for creating a new instance of a log producer. That constructor
- accepts, as an argument, the keywords that should be associated with
- that producer. Keywords may be given either as a tuple of keyword
- strings, or as a single space-separated string of keywords.
-
- + Whenever an attribute is *taken* out of a log producer instance,
- for the first time that attribute is taken, a new log producer is
- created. The keywords associated with that new producer are those
- of the initial producer instance, to which is appended the name of
- the attribute being taken.
-
-The last point is especially useful, as it allows using log producers
-without further declarations, merely creating them *on-the-fly*.
-
-Creating a consumer
--------------------
-
-There are many ways for creating or denoting a log consumer:
-
- + A default consumer exists within the ``py.log`` facilities, which
- has the effect of writing log messages on the Python standard output
- stream. That consumer is associated at the very top of the producer
- hierarchy, and as such, is called whenever no other consumer is
- found.
-
- + The notation ``py.log.STDOUT`` accesses a log consumer which writes
- log messages on the Python standard output stream.
-
- + The notation ``py.log.STDERR`` accesses a log consumer which writes
- log messages on the Python standard error stream.
-
- + The ``py.log.File()`` constructor accepts, as argument, either a file
- already opened in write mode or any similar file-like object, and
- creates a log consumer able to write log messages onto that file.
-
- + The ``py.log.Path()`` constructor accepts a file name for its first
- argument, and creates a log consumer able to write log messages into
- that file. The constructor call accepts a few keyword parameters:
-
- + ``append``, which is ``False`` by default, may be used for
- opening the file in append mode instead of write mode.
-
- + ``delayed_create``, which is ``False`` by default, maybe be used
- for opening the file at the latest possible time. Consequently,
- the file will not be created if it did not exist, and no actual
- log message gets written to it.
-
- + ``buffering``, which is 1 by default, is used when opening the
- file. Buffering can be turned off by specifying a 0 value. The
- buffer size may also be selected through this argument.
-
- + Any user defined function may be used for a log consumer. Such a
- function should accept a single argument, which is the message to
- write, and do whatever is deemed appropriate by the programmer.
- When the need arises, this may be an especially useful and flexible
- feature.
-
- + The special value ``None`` means no consumer at all. This acts just
- like if there was a consumer which would silently discard all log
- messages sent to it.
-
-Associating producers and consumers
------------------------------------
-
-Each log producer may have at most one log consumer associated with
-it. A log producer gets associated with a log consumer through a
-``py.log.setconsumer()`` call. That function accepts two arguments,
-the first identifying a producer (a tuple of keyword strings or a single
-space-separated string of keywords), the second specifying the precise
-consumer to use for that producer. Until this function is called for a
-producer, that producer does not have any explicit consumer associated
-with it.
-
-Now, the hierarchy of log producers establishes which consumer gets used
-whenever a producer has no explicit consumer. When a log producer
-has no consumer explicitly associated with it, it dynamically and
-recursively inherits the consumer of its parent node, that is, that node
-being a bit closer to the root of the hierarchy. In other words, the
-rightmost keywords of that producer are dropped until another producer
-is found which has an explicit consumer. A nice side-effect is that,
-by explicitly associating a consumer with a producer, all consumer-less
-producers which appear under that producer, in the hierarchy tree,
-automatically *inherits* that consumer.
-
-Writing log messages
---------------------
-
-All log producer instances are also functions, and this is by calling
-them that log messages are generated. Each call to a producer object
-produces the text for one log entry, which in turn, is sent to the log
-consumer for that producer.
-
-The log entry displays, after a prefix identifying the log producer
-being used, all arguments given in the call, converted to strings and
-space-separated. (This is meant by design to be fairly similar to what
-the ``print`` statement does in Python). The prefix itself is made up
-of a colon-separated list of keywords associated with the producer, the
-whole being set within square brackets.
-
-Note that the consumer is responsible for adding the newline at the end
-of the log entry. That final newline is not part of the text for the
-log entry.
-
-.. Other details
-.. -------------
-.. XXX: fill in details
-.. + Should speak about pickle-ability of :code:`py.log`.
-..
-.. + What is :code:`log.get` (in :file:`logger.py`)?
+.. role:: code(literal)
+.. role:: file(literal)
+
+.. XXX figure out how the code literals should be dealt with in sphinx. There is probably something builtin.
+
+========================================
+py.log documentation and musings
+========================================
+
+
+Foreword
+========
+
+This document is an attempt to briefly state the actual specification of the
+:code:`py.log` module. It was written by Francois Pinard and also contains
+some ideas for enhancing the py.log facilities.
+
+NOTE that :code:`py.log` is subject to refactoring; it may change with
+the next release.
+
+This document is meant to trigger or facilitate discussions. It shamelessly
+steals from the `Agile Testing`__ comments, and from other sources as well,
+without really trying to sort them out.
+
+__ http://agiletesting.blogspot.com/2005/06/keyword-based-logging-with-py-library.html
+
+
+Logging organisation
+====================
+
+The :code:`py.log` module aims at a niche comparable to that of the
+`logging module`__ found within the standard Python distributions, yet
+with much simpler paradigms for configuration and usage.
+
+__ http://www.python.org/doc/2.4.2/lib/module-logging.html
+
+Holger Krekel, the main :code:`py` library developer, introduced
+the idea of keyword-based logging and the idea of logging *producers* and
+*consumers*. A log producer is an object used by the application code
+to send messages to various log consumers. When you create a log
+producer, you define a set of keywords that are then used to both route
+the logging messages to consumers, and to prefix those messages.
+
+In fact, each log producer has a few keywords associated with it for
+identification purposes. These keywords form a tuple of strings, and
+may be used to later retrieve a particular log producer.
+
+A log producer may (or may not) be associated with a log consumer, meant
+to handle log messages in particular ways. The log consumers can be
+``STDOUT``, ``STDERR``, log files, syslog, the Windows Event Log, user
+defined functions, etc. (Yet, logging to syslog or to the Windows Event
+Log is only future plans for now). A log producer has never more than
+one consumer at a given time, but it is possible to dynamically switch
+a producer to use another consumer. On the other hand, a single log
+consumer may be associated with many producers.
+
+Note that creating and associating a producer and a consumer is done
+automatically when not otherwise overridden, so using :code:`py` logging
+is quite comfortable even in the smallest programs. More typically,
+the application programmer will likely design a hierarchy of producers,
+and will select keywords appropriately for marking the hierarchy tree.
+If a node of the hierarchical tree of producers has to be divided into
+sub-trees, all producers in the sub-trees share, as a common prefix, the
+keywords of the node being divided. In other words, we go further down
+in the hierarchy of producers merely by adding keywords.
+
+Using the py.log library
+================================
+
+To use the :code:`py.log` library, the user must import it into a Python
+application, create at least one log producer and one log consumer, have
+producers and consumers associated, and finally call the log producers
+as needed, giving them log messages.
+
+Importing
+---------
+
+Once the :code:`py` library is installed on your system, a mere::
+
+ import py
+
+holds enough magic for lazily importing the various facilities of the
+:code:`py` library when they are first needed. This is really how
+:code:`py.log` is made available to the application. For example, after
+the above ``import py``, one may directly write ``py.log.Producer(...)``
+and everything should work fine; the user does not have to worry about
+explicitly importing further modules.
+
+Creating a producer
+-------------------
+
+There are three ways for creating a log producer instance:
+
+ + As soon as ``py.log`` is first evaluated within an application
+ program, a default log producer is created, and made available under
+ the name ``py.log.default``. The keyword ``default`` is associated
+ with that producer.
+
+ + The ``py.log.Producer()`` constructor may be explicitly called
+ for creating a new instance of a log producer. That constructor
+ accepts, as an argument, the keywords that should be associated with
+ that producer. Keywords may be given either as a tuple of keyword
+ strings, or as a single space-separated string of keywords.
+
+ + Whenever an attribute is *taken* out of a log producer instance
+ for the first time, a new log producer is created. The keywords
+ associated with that new producer are those of the initial producer
+ instance, with the name of the attribute being taken appended.
+
+The last point is especially useful, as it allows using log producers
+without further declarations, merely creating them *on-the-fly*.
+
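+For instance, the three ways can be sketched as follows (relying only on
+the behaviour described above; the keywords ``myapp`` and ``db`` are made
+up for illustration)::
+
+ import py
+ log = py.log.Producer("myapp")        # explicit constructor, keyword "myapp"
+ sublog = log.db                       # attribute access -> keywords ("myapp", "db")
+ py.log.default("something happened")  # the always-available default producer
+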
+Creating a consumer
+-------------------
+
+There are many ways for creating or denoting a log consumer:
+
+ + A default consumer exists within the ``py.log`` facilities, which
+ has the effect of writing log messages on the Python standard output
+ stream. That consumer is associated at the very top of the producer
+ hierarchy, and as such, is called whenever no other consumer is
+ found.
+
+ + The notation ``py.log.STDOUT`` accesses a log consumer which writes
+ log messages on the Python standard output stream.
+
+ + The notation ``py.log.STDERR`` accesses a log consumer which writes
+ log messages on the Python standard error stream.
+
+ + The ``py.log.File()`` constructor accepts, as argument, either a file
+ already opened in write mode or any similar file-like object, and
+ creates a log consumer able to write log messages onto that file.
+
+ + The ``py.log.Path()`` constructor accepts a file name for its first
+ argument, and creates a log consumer able to write log messages into
+ that file. The constructor call accepts a few keyword parameters:
+
+ + ``append``, which is ``False`` by default, may be used for
+ opening the file in append mode instead of write mode.
+
+ + ``delayed_create``, which is ``False`` by default, may be used
+ for opening the file at the latest possible time. Consequently,
+ if the file did not already exist and no log message ever gets
+ written, the file will not be created at all.
+
+ + ``buffering``, which is 1 by default, is used when opening the
+ file. Buffering can be turned off by specifying a 0 value. The
+ buffer size may also be selected through this argument.
+
+ + Any user defined function may be used for a log consumer. Such a
+ function should accept a single argument, which is the message to
+ write, and do whatever is deemed appropriate by the programmer.
+ When the need arises, this may be an especially useful and flexible
+ feature.
+
+ + The special value ``None`` means no consumer at all. This acts just
+ as if there were a consumer that silently discards all log
+ messages sent to it.
+
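+A minimal sketch of a few of these consumer flavours (the file name and
+the helper function are made up for illustration)::
+
+ import py
+ err_consumer = py.log.STDERR                         # standard error stream
+ file_consumer = py.log.Path("out.log", append=True)  # log file, opened in append mode
+ def my_consumer(msg):                                 # any one-argument callable works
+     print msg
+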
+Associating producers and consumers
+-----------------------------------
+
+Each log producer may have at most one log consumer associated with
+it. A log producer gets associated with a log consumer through a
+``py.log.setconsumer()`` call. That function accepts two arguments,
+the first identifying a producer (a tuple of keyword strings or a single
+space-separated string of keywords), the second specifying the precise
+consumer to use for that producer. Until this function is called for a
+producer, that producer does not have any explicit consumer associated
+with it.
+
+Now, the hierarchy of log producers establishes which consumer gets used
+whenever a producer has no explicit consumer. When a log producer
+has no consumer explicitly associated with it, it dynamically and
+recursively inherits the consumer of its parent node, that is, that node
+being a bit closer to the root of the hierarchy. In other words, the
+rightmost keywords of that producer are dropped until another producer
+is found which has an explicit consumer. A nice side-effect is that,
+by explicitly associating a consumer with a producer, all consumer-less
+producers which appear under that producer, in the hierarchy tree,
+automatically *inherit* that consumer.
+
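+A minimal sketch of associating consumers, and of how consumer-less
+producers inherit along the keyword hierarchy (keywords made up for
+illustration)::
+
+ import py
+ py.log.setconsumer("myapp", py.log.STDERR)             # consumer for "myapp" and below
+ py.log.setconsumer("myapp db", py.log.Path("db.log"))  # override for the "myapp db" branch
+ log = py.log.Producer("myapp")
+ log.ui("goes to stderr")   # ("myapp", "ui") has no consumer, inherits from "myapp"
+ log.db("goes to db.log")   # ("myapp", "db") uses its own consumer
+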
+Writing log messages
+--------------------
+
+All log producer instances are also callable, and it is by calling
+them that log messages are generated. Each call to a producer object
+produces the text for one log entry, which in turn, is sent to the log
+consumer for that producer.
+
+The log entry displays, after a prefix identifying the log producer
+being used, all arguments given in the call, converted to strings and
+space-separated. (This is meant by design to be fairly similar to what
+the ``print`` statement does in Python). The prefix itself is made up
+of a colon-separated list of keywords associated with the producer, the
+whole being set within square brackets.
+
+Note that the consumer is responsible for adding the newline at the end
+of the log entry. That final newline is not part of the text for the
+log entry.
+
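+For example, calling a producer with several arguments produces a single
+log entry (a sketch; the keywords are made up, and the line goes to
+whatever consumer is configured)::
+
+ log = py.log.Producer("myapp db")
+ log("connected to", "localhost", 5432)
+ # emits a line such as:  [myapp:db] connected to localhost 5432
+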
+.. Other details
+.. -------------
+.. XXX: fill in details
+.. + Should speak about pickle-ability of :code:`py.log`.
+..
+.. + What is :code:`log.get` (in :file:`logger.py`)?
diff --git a/doc/misc.txt b/doc/misc.txt
index 8c3c0b3..dec918b 100644
--- a/doc/misc.txt
+++ b/doc/misc.txt
@@ -1,93 +1,93 @@
-====================================
-Miscellaneous features of the py lib
-====================================
-
-Mapping the standard python library into py
-===========================================
-
-The ``py.std`` object allows lazy access to
-standard library modules. For example, to get to the print-exception
-functionality of the standard library you can write::
-
- py.std.traceback.print_exc()
-
-without having to do anything else than the usual ``import py``
-at the beginning. You can access any other top-level standard
-library module this way. This means that you will only trigger
-imports of modules that are actually needed. Note that no attempt
-is made to import submodules.
-
-Support for interaction with system utilities/binaries
-======================================================
-
-Currently, the py lib offers two ways to interact with
-system executables. ``py.process.cmdexec()`` invokes
-the shell in order to execute a string. The other
-one, ``py.path.local``'s 'sysexec()' method lets you
-directly execute a binary.
-
-Both approaches will raise an exception in case of a return-
-code other than 0 and otherwise return the stdout-output
-of the child process.
-
-The shell based approach
-------------------------
-
-You can execute a command via your system shell
-by doing something like::
-
- out = py.process.cmdexec('ls -v')
-
-However, the ``cmdexec`` approach has a few shortcomings:
-
-- it relies on the underlying system shell
-- it neccessitates shell-escaping for expressing arguments
-- it does not easily allow to "fix" the binary you want to run.
-- it only allows to execute executables from the local
- filesystem
-
-.. _sysexec:
-
-local paths have ``sysexec``
-----------------------------
-
-In order to synchronously execute an executable file you
-can use ``sysexec``::
-
- binsvn.sysexec('ls', 'http://codespeak.net/svn')
-
-where ``binsvn`` is a path that points to the ``svn`` commandline
-binary. Note that this function does not offer any shell-escaping
-so you have to pass in already separated arguments.
-
-finding an executable local path
---------------------------------
-
-Finding an executable is quite different on multiple platforms.
-Currently, the ``PATH`` environment variable based search on
-unix platforms is supported::
-
- py.path.local.sysfind('svn')
-
-which returns the first path whose ``basename`` matches ``svn``.
-In principle, `sysfind` deploys platform specific algorithms
-to perform the search. On Windows, for example, it may look
-at the registry (XXX).
-
-To make the story complete, we allow to pass in a second ``checker``
-argument that is called for each found executable. For example, if
-you have multiple binaries available you may want to select the
-right version::
-
- def mysvn(p):
- """ check that the given svn binary has version 1.1. """
- line = p.execute('--version'').readlines()[0]
- if line.find('version 1.1'):
- return p
- binsvn = py.path.local.sysfind('svn', checker=mysvn)
-
-
-Cross-Python Version compatibility helpers
-=============================================
-
-The ``py.builtin`` namespace provides a number of helpers that help to write python code compatible across Python interpreters, mainly Python2 and Python3. Type ``help(py.builtin)`` on a Python prompt for a the selection of builtins.
+====================================
+Miscellaneous features of the py lib
+====================================
+
+Mapping the standard python library into py
+===========================================
+
+The ``py.std`` object allows lazy access to
+standard library modules. For example, to get to the print-exception
+functionality of the standard library you can write::
+
+ py.std.traceback.print_exc()
+
+without having to do anything other than the usual ``import py``
+at the beginning. You can access any other top-level standard
+library module this way. This means that you will only trigger
+imports of modules that are actually needed. Note that no attempt
+is made to import submodules.
+
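+For example, any other top-level module is reached the same way (a small
+sketch; ``math`` merely stands in for an arbitrary stdlib module)::
+
+ import py
+ py.std.math.sqrt(2)   # the ``math`` module is imported lazily on first access
+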
+Support for interaction with system utilities/binaries
+======================================================
+
+Currently, the py lib offers two ways to interact with
+system executables. ``py.process.cmdexec()`` invokes
+the shell in order to execute a string. The other
+one, ``py.path.local``'s 'sysexec()' method lets you
+directly execute a binary.
+
+Both approaches will raise an exception if the return code
+is other than 0, and will otherwise return the stdout output
+of the child process.
+
+The shell based approach
+------------------------
+
+You can execute a command via your system shell
+by doing something like::
+
+ out = py.process.cmdexec('ls -v')
+
+However, the ``cmdexec`` approach has a few shortcomings:
+
+- it relies on the underlying system shell
+- it necessitates shell-escaping for expressing arguments
+- it does not easily allow you to "fix" the binary you want to run
+- it only allows executing executables from the local
+ filesystem
+
+.. _sysexec:
+
+local paths have ``sysexec``
+----------------------------
+
+In order to synchronously execute an executable file you
+can use ``sysexec``::
+
+ binsvn.sysexec('ls', 'http://codespeak.net/svn')
+
+where ``binsvn`` is a path that points to the ``svn`` commandline
+binary. Note that this function does not offer any shell-escaping
+so you have to pass in already separated arguments.
+
+finding an executable local path
+--------------------------------
+
+Finding an executable works quite differently across platforms.
+Currently, the ``PATH`` environment variable based search on
+unix platforms is supported::
+
+ py.path.local.sysfind('svn')
+
+which returns the first path whose ``basename`` matches ``svn``.
+In principle, `sysfind` deploys platform specific algorithms
+to perform the search. On Windows, for example, it may look
+at the registry (XXX).
+
+To make the story complete, we allow passing in a second ``checker``
+argument that is called for each found executable. For example, if
+you have multiple binaries available you may want to select the
+right version::
+
+ def mysvn(p):
+     """ check that the given svn binary has version 1.1. """
+     line = p.sysexec('--version').splitlines()[0]
+     if 'version 1.1' in line:
+         return p
+ binsvn = py.path.local.sysfind('svn', checker=mysvn)
+
+
+Cross-Python Version compatibility helpers
+=============================================
+
+The ``py.builtin`` namespace provides a number of helpers for writing Python code that is compatible across Python interpreters, mainly Python 2 and Python 3. Type ``help(py.builtin)`` at a Python prompt for the selection of builtins.
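+
+For example, ``py.builtin.print_`` offers a function-style ``print`` that
+behaves the same on Python 2 and Python 3 (a small sketch; see
+``help(py.builtin)`` for the authoritative list of helpers)::
+
+ import py
+ py.builtin.print_("hello", "world")   # prints "hello world" on either interpreter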
diff --git a/doc/path.txt b/doc/path.txt
index 837c1d1..1e45d78 100644
--- a/doc/path.txt
+++ b/doc/path.txt
@@ -1,260 +1,260 @@
-=======
-py.path
-=======
-
-The 'py' lib provides a uniform high-level api to deal with filesystems
-and filesystem-like interfaces: ``py.path``. It aims to offer a central
-object to fs-like object trees (reading from and writing to files, adding
-files/directories, examining the types and structure, etc.), and out-of-the-box
-provides a number of implementations of this API.
-
-py.path.local - local file system path
-===============================================
-
-.. _`local`:
-
-basic interactive example
--------------------------------------
-
-The first and most obvious of the implementations is a wrapper around a local
-filesystem. It's just a bit nicer in usage than the regular Python APIs, and
-of course all the functionality is bundled together rather than spread over a
-number of modules.
-
-Example usage, here we use the ``py.test.ensuretemp()`` function to create
-a ``py.path.local`` object for us (which wraps a directory):
-
-.. sourcecode:: pycon
-
- >>> import py
- >>> temppath = py.test.ensuretemp('py.path_documentation')
- >>> foopath = temppath.join('foo') # get child 'foo' (lazily)
- >>> foopath.check() # check if child 'foo' exists
- False
- >>> foopath.write('bar') # write some data to it
- >>> foopath.check()
- True
- >>> foopath.read()
- 'bar'
- >>> foofile = foopath.open() # return a 'real' file object
- >>> foofile.read(1)
- 'b'
-
-reference documentation
----------------------------------
-
-.. autoclass:: py._path.local.LocalPath
- :members:
- :inherited-members:
-
-``py.path.svnurl`` and ``py.path.svnwc``
-==================================================
-
-Two other ``py.path`` implementations that the py lib provides wrap the
-popular `Subversion`_ revision control system: the first (called 'svnurl')
-by interfacing with a remote server, the second by wrapping a local checkout.
-Both allow you to access relatively advanced features such as metadata and
-versioning, and both in a way more user-friendly manner than existing other
-solutions.
-
-Some example usage of ``py.path.svnurl``:
-
-.. sourcecode:: pycon
-
- .. >>> import py
- .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
- >>> url = py.path.svnurl('http://codespeak.net/svn/py')
- >>> info = url.info()
- >>> info.kind
- 'dir'
- >>> firstentry = url.log()[-1]
- >>> import time
- >>> time.strftime('%Y-%m-%d', time.gmtime(firstentry.date))
- '2004-10-02'
-
-Example usage of ``py.path.svnwc``:
-
-.. sourcecode:: pycon
-
- .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
- >>> temp = py.test.ensuretemp('py.path_documentation')
- >>> wc = py.path.svnwc(temp.join('svnwc'))
- >>> wc.checkout('http://codespeak.net/svn/py/dist/py/path/local')
- >>> wc.join('local.py').check()
- True
-
-.. _`Subversion`: http://subversion.tigris.org/
-
-svn path related API reference
------------------------------------------
-
-.. autoclass:: py._path.svnwc.SvnWCCommandPath
- :members:
- :inherited-members:
-
-.. autoclass:: py._path.svnurl.SvnCommandPath
- :members:
- :inherited-members:
-
-.. autoclass:: py._path.svnwc.SvnAuth
- :members:
- :inherited-members:
-
-Common vs. specific API, Examples
-========================================
-
-All Path objects support a common set of operations, suitable
-for many use cases and allowing to transparently switch the
-path object within an application (e.g. from "local" to "svnwc").
-The common set includes functions such as `path.read()` to read all data
-from a file, `path.write()` to write data, `path.listdir()` to get a list
-of directory entries, `path.check()` to check if a node exists
-and is of a particular type, `path.join()` to get
-to a (grand)child, `path.visit()` to recursively walk through a node's
-children, etc. Only things that are not common on 'normal' filesystems (yet),
-such as handling metadata (e.g. the Subversion "properties") require
-using specific APIs.
-
-A quick 'cookbook' of small examples that will be useful 'in real life',
-which also presents parts of the 'common' API, and shows some non-common
-methods:
-
-Searching `.txt` files
---------------------------------
-
-Search for a particular string inside all files with a .txt extension in a
-specific directory.
-
-.. sourcecode:: pycon
-
- >>> dirpath = temppath.ensure('testdir', dir=True)
- >>> dirpath.join('textfile1.txt').write('foo bar baz')
- >>> dirpath.join('textfile2.txt').write('frob bar spam eggs')
- >>> subdir = dirpath.ensure('subdir', dir=True)
- >>> subdir.join('textfile1.txt').write('foo baz')
- >>> subdir.join('textfile2.txt').write('spam eggs spam foo bar spam')
- >>> results = []
- >>> for fpath in dirpath.visit('*.txt'):
- ... if 'bar' in fpath.read():
- ... results.append(fpath.basename)
- >>> results.sort()
- >>> results
- ['textfile1.txt', 'textfile2.txt', 'textfile2.txt']
-
-Working with Paths
-----------------------------
-
-This example shows the ``py.path`` features to deal with
-filesystem paths Note that the filesystem is never touched,
-all operations are performed on a string level (so the paths
-don't have to exist, either):
-
-.. sourcecode:: pycon
-
- >>> p1 = py.path.local('/foo/bar')
- >>> p2 = p1.join('baz/qux')
- >>> p2 == py.path.local('/foo/bar/baz/qux')
- True
- >>> sep = py.path.local.sep
- >>> p2.relto(p1).replace(sep, '/') # os-specific path sep in the string
- 'baz/qux'
- >>> p2.bestrelpath(p1).replace(sep, '/')
- '../..'
- >>> p2.join(p2.bestrelpath(p1)) == p1
- True
- >>> p3 = p1 / 'baz/qux' # the / operator allows joining, too
- >>> p2 == p3
- True
- >>> p4 = p1 + ".py"
- >>> p4.basename == "bar.py"
- True
- >>> p4.ext == ".py"
- True
- >>> p4.purebasename == "bar"
- True
-
-This should be possible on every implementation of ``py.path``, so
-regardless of whether the implementation wraps a UNIX filesystem, a Windows
-one, or a database or object tree, these functions should be available (each
-with their own notion of path seperators and dealing with conversions, etc.).
-
-Checking path types
--------------------------------
-
-Now we will show a bit about the powerful 'check()' method on paths, which
-allows you to check whether a file exists, what type it is, etc.:
-
-.. sourcecode:: pycon
-
- >>> file1 = temppath.join('file1')
- >>> file1.check() # does it exist?
- False
- >>> file1 = file1.ensure(file=True) # 'touch' the file
- >>> file1.check()
- True
- >>> file1.check(dir=True) # is it a dir?
- False
- >>> file1.check(file=True) # or a file?
- True
- >>> file1.check(ext='.txt') # check the extension
- False
- >>> textfile = temppath.ensure('text.txt', file=True)
- >>> textfile.check(ext='.txt')
- True
- >>> file1.check(basename='file1') # we can use all the path's properties here
- True
-
-Setting svn-properties
---------------------------------
-
-As an example of 'uncommon' methods, we'll show how to read and write
-properties in an ``py.path.svnwc`` instance:
-
-.. sourcecode:: pycon
-
- .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
- >>> wc.propget('foo')
- ''
- >>> wc.propset('foo', 'bar')
- >>> wc.propget('foo')
- 'bar'
- >>> len(wc.status().prop_modified) # our own props
- 1
- >>> msg = wc.revert() # roll back our changes
- >>> len(wc.status().prop_modified)
- 0
-
-SVN authentication
-----------------------------
-
-Some uncommon functionality can also be provided as extensions, such as SVN
-authentication:
-
-.. sourcecode:: pycon
-
- .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
- >>> auth = py.path.SvnAuth('anonymous', 'user', cache_auth=False,
- ... interactive=False)
- >>> wc.auth = auth
- >>> wc.update() # this should work
- >>> path = wc.ensure('thisshouldnotexist.txt')
- >>> try:
- ... path.commit('testing')
- ... except py.process.cmdexec.Error, e:
- ... pass
- >>> 'authorization failed' in str(e)
- True
-
-Known problems / limitations
-===================================
-
-* The SVN path objects require the "svn" command line,
- there is currently no support for python bindings.
- Parsing the svn output can lead to problems, particularly
- regarding if you have a non-english "locales" setting.
-
-* While the path objects basically work on windows,
- there is no attention yet on making unicode paths
- work or deal with the famous "8.3" filename issues.
-
-
+=======
+py.path
+=======
+
+The 'py' lib provides a uniform high-level API to deal with filesystems
+and filesystem-like interfaces: ``py.path``. It aims to offer a central
+interface to fs-like object trees (reading from and writing to files, adding
+files/directories, examining the types and structure, etc.), and provides
+a number of implementations of this API out of the box.
+
+py.path.local - local file system path
+===============================================
+
+.. _`local`:
+
+basic interactive example
+-------------------------------------
+
+The first and most obvious of the implementations is a wrapper around a local
+filesystem. It's just a bit nicer in usage than the regular Python APIs, and
+of course all the functionality is bundled together rather than spread over a
+number of modules.
+
+Example usage, here we use the ``py.test.ensuretemp()`` function to create
+a ``py.path.local`` object for us (which wraps a directory):
+
+.. sourcecode:: pycon
+
+ >>> import py
+ >>> temppath = py.test.ensuretemp('py.path_documentation')
+ >>> foopath = temppath.join('foo') # get child 'foo' (lazily)
+ >>> foopath.check() # check if child 'foo' exists
+ False
+ >>> foopath.write('bar') # write some data to it
+ >>> foopath.check()
+ True
+ >>> foopath.read()
+ 'bar'
+ >>> foofile = foopath.open() # return a 'real' file object
+ >>> foofile.read(1)
+ 'b'
+
+reference documentation
+---------------------------------
+
+.. autoclass:: py._path.local.LocalPath
+ :members:
+ :inherited-members:
+
+``py.path.svnurl`` and ``py.path.svnwc``
+==================================================
+
+Two other ``py.path`` implementations that the py lib provides wrap the
+popular `Subversion`_ revision control system: the first (called 'svnurl')
+by interfacing with a remote server, the second by wrapping a local checkout.
+Both allow you to access relatively advanced features such as metadata and
+versioning, and both do so in a more user-friendly manner than other
+existing solutions.
+
+Some example usage of ``py.path.svnurl``:
+
+.. sourcecode:: pycon
+
+ .. >>> import py
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> url = py.path.svnurl('http://codespeak.net/svn/py')
+ >>> info = url.info()
+ >>> info.kind
+ 'dir'
+ >>> firstentry = url.log()[-1]
+ >>> import time
+ >>> time.strftime('%Y-%m-%d', time.gmtime(firstentry.date))
+ '2004-10-02'
+
+Example usage of ``py.path.svnwc``:
+
+.. sourcecode:: pycon
+
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> temp = py.test.ensuretemp('py.path_documentation')
+ >>> wc = py.path.svnwc(temp.join('svnwc'))
+ >>> wc.checkout('http://codespeak.net/svn/py/dist/py/path/local')
+ >>> wc.join('local.py').check()
+ True
+
+.. _`Subversion`: http://subversion.tigris.org/
+
+svn path related API reference
+-----------------------------------------
+
+.. autoclass:: py._path.svnwc.SvnWCCommandPath
+ :members:
+ :inherited-members:
+
+.. autoclass:: py._path.svnurl.SvnCommandPath
+ :members:
+ :inherited-members:
+
+.. autoclass:: py._path.svnwc.SvnAuth
+ :members:
+ :inherited-members:
+
+Common vs. specific API, Examples
+========================================
+
+All Path objects support a common set of operations, suitable
+for many use cases and allowing you to transparently switch the
+path object within an application (e.g. from "local" to "svnwc").
+The common set includes functions such as `path.read()` to read all data
+from a file, `path.write()` to write data, `path.listdir()` to get a list
+of directory entries, `path.check()` to check if a node exists
+and is of a particular type, `path.join()` to get
+to a (grand)child, `path.visit()` to recursively walk through a node's
+children, etc. Only things that are not common on 'normal' filesystems (yet),
+such as handling metadata (e.g. the Subversion "properties") require
+using specific APIs.
+
+A quick 'cookbook' of small examples that will be useful 'in real life',
+which also presents parts of the 'common' API, and shows some non-common
+methods:
+
+Searching `.txt` files
+--------------------------------
+
+Search for a particular string inside all files with a .txt extension in a
+specific directory.
+
+.. sourcecode:: pycon
+
+ >>> dirpath = temppath.ensure('testdir', dir=True)
+ >>> dirpath.join('textfile1.txt').write('foo bar baz')
+ >>> dirpath.join('textfile2.txt').write('frob bar spam eggs')
+ >>> subdir = dirpath.ensure('subdir', dir=True)
+ >>> subdir.join('textfile1.txt').write('foo baz')
+ >>> subdir.join('textfile2.txt').write('spam eggs spam foo bar spam')
+ >>> results = []
+ >>> for fpath in dirpath.visit('*.txt'):
+ ... if 'bar' in fpath.read():
+ ... results.append(fpath.basename)
+ >>> results.sort()
+ >>> results
+ ['textfile1.txt', 'textfile2.txt', 'textfile2.txt']
+
+Working with Paths
+----------------------------
+
+This example shows the ``py.path`` features for dealing with
+filesystem paths. Note that the filesystem is never touched;
+all operations are performed at the string level (so the paths
+don't have to exist, either):
+
+.. sourcecode:: pycon
+
+ >>> p1 = py.path.local('/foo/bar')
+ >>> p2 = p1.join('baz/qux')
+ >>> p2 == py.path.local('/foo/bar/baz/qux')
+ True
+ >>> sep = py.path.local.sep
+ >>> p2.relto(p1).replace(sep, '/') # os-specific path sep in the string
+ 'baz/qux'
+ >>> p2.bestrelpath(p1).replace(sep, '/')
+ '../..'
+ >>> p2.join(p2.bestrelpath(p1)) == p1
+ True
+ >>> p3 = p1 / 'baz/qux' # the / operator allows joining, too
+ >>> p2 == p3
+ True
+ >>> p4 = p1 + ".py"
+ >>> p4.basename == "bar.py"
+ True
+ >>> p4.ext == ".py"
+ True
+ >>> p4.purebasename == "bar"
+ True
+
+This should be possible on every implementation of ``py.path``, so
+regardless of whether the implementation wraps a UNIX filesystem, a Windows
+one, or a database or object tree, these functions should be available (each
+with their own notion of path separators and dealing with conversions, etc.).
+
+Checking path types
+-------------------------------
+
+Now we will show a bit about the powerful 'check()' method on paths, which
+allows you to check whether a file exists, what type it is, etc.:
+
+.. sourcecode:: pycon
+
+ >>> file1 = temppath.join('file1')
+ >>> file1.check() # does it exist?
+ False
+ >>> file1 = file1.ensure(file=True) # 'touch' the file
+ >>> file1.check()
+ True
+ >>> file1.check(dir=True) # is it a dir?
+ False
+ >>> file1.check(file=True) # or a file?
+ True
+ >>> file1.check(ext='.txt') # check the extension
+ False
+ >>> textfile = temppath.ensure('text.txt', file=True)
+ >>> textfile.check(ext='.txt')
+ True
+ >>> file1.check(basename='file1') # we can use all the path's properties here
+ True
+
+Setting svn-properties
+--------------------------------
+
+As an example of 'uncommon' methods, we'll show how to read and write
+properties in a ``py.path.svnwc`` instance:
+
+.. sourcecode:: pycon
+
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> wc.propget('foo')
+ ''
+ >>> wc.propset('foo', 'bar')
+ >>> wc.propget('foo')
+ 'bar'
+ >>> len(wc.status().prop_modified) # our own props
+ 1
+ >>> msg = wc.revert() # roll back our changes
+ >>> len(wc.status().prop_modified)
+ 0
+
+SVN authentication
+----------------------------
+
+Some uncommon functionality can also be provided as extensions, such as SVN
+authentication:
+
+.. sourcecode:: pycon
+
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> auth = py.path.SvnAuth('anonymous', 'user', cache_auth=False,
+ ... interactive=False)
+ >>> wc.auth = auth
+ >>> wc.update() # this should work
+ >>> path = wc.ensure('thisshouldnotexist.txt')
+ >>> try:
+ ... path.commit('testing')
+ ... except py.process.cmdexec.Error, e:
+ ... pass
+ >>> 'authorization failed' in str(e)
+ True
+
+Known problems / limitations
+===================================
+
+* The SVN path objects require the "svn" command line tool;
+ there is currently no support for Python bindings.
+ Parsing the svn output can lead to problems, particularly
+ if you have a non-English locale setting.
+
+* While the path objects basically work on Windows,
+ no attention has yet been paid to making unicode paths
+ work or to dealing with the famous "8.3" filename issues.
+
+
diff --git a/doc/style.css b/doc/style.css
index 1faf762..af9e88f 100644
--- a/doc/style.css
+++ b/doc/style.css
@@ -1,1044 +1,1044 @@
-body,body.editor,body.body {
- font: 110% "Times New Roman", Arial, Verdana, Helvetica, serif;
- background: White;
- color: Black;
-}
-
-a, a.reference {
- text-decoration: none;
-}
-a[href]:hover { text-decoration: underline; }
-
-img {
- border: none;
- vertical-align: middle;
-}
-
-p, div.text {
- text-align: left;
- line-height: 1.5em;
- margin: 0.5em 0em 0em 0em;
-}
-
-
-
-p a:active {
- color: Red;
- background-color: transparent;
-}
-
-p img {
- border: 0;
- margin: 0;
-}
-
-img.inlinephoto {
- padding: 0;
- padding-right: 1em;
- padding-top: 0.7em;
- float: left;
-}
-
-hr {
- clear: both;
- height: 1px;
- color: #8CACBB;
- background-color: transparent;
-}
-
-
-ul {
- line-height: 1.5em;
- /*list-style-image: url("bullet.gif"); */
- margin-left: 1.5em;
- padding:0;
-}
-
-ol {
- line-height: 1.5em;
- margin-left: 1.5em;
- padding:0;
-}
-
-ul a, ol a {
- text-decoration: underline;
-}
-
-dl {
-}
-
-dd {
- line-height: 1.5em;
- margin-bottom: 1em;
-}
-
-blockquote {
- font-family: Times, "Times New Roman", serif;
- font-style: italic;
- font-size: 120%;
-}
-
-code {
- color: Black;
- /*background-color: #dee7ec;*/
- /*background-color: #cccccc;*/
-}
-
-pre {
- padding: 1em;
- border: 1px dotted #8cacbb;
- color: Black;
- /*
- background-color: #dee7ec;
- background-color: #cccccc;
- background-color: #dee7ec;
- */
- overflow: auto;
-}
-
-
-.netscape4 {
- display: none;
-}
-
-/* main page styles */
-
-/*a[href]:hover { color: black; text-decoration: underline; }
-a[href]:link { color: black; text-decoration: underline; }
-a[href] { color: black; text-decoration: underline; }
-*/
-
-span.menu_selected {
- color: black;
- text-decoration: none;
- padding-right: 0.3em;
- background-color: #cccccc;
-}
-
-
-a.menu {
- /*color: #3ba6ec; */
- font: 120% Verdana, Helvetica, Arial, sans-serif;
- text-decoration: none;
- padding-right: 0.3em;
-}
-
-a.menu[href]:visited, a.menu[href]:link{
- /*color: #3ba6ec; */
- text-decoration: none;
-}
-
-a.menu[href]:hover {
- /*color: black;*/
-}
-
-div#pagetitle{
- /*border-spacing: 20px;*/
- font: 160% Verdana, Helvetica, Arial, sans-serif;
- color: #3ba6ec;
- vertical-align: middle;
- left: 80 px;
- padding-bottom: 0.3em;
-}
-
-a.wikicurrent {
- font: 100% Verdana, Helvetica, Arial, sans-serif;
- color: #3ba6ec;
- vertical-align: middle;
-}
-
-
-table.body {
- border: 0;
- /*padding: 0;
- border-spacing: 0px;
- border-collapse: separate;
- */
-}
-
-td.page-header-left {
- padding: 5px;
- /*border-bottom: 1px solid #444444;*/
-}
-
-td.page-header-top {
- padding: 0;
-
- /*border-bottom: 1px solid #444444;*/
-}
-
-td.sidebar {
- padding: 1 0 0 1;
-}
-
-td.sidebar p.classblock {
- padding: 0 5 0 5;
- margin: 1 1 1 1;
- border: 1px solid #444444;
- background-color: #eeeeee;
-}
-
-td.sidebar p.userblock {
- padding: 0 5 0 5;
- margin: 1 1 1 1;
- border: 1px solid #444444;
- background-color: #eeeeff;
-}
-
-td.content {
- padding: 1 5 1 5;
- vertical-align: top;
- width: 100%;
-}
-
-p.ok-message {
- background-color: #22bb22;
- padding: 5 5 5 5;
- color: white;
- font-weight: bold;
-}
-p.error-message {
- background-color: #bb2222;
- padding: 5 5 5 5;
- color: white;
- font-weight: bold;
-}
-
-p:first-child {
- margin: 0 ;
- padding: 0;
-}
-
-/* style for forms */
-table.form {
- padding: 2;
- border-spacing: 0px;
- border-collapse: separate;
-}
-
-table.form th {
- color: #333388;
- text-align: right;
- vertical-align: top;
- font-weight: normal;
-}
-table.form th.header {
- font-weight: bold;
- background-color: #eeeeff;
- text-align: left;
-}
-
-table.form th.required {
- font-weight: bold;
-}
-
-table.form td {
- color: #333333;
- empty-cells: show;
- vertical-align: top;
-}
-
-table.form td.optional {
- font-weight: bold;
- font-style: italic;
-}
-
-table.form td.html {
- color: #777777;
-}
-
-/* style for lists */
-table.list {
- border-spacing: 0px;
- border-collapse: separate;
- vertical-align: top;
- padding-top: 0;
- width: 100%;
-}
-
-table.list th {
- padding: 0 4 0 4;
- color: #404070;
- background-color: #eeeeff;
- border-right: 1px solid #404070;
- border-top: 1px solid #404070;
- border-bottom: 1px solid #404070;
- vertical-align: top;
- empty-cells: show;
-}
-table.list th a[href]:hover { color: #404070 }
-table.list th a[href]:link { color: #404070 }
-table.list th a[href] { color: #404070 }
-table.list th.group {
- background-color: #f4f4ff;
- text-align: center;
- font-size: 120%;
-}
-
-table.list td {
- padding: 0 4 0 4;
- border: 0 2 0 2;
- border-right: 1px solid #404070;
- color: #404070;
- background-color: white;
- vertical-align: top;
- empty-cells: show;
-}
-
-table.list tr.normal td {
- background-color: white;
- white-space: nowrap;
-}
-
-table.list tr.alt td {
- background-color: #efefef;
- white-space: nowrap;
-}
-
-table.list td:first-child {
- border-left: 1px solid #404070;
- border-right: 1px solid #404070;
-}
-
-table.list th:first-child {
- border-left: 1px solid #404070;
- border-right: 1px solid #404070;
-}
-
-table.list tr.navigation th {
- text-align: right;
-}
-table.list tr.navigation th:first-child {
- border-right: none;
- text-align: left;
-}
-
-
-/* style for message displays */
-table.messages {
- border-spacing: 0px;
- border-collapse: separate;
- width: 100%;
-}
-
-table.messages th.header{
- padding-top: 10px;
- border-bottom: 1px solid gray;
- font-weight: bold;
- background-color: white;
- color: #707040;
-}
-
-table.messages th {
- font-weight: bold;
- color: black;
- text-align: left;
- border-bottom: 1px solid #afafaf;
-}
-
-table.messages td {
- font-family: monospace;
- background-color: #efefef;
- border-bottom: 1px solid #afafaf;
- color: black;
- empty-cells: show;
- border-right: 1px solid #afafaf;
- vertical-align: top;
- padding: 2 5 2 5;
-}
-
-table.messages td:first-child {
- border-left: 1px solid #afafaf;
- border-right: 1px solid #afafaf;
-}
-
-/* style for file displays */
-table.files {
- border-spacing: 0px;
- border-collapse: separate;
- width: 100%;
-}
-
-table.files th.header{
- padding-top: 10px;
- border-bottom: 1px solid gray;
- font-weight: bold;
- background-color: white;
- color: #707040;
-}
-
-table.files th {
- border-bottom: 1px solid #afafaf;
- font-weight: bold;
- text-align: left;
-}
-
-table.files td {
- font-family: monospace;
- empty-cells: show;
-}
-
-/* style for history displays */
-table.history {
- border-spacing: 0px;
- border-collapse: separate;
- width: 100%;
-}
-
-table.history th.header{
- padding-top: 10px;
- border-bottom: 1px solid gray;
- font-weight: bold;
- background-color: white;
- color: #707040;
- font-size: 100%;
-}
-
-table.history th {
- border-bottom: 1px solid #afafaf;
- font-weight: bold;
- text-align: left;
- font-size: 90%;
-}
-
-table.history td {
- font-size: 90%;
- vertical-align: top;
- empty-cells: show;
-}
-
-
-/* style for class list */
-table.classlist {
- border-spacing: 0px;
- border-collapse: separate;
- width: 100%;
-}
-
-table.classlist th.header{
- padding-top: 10px;
- border-bottom: 1px solid gray;
- font-weight: bold;
- background-color: white;
- color: #707040;
-}
-
-table.classlist th {
- font-weight: bold;
- text-align: left;
-}
-
-
-/* style for class help display */
-table.classhelp {
- border-spacing: 0px;
- border-collapse: separate;
- width: 100%;
-}
-
-table.classhelp th {
- font-weight: bold;
- text-align: left;
- color: #707040;
-}
-
-table.classhelp td {
- padding: 2 2 2 2;
- border: 1px solid black;
- text-align: left;
- vertical-align: top;
- empty-cells: show;
-}
-
-
-/* style for "other" displays */
-table.otherinfo {
- border-spacing: 0px;
- border-collapse: separate;
- width: 100%;
-}
-
-table.otherinfo th.header{
- padding-top: 10px;
- border-bottom: 1px solid gray;
- font-weight: bold;
- background-color: white;
- color: #707040;
-}
-
-table.otherinfo th {
- border-bottom: 1px solid #afafaf;
- font-weight: bold;
- text-align: left;
-}
-
-input {
- border: 1px solid #8cacbb;
- color: Black;
- background-color: white;
- vertical-align: middle;
- margin-bottom: 1px; /* IE bug fix */
- padding: 0.1em;
-}
-
-select {
- border: 1px solid #8cacbb;
- color: Black;
- background-color: white;
- vertical-align: middle;
- margin-bottom: 1px; /* IE bug fix */
- padding: 0.1em;
-}
-
-
-a.nonexistent {
- color: #FF2222;
-}
-a.nonexistent:visited {
- color: #FF2222;
-}
-a.external {
- color: #AA6600;
-}
-
-/*
-dl,ul,ol {
- margin-top: 1pt;
-}
-tt,pre {
- font-family: Lucida Console,Courier New,Courier,monotype;
- font-size: 12pt;
-}
-pre.code {
- margin-top: 8pt;
- margin-bottom: 8pt;
- background-color: #FFFFEE;
- white-space:pre;
- border-style:solid;
- border-width:1pt;
- border-color:#999999;
- color:#111111;
- padding:5px;
- width:100%;
-}
-*/
-div.diffold {
- background-color: #FFFF80;
- border-style:none;
- border-width:thin;
- width:100%;
-}
-div.diffnew {
- background-color: #80FF80;
- border-style:none;
- border-width:thin;
- width:100%;
-}
-div.message {
- margin-top: 6pt;
- background-color: #E8FFE8;
- border-style:solid;
- border-width:1pt;
- border-color:#999999;
- color:#440000;
- padding:5px;
- width:100%;
-}
-strong.highlight {
- background-color: #FFBBBB;
-/* as usual, NetScape fucks up with innocent CSS
- border-color: #FFAAAA;
- border-style: solid;
- border-width: 1pt;
-*/
-}
-
-table.navibar {
- background-color: #C8C8C8;
- border-spacing: 3px;
-}
-td.navibar {
- background-color: #E8E8E8;
- vertical-align: top;
- text-align: right;
- padding: 0px;
-}
-
-a#versioninfo {
- color: blue;
-}
-
-div#pagename {
- font-size: 140%;
- color: blue;
- text-align: center;
- font-weight: bold;
- background-color: white;
- padding: 0 ;
-}
-
-a.wikiaction, input.wikiaction {
- color: black;
- text-decoration: None;
- text-align: center;
- color: black;
- /*border: 1px solid #3ba6ec; */
- margin: 4px;
- padding: 5;
- padding-bottom: 0;
- white-space: nowrap;
-}
-
-a.wikiaction[href]:hover {
- color: black;
- text-decoration: none;
- /*background-color: #dddddd; */
-}
-
-
-div.legenditem {
- padding-top: 0.5em;
- padding-left: 0.3em;
-}
-
-span.wikitoken {
- background-color: #eeeeee;
-}
-
-
-div#contentspace h1:first-child, div.heading:first-child {
- padding-top: 0;
- margin-top: 0;
-}
-div#contentspace h2:first-child {
- padding-top: 0;
- margin-top: 0;
-}
-
-/* heading and paragraph text */
-
-div.heading, h1 {
- font-family: Verdana, Helvetica, Arial, sans-serif;
- background-color: #58b3ef;
- background-color: #FFFFFF;
- /*color: #4893cf;*/
- color: black;
- padding-top: 1.0em;
- padding-bottom:0.2em;
- text-align: left;
- margin-top: 0em;
- /*margin-bottom:8pt;*/
- font-weight: bold;
- font-size: 115%;
- border-bottom: 1px solid #8CACBB;
-}
-
-h2 {
- border-bottom: 1px dotted #8CACBB;
-}
-
-
-h1, h2, h3, h4, h5, h6 {
- color: Black;
- clear: left;
- font: 100% Verdana, Helvetica, Arial, sans-serif;
- margin: 0;
- padding-left: 0em;
- padding-top: 1em;
- padding-bottom: 0.2em;
- /*border-bottom: 1px solid #8CACBB;*/
-}
-/* h1,h2 { padding-top: 0; }*/
-
-
-h1 { font-size: 145%; }
-h2 { font-size: 115%; }
-h3 { font-size: 105%; }
-h4 { font-size: 100%; }
-h5 { font-size: 100%; }
-
-h1 a { text-decoration: None;}
-
-div.exception {
- background-color: #bb2222;
- padding: 5 5 5 5;
- color: white;
- font-weight: bold;
-}
-pre.exception {
- font-size: 110%;
- padding: 1em;
- border: 1px solid #8cacbb;
- color: Black;
- background-color: #dee7ec;
- background-color: #cccccc;
-}
-
-/* defines for navgiation bar (documentation) */
-
-
-div.direntry {
- padding-top: 0.3em;
- padding-bottom: 0.3em;
- margin-right: 1em;
- font-weight: bold;
- background-color: #dee7ec;
- font-size: 110%;
-}
-
-div.fileentry {
- font-family: Verdana, Helvetica, Arial, sans-serif;
- padding-bottom: 0.3em;
- white-space: nowrap;
- line-height: 150%;
-}
-
-a.fileentry {
- white-space: nowrap;
-}
-
-
-span.left {
- text-align: left;
-}
-span.right {
- text-align: right;
-}
-
-div.navbar {
- /*margin: 0;*/
- font-size: 80% /*smaller*/;
- font-weight: bold;
- text-align: left;
- /* position: fixed; */
- top: 100pt;
- left: 0pt; /* auto; */
- width: 120pt;
- /* right: auto;
- right: 0pt; 2em; */
-}
-
-
-div.history a {
- /* font-size: 70%; */
-}
-
-div.wikiactiontitle {
- font-weight: bold;
-}
-
-/* REST defines */
-
-div.document {
- margin: 0;
-}
-
-h1.title {
- margin: 0;
- margin-bottom: 0.5em;
-}
-
-td.toplist {
- vertical-align: top;
-}
-
-img#pyimg {
- float: left;
- padding-bottom: 1em;
-}
-
-div#navspace {
- position: absolute;
- font-size: 100%;
- width: 150px;
- overflow: hidden; /* scroll; */
-}
-
-
-div#errorline {
- position: relative;
- top: 5px;
- float: right;
-}
-
-div#contentspace {
- position: absolute;
- /* font: 120% "Times New Roman", serif;*/
- font: 110% Verdana, Helvetica, Arial, sans-serif;
- left: 170px;
- margin-right: 5px;
-}
-
-div#menubar {
-/* width: 400px; */
- float: left;
-}
-
-/* for the documentation page */
-div#title{
-
- font-size: 110%;
- color: black;
-
-
- /*background-color: #dee7ec;
- #padding: 5pt;
- #padding-bottom: 1em;
- #color: black;
- border-width: 1pt;
- border-style: solid;*/
-
-}
-
-div#docnavlist {
- /*background-color: #dee7ec; */
- padding: 5pt;
- padding-bottom: 2em;
- color: black;
- border-width: 1pt;
- /*border-style: solid;*/
-}
-
-
-/* text markup */
-
-div.listtitle {
- color: Black;
- clear: left;
- font: 120% Verdana, Helvetica, Arial, sans-serif;
- margin: 0;
- padding-left: 0em;
- padding-top: 0em;
- padding-bottom: 0.2em;
- margin-right: 0.5em;
- border-bottom: 1px solid #8CACBB;
-}
-
-div.actionbox h3 {
- padding-top: 0;
- padding-right: 0.5em;
- padding-left: 0.5em;
- background-color: #fabf00;
- text-align: center;
- border: 1px solid black; /* 8cacbb; */
-}
-
-div.actionbox a {
- display: block;
- padding-bottom: 0.5em;
- padding-top: 0.5em;
- margin-left: 0.5em;
-}
-
-div.actionbox a.history {
- display: block;
- padding-bottom: 0.5em;
- padding-top: 0.5em;
- margin-left: 0.5em;
- font-size: 90%;
-}
-
-div.actionbox {
- margin-bottom: 2em;
- padding-bottom: 1em;
- overflow: hidden; /* scroll; */
-}
-
-/* taken from docutils (oh dear, a bit senseless) */
-ol.simple, ul.simple {
- margin-bottom: 1em }
-
-ol.arabic {
- list-style: decimal }
-
-ol.loweralpha {
- list-style: lower-alpha }
-
-ol.upperalpha {
- list-style: upper-alpha }
-
-ol.lowerroman {
- list-style: lower-roman }
-
-ol.upperroman {
- list-style: upper-roman }
-
-
-/*
-:Author: David Goodger
-:Contact: goodger@users.sourceforge.net
-:date: $Date: 2003/01/22 22:26:48 $
-:version: $Revision: 1.29 $
-:copyright: This stylesheet has been placed in the public domain.
-
-Default cascading style sheet for the HTML output of Docutils.
-*/
-/*
-.first {
- margin-top: 0 }
-
-.last {
- margin-bottom: 0 }
-
-a.toc-backref {
- text-decoration: none ;
- color: black }
-
-dd {
- margin-bottom: 0.5em }
-
-div.abstract {
- margin: 2em 5em }
-
-div.abstract p.topic-title {
- font-weight: bold ;
- text-align: center }
-
-div.attention, div.caution, div.danger, div.error, div.hint,
-div.important, div.note, div.tip, div.warning {
- margin: 2em ;
- border: medium outset ;
- padding: 1em }
-
-div.attention p.admonition-title, div.caution p.admonition-title,
-div.danger p.admonition-title, div.error p.admonition-title,
-div.warning p.admonition-title {
- color: red ;
- font-weight: bold ;
- font-family: sans-serif }
-
-div.hint p.admonition-title, div.important p.admonition-title,
-div.note p.admonition-title, div.tip p.admonition-title {
- font-weight: bold ;
- font-family: sans-serif }
-
-div.dedication {
- margin: 2em 5em ;
- text-align: center ;
- font-style: italic }
-
-div.dedication p.topic-title {
- font-weight: bold ;
- font-style: normal }
-
-div.figure {
- margin-left: 2em }
-
-div.footer, div.header {
- font-size: smaller }
-
-div.system-messages {
- margin: 5em }
-
-div.system-messages h1 {
- color: red }
-
-div.system-message {
- border: medium outset ;
- padding: 1em }
-
-div.system-message p.system-message-title {
- color: red ;
- font-weight: bold }
-
-div.topic {
- margin: 2em }
-
-h1.title {
- text-align: center }
-
-h2.subtitle {
- text-align: center }
-
-hr {
- width: 75% }
-
-p.caption {
- font-style: italic }
-
-p.credits {
- font-style: italic ;
- font-size: smaller }
-
-p.label {
- white-space: nowrap }
-
-p.topic-title {
- font-weight: bold }
-
-pre.address {
- margin-bottom: 0 ;
- margin-top: 0 ;
- font-family: serif ;
- font-size: 100% }
-
-pre.line-block {
- font-family: serif ;
- font-size: 100% }
-
-pre.literal-block, pre.doctest-block {
- margin-left: 2em ;
- margin-right: 2em ;
- background-color: #eeeeee }
-
-span.classifier {
- font-family: sans-serif ;
- font-style: oblique }
-
-span.classifier-delimiter {
- font-family: sans-serif ;
- font-weight: bold }
-
-span.interpreted {
- font-family: sans-serif }
-
-span.option {
- white-space: nowrap }
-
-span.option-argument {
- font-style: italic }
-
-span.pre {
- white-space: pre }
-
-span.problematic {
- color: red }
-
-table {
- margin-top: 0.5em ;
- margin-bottom: 0.5em }
-
-table.citation {
- border-left: solid thin gray ;
- padding-left: 0.5ex }
-
-table.docinfo {
- margin: 2em 4em }
-
-table.footnote {
- border-left: solid thin black ;
- padding-left: 0.5ex }
-
-td, th {
- padding-left: 0.5em ;
- padding-right: 0.5em ;
- vertical-align: top }
-
-th.docinfo-name, th.field-name {
- font-weight: bold ;
- text-align: left ;
- white-space: nowrap }
-
-h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
- font-size: 100% }
-
-tt {
- background-color: #eeeeee }
-
-ul.auto-toc {
- list-style-type: none }
-*/
-
-div.section {
- margin-top: 1.0em ;
-}
+body,body.editor,body.body {
+ font: 110% "Times New Roman", Arial, Verdana, Helvetica, serif;
+ background: White;
+ color: Black;
+}
+
+a, a.reference {
+ text-decoration: none;
+}
+a[href]:hover { text-decoration: underline; }
+
+img {
+ border: none;
+ vertical-align: middle;
+}
+
+p, div.text {
+ text-align: left;
+ line-height: 1.5em;
+ margin: 0.5em 0em 0em 0em;
+}
+
+
+
+p a:active {
+ color: Red;
+ background-color: transparent;
+}
+
+p img {
+ border: 0;
+ margin: 0;
+}
+
+img.inlinephoto {
+ padding: 0;
+ padding-right: 1em;
+ padding-top: 0.7em;
+ float: left;
+}
+
+hr {
+ clear: both;
+ height: 1px;
+ color: #8CACBB;
+ background-color: transparent;
+}
+
+
+ul {
+ line-height: 1.5em;
+ /*list-style-image: url("bullet.gif"); */
+ margin-left: 1.5em;
+ padding:0;
+}
+
+ol {
+ line-height: 1.5em;
+ margin-left: 1.5em;
+ padding:0;
+}
+
+ul a, ol a {
+ text-decoration: underline;
+}
+
+dl {
+}
+
+dd {
+ line-height: 1.5em;
+ margin-bottom: 1em;
+}
+
+blockquote {
+ font-family: Times, "Times New Roman", serif;
+ font-style: italic;
+ font-size: 120%;
+}
+
+code {
+ color: Black;
+ /*background-color: #dee7ec;*/
+ /*background-color: #cccccc;*/
+}
+
+pre {
+ padding: 1em;
+ border: 1px dotted #8cacbb;
+ color: Black;
+ /*
+ background-color: #dee7ec;
+ background-color: #cccccc;
+ background-color: #dee7ec;
+ */
+ overflow: auto;
+}
+
+
+.netscape4 {
+ display: none;
+}
+
+/* main page styles */
+
+/*a[href]:hover { color: black; text-decoration: underline; }
+a[href]:link { color: black; text-decoration: underline; }
+a[href] { color: black; text-decoration: underline; }
+*/
+
+span.menu_selected {
+ color: black;
+ text-decoration: none;
+ padding-right: 0.3em;
+ background-color: #cccccc;
+}
+
+
+a.menu {
+ /*color: #3ba6ec; */
+ font: 120% Verdana, Helvetica, Arial, sans-serif;
+ text-decoration: none;
+ padding-right: 0.3em;
+}
+
+a.menu[href]:visited, a.menu[href]:link{
+ /*color: #3ba6ec; */
+ text-decoration: none;
+}
+
+a.menu[href]:hover {
+ /*color: black;*/
+}
+
+div#pagetitle{
+ /*border-spacing: 20px;*/
+ font: 160% Verdana, Helvetica, Arial, sans-serif;
+ color: #3ba6ec;
+ vertical-align: middle;
+ left: 80px;
+ padding-bottom: 0.3em;
+}
+
+a.wikicurrent {
+ font: 100% Verdana, Helvetica, Arial, sans-serif;
+ color: #3ba6ec;
+ vertical-align: middle;
+}
+
+
+table.body {
+ border: 0;
+ /*padding: 0;
+ border-spacing: 0px;
+ border-collapse: separate;
+ */
+}
+
+td.page-header-left {
+ padding: 5px;
+ /*border-bottom: 1px solid #444444;*/
+}
+
+td.page-header-top {
+ padding: 0;
+
+ /*border-bottom: 1px solid #444444;*/
+}
+
+td.sidebar {
+ padding: 1 0 0 1;
+}
+
+td.sidebar p.classblock {
+ padding: 0 5 0 5;
+ margin: 1 1 1 1;
+ border: 1px solid #444444;
+ background-color: #eeeeee;
+}
+
+td.sidebar p.userblock {
+ padding: 0 5 0 5;
+ margin: 1 1 1 1;
+ border: 1px solid #444444;
+ background-color: #eeeeff;
+}
+
+td.content {
+ padding: 1 5 1 5;
+ vertical-align: top;
+ width: 100%;
+}
+
+p.ok-message {
+ background-color: #22bb22;
+ padding: 5 5 5 5;
+ color: white;
+ font-weight: bold;
+}
+p.error-message {
+ background-color: #bb2222;
+ padding: 5 5 5 5;
+ color: white;
+ font-weight: bold;
+}
+
+p:first-child {
+ margin: 0 ;
+ padding: 0;
+}
+
+/* style for forms */
+table.form {
+ padding: 2;
+ border-spacing: 0px;
+ border-collapse: separate;
+}
+
+table.form th {
+ color: #333388;
+ text-align: right;
+ vertical-align: top;
+ font-weight: normal;
+}
+table.form th.header {
+ font-weight: bold;
+ background-color: #eeeeff;
+ text-align: left;
+}
+
+table.form th.required {
+ font-weight: bold;
+}
+
+table.form td {
+ color: #333333;
+ empty-cells: show;
+ vertical-align: top;
+}
+
+table.form td.optional {
+ font-weight: bold;
+ font-style: italic;
+}
+
+table.form td.html {
+ color: #777777;
+}
+
+/* style for lists */
+table.list {
+ border-spacing: 0px;
+ border-collapse: separate;
+ vertical-align: top;
+ padding-top: 0;
+ width: 100%;
+}
+
+table.list th {
+ padding: 0 4 0 4;
+ color: #404070;
+ background-color: #eeeeff;
+ border-right: 1px solid #404070;
+ border-top: 1px solid #404070;
+ border-bottom: 1px solid #404070;
+ vertical-align: top;
+ empty-cells: show;
+}
+table.list th a[href]:hover { color: #404070 }
+table.list th a[href]:link { color: #404070 }
+table.list th a[href] { color: #404070 }
+table.list th.group {
+ background-color: #f4f4ff;
+ text-align: center;
+ font-size: 120%;
+}
+
+table.list td {
+ padding: 0 4 0 4;
+ border: 0 2 0 2;
+ border-right: 1px solid #404070;
+ color: #404070;
+ background-color: white;
+ vertical-align: top;
+ empty-cells: show;
+}
+
+table.list tr.normal td {
+ background-color: white;
+ white-space: nowrap;
+}
+
+table.list tr.alt td {
+ background-color: #efefef;
+ white-space: nowrap;
+}
+
+table.list td:first-child {
+ border-left: 1px solid #404070;
+ border-right: 1px solid #404070;
+}
+
+table.list th:first-child {
+ border-left: 1px solid #404070;
+ border-right: 1px solid #404070;
+}
+
+table.list tr.navigation th {
+ text-align: right;
+}
+table.list tr.navigation th:first-child {
+ border-right: none;
+ text-align: left;
+}
+
+
+/* style for message displays */
+table.messages {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.messages th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.messages th {
+ font-weight: bold;
+ color: black;
+ text-align: left;
+ border-bottom: 1px solid #afafaf;
+}
+
+table.messages td {
+ font-family: monospace;
+ background-color: #efefef;
+ border-bottom: 1px solid #afafaf;
+ color: black;
+ empty-cells: show;
+ border-right: 1px solid #afafaf;
+ vertical-align: top;
+ padding: 2 5 2 5;
+}
+
+table.messages td:first-child {
+ border-left: 1px solid #afafaf;
+ border-right: 1px solid #afafaf;
+}
+
+/* style for file displays */
+table.files {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.files th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.files th {
+ border-bottom: 1px solid #afafaf;
+ font-weight: bold;
+ text-align: left;
+}
+
+table.files td {
+ font-family: monospace;
+ empty-cells: show;
+}
+
+/* style for history displays */
+table.history {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.history th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+ font-size: 100%;
+}
+
+table.history th {
+ border-bottom: 1px solid #afafaf;
+ font-weight: bold;
+ text-align: left;
+ font-size: 90%;
+}
+
+table.history td {
+ font-size: 90%;
+ vertical-align: top;
+ empty-cells: show;
+}
+
+
+/* style for class list */
+table.classlist {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.classlist th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.classlist th {
+ font-weight: bold;
+ text-align: left;
+}
+
+
+/* style for class help display */
+table.classhelp {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.classhelp th {
+ font-weight: bold;
+ text-align: left;
+ color: #707040;
+}
+
+table.classhelp td {
+ padding: 2 2 2 2;
+ border: 1px solid black;
+ text-align: left;
+ vertical-align: top;
+ empty-cells: show;
+}
+
+
+/* style for "other" displays */
+table.otherinfo {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.otherinfo th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.otherinfo th {
+ border-bottom: 1px solid #afafaf;
+ font-weight: bold;
+ text-align: left;
+}
+
+input {
+ border: 1px solid #8cacbb;
+ color: Black;
+ background-color: white;
+ vertical-align: middle;
+ margin-bottom: 1px; /* IE bug fix */
+ padding: 0.1em;
+}
+
+select {
+ border: 1px solid #8cacbb;
+ color: Black;
+ background-color: white;
+ vertical-align: middle;
+ margin-bottom: 1px; /* IE bug fix */
+ padding: 0.1em;
+}
+
+
+a.nonexistent {
+ color: #FF2222;
+}
+a.nonexistent:visited {
+ color: #FF2222;
+}
+a.external {
+ color: #AA6600;
+}
+
+/*
+dl,ul,ol {
+ margin-top: 1pt;
+}
+tt,pre {
+ font-family: Lucida Console,Courier New,Courier,monotype;
+ font-size: 12pt;
+}
+pre.code {
+ margin-top: 8pt;
+ margin-bottom: 8pt;
+ background-color: #FFFFEE;
+ white-space:pre;
+ border-style:solid;
+ border-width:1pt;
+ border-color:#999999;
+ color:#111111;
+ padding:5px;
+ width:100%;
+}
+*/
+div.diffold {
+ background-color: #FFFF80;
+ border-style:none;
+ border-width:thin;
+ width:100%;
+}
+div.diffnew {
+ background-color: #80FF80;
+ border-style:none;
+ border-width:thin;
+ width:100%;
+}
+div.message {
+ margin-top: 6pt;
+ background-color: #E8FFE8;
+ border-style:solid;
+ border-width:1pt;
+ border-color:#999999;
+ color:#440000;
+ padding:5px;
+ width:100%;
+}
+strong.highlight {
+ background-color: #FFBBBB;
+/* as usual, NetScape fucks up with innocent CSS
+ border-color: #FFAAAA;
+ border-style: solid;
+ border-width: 1pt;
+*/
+}
+
+table.navibar {
+ background-color: #C8C8C8;
+ border-spacing: 3px;
+}
+td.navibar {
+ background-color: #E8E8E8;
+ vertical-align: top;
+ text-align: right;
+ padding: 0px;
+}
+
+a#versioninfo {
+ color: blue;
+}
+
+div#pagename {
+ font-size: 140%;
+ color: blue;
+ text-align: center;
+ font-weight: bold;
+ background-color: white;
+ padding: 0 ;
+}
+
+a.wikiaction, input.wikiaction {
+ color: black;
+ text-decoration: None;
+ text-align: center;
+ color: black;
+ /*border: 1px solid #3ba6ec; */
+ margin: 4px;
+ padding: 5;
+ padding-bottom: 0;
+ white-space: nowrap;
+}
+
+a.wikiaction[href]:hover {
+ color: black;
+ text-decoration: none;
+ /*background-color: #dddddd; */
+}
+
+
+div.legenditem {
+ padding-top: 0.5em;
+ padding-left: 0.3em;
+}
+
+span.wikitoken {
+ background-color: #eeeeee;
+}
+
+
+div#contentspace h1:first-child, div.heading:first-child {
+ padding-top: 0;
+ margin-top: 0;
+}
+div#contentspace h2:first-child {
+ padding-top: 0;
+ margin-top: 0;
+}
+
+/* heading and paragraph text */
+
+div.heading, h1 {
+ font-family: Verdana, Helvetica, Arial, sans-serif;
+ background-color: #58b3ef;
+ background-color: #FFFFFF;
+ /*color: #4893cf;*/
+ color: black;
+ padding-top: 1.0em;
+ padding-bottom:0.2em;
+ text-align: left;
+ margin-top: 0em;
+ /*margin-bottom:8pt;*/
+ font-weight: bold;
+ font-size: 115%;
+ border-bottom: 1px solid #8CACBB;
+}
+
+h2 {
+ border-bottom: 1px dotted #8CACBB;
+}
+
+
+h1, h2, h3, h4, h5, h6 {
+ color: Black;
+ clear: left;
+ font: 100% Verdana, Helvetica, Arial, sans-serif;
+ margin: 0;
+ padding-left: 0em;
+ padding-top: 1em;
+ padding-bottom: 0.2em;
+ /*border-bottom: 1px solid #8CACBB;*/
+}
+/* h1,h2 { padding-top: 0; }*/
+
+
+h1 { font-size: 145%; }
+h2 { font-size: 115%; }
+h3 { font-size: 105%; }
+h4 { font-size: 100%; }
+h5 { font-size: 100%; }
+
+h1 a { text-decoration: None;}
+
+div.exception {
+ background-color: #bb2222;
+ padding: 5 5 5 5;
+ color: white;
+ font-weight: bold;
+}
+pre.exception {
+ font-size: 110%;
+ padding: 1em;
+ border: 1px solid #8cacbb;
+ color: Black;
+ background-color: #dee7ec;
+ background-color: #cccccc;
+}
+
+/* defines for navigation bar (documentation) */
+
+
+div.direntry {
+ padding-top: 0.3em;
+ padding-bottom: 0.3em;
+ margin-right: 1em;
+ font-weight: bold;
+ background-color: #dee7ec;
+ font-size: 110%;
+}
+
+div.fileentry {
+ font-family: Verdana, Helvetica, Arial, sans-serif;
+ padding-bottom: 0.3em;
+ white-space: nowrap;
+ line-height: 150%;
+}
+
+a.fileentry {
+ white-space: nowrap;
+}
+
+
+span.left {
+ text-align: left;
+}
+span.right {
+ text-align: right;
+}
+
+div.navbar {
+ /*margin: 0;*/
+ font-size: 80% /*smaller*/;
+ font-weight: bold;
+ text-align: left;
+ /* position: fixed; */
+ top: 100pt;
+ left: 0pt; /* auto; */
+ width: 120pt;
+ /* right: auto;
+ right: 0pt; 2em; */
+}
+
+
+div.history a {
+ /* font-size: 70%; */
+}
+
+div.wikiactiontitle {
+ font-weight: bold;
+}
+
+/* REST defines */
+
+div.document {
+ margin: 0;
+}
+
+h1.title {
+ margin: 0;
+ margin-bottom: 0.5em;
+}
+
+td.toplist {
+ vertical-align: top;
+}
+
+img#pyimg {
+ float: left;
+ padding-bottom: 1em;
+}
+
+div#navspace {
+ position: absolute;
+ font-size: 100%;
+ width: 150px;
+ overflow: hidden; /* scroll; */
+}
+
+
+div#errorline {
+ position: relative;
+ top: 5px;
+ float: right;
+}
+
+div#contentspace {
+ position: absolute;
+ /* font: 120% "Times New Roman", serif;*/
+ font: 110% Verdana, Helvetica, Arial, sans-serif;
+ left: 170px;
+ margin-right: 5px;
+}
+
+div#menubar {
+/* width: 400px; */
+ float: left;
+}
+
+/* for the documentation page */
+div#title{
+
+ font-size: 110%;
+ color: black;
+
+
+ /*background-color: #dee7ec;
+ #padding: 5pt;
+ #padding-bottom: 1em;
+ #color: black;
+ border-width: 1pt;
+ border-style: solid;*/
+
+}
+
+div#docnavlist {
+ /*background-color: #dee7ec; */
+ padding: 5pt;
+ padding-bottom: 2em;
+ color: black;
+ border-width: 1pt;
+ /*border-style: solid;*/
+}
+
+
+/* text markup */
+
+div.listtitle {
+ color: Black;
+ clear: left;
+ font: 120% Verdana, Helvetica, Arial, sans-serif;
+ margin: 0;
+ padding-left: 0em;
+ padding-top: 0em;
+ padding-bottom: 0.2em;
+ margin-right: 0.5em;
+ border-bottom: 1px solid #8CACBB;
+}
+
+div.actionbox h3 {
+ padding-top: 0;
+ padding-right: 0.5em;
+ padding-left: 0.5em;
+ background-color: #fabf00;
+ text-align: center;
+ border: 1px solid black; /* 8cacbb; */
+}
+
+div.actionbox a {
+ display: block;
+ padding-bottom: 0.5em;
+ padding-top: 0.5em;
+ margin-left: 0.5em;
+}
+
+div.actionbox a.history {
+ display: block;
+ padding-bottom: 0.5em;
+ padding-top: 0.5em;
+ margin-left: 0.5em;
+ font-size: 90%;
+}
+
+div.actionbox {
+ margin-bottom: 2em;
+ padding-bottom: 1em;
+ overflow: hidden; /* scroll; */
+}
+
+/* taken from docutils (oh dear, a bit senseless) */
+ol.simple, ul.simple {
+ margin-bottom: 1em }
+
+ol.arabic {
+ list-style: decimal }
+
+ol.loweralpha {
+ list-style: lower-alpha }
+
+ol.upperalpha {
+ list-style: upper-alpha }
+
+ol.lowerroman {
+ list-style: lower-roman }
+
+ol.upperroman {
+ list-style: upper-roman }
+
+
+/*
+:Author: David Goodger
+:Contact: goodger@users.sourceforge.net
+:date: $Date: 2003/01/22 22:26:48 $
+:version: $Revision: 1.29 $
+:copyright: This stylesheet has been placed in the public domain.
+
+Default cascading style sheet for the HTML output of Docutils.
+*/
+/*
+.first {
+ margin-top: 0 }
+
+.last {
+ margin-bottom: 0 }
+
+a.toc-backref {
+ text-decoration: none ;
+ color: black }
+
+dd {
+ margin-bottom: 0.5em }
+
+div.abstract {
+ margin: 2em 5em }
+
+div.abstract p.topic-title {
+ font-weight: bold ;
+ text-align: center }
+
+div.attention, div.caution, div.danger, div.error, div.hint,
+div.important, div.note, div.tip, div.warning {
+ margin: 2em ;
+ border: medium outset ;
+ padding: 1em }
+
+div.attention p.admonition-title, div.caution p.admonition-title,
+div.danger p.admonition-title, div.error p.admonition-title,
+div.warning p.admonition-title {
+ color: red ;
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.hint p.admonition-title, div.important p.admonition-title,
+div.note p.admonition-title, div.tip p.admonition-title {
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.dedication {
+ margin: 2em 5em ;
+ text-align: center ;
+ font-style: italic }
+
+div.dedication p.topic-title {
+ font-weight: bold ;
+ font-style: normal }
+
+div.figure {
+ margin-left: 2em }
+
+div.footer, div.header {
+ font-size: smaller }
+
+div.system-messages {
+ margin: 5em }
+
+div.system-messages h1 {
+ color: red }
+
+div.system-message {
+ border: medium outset ;
+ padding: 1em }
+
+div.system-message p.system-message-title {
+ color: red ;
+ font-weight: bold }
+
+div.topic {
+ margin: 2em }
+
+h1.title {
+ text-align: center }
+
+h2.subtitle {
+ text-align: center }
+
+hr {
+ width: 75% }
+
+p.caption {
+ font-style: italic }
+
+p.credits {
+ font-style: italic ;
+ font-size: smaller }
+
+p.label {
+ white-space: nowrap }
+
+p.topic-title {
+ font-weight: bold }
+
+pre.address {
+ margin-bottom: 0 ;
+ margin-top: 0 ;
+ font-family: serif ;
+ font-size: 100% }
+
+pre.line-block {
+ font-family: serif ;
+ font-size: 100% }
+
+pre.literal-block, pre.doctest-block {
+ margin-left: 2em ;
+ margin-right: 2em ;
+ background-color: #eeeeee }
+
+span.classifier {
+ font-family: sans-serif ;
+ font-style: oblique }
+
+span.classifier-delimiter {
+ font-family: sans-serif ;
+ font-weight: bold }
+
+span.interpreted {
+ font-family: sans-serif }
+
+span.option {
+ white-space: nowrap }
+
+span.option-argument {
+ font-style: italic }
+
+span.pre {
+ white-space: pre }
+
+span.problematic {
+ color: red }
+
+table {
+ margin-top: 0.5em ;
+ margin-bottom: 0.5em }
+
+table.citation {
+ border-left: solid thin gray ;
+ padding-left: 0.5ex }
+
+table.docinfo {
+ margin: 2em 4em }
+
+table.footnote {
+ border-left: solid thin black ;
+ padding-left: 0.5ex }
+
+td, th {
+ padding-left: 0.5em ;
+ padding-right: 0.5em ;
+ vertical-align: top }
+
+th.docinfo-name, th.field-name {
+ font-weight: bold ;
+ text-align: left ;
+ white-space: nowrap }
+
+h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
+ font-size: 100% }
+
+tt {
+ background-color: #eeeeee }
+
+ul.auto-toc {
+ list-style-type: none }
+*/
+
+div.section {
+ margin-top: 1.0em ;
+}
diff --git a/doc/xml.txt b/doc/xml.txt
index 1022de6..7fa2128 100644
--- a/doc/xml.txt
+++ b/doc/xml.txt
@@ -1,164 +1,164 @@
-====================================================
-py.xml: simple pythonic xml/html file generation
-====================================================
-
-Motivation
-==========
-
-There are a plethora of frameworks and libraries to generate
-xml and html trees. However, many of them are large, have a
-steep learning curve and are often hard to debug. Not to
-speak of the fact that they are frameworks to begin with.
-
-.. _xist: http://www.livinglogic.de/Python/xist/index.html
-
-a pythonic object model , please
-================================
-
-The py lib offers a pythonic way to generate xml/html, based on
-ideas from xist_ which `uses python class objects`_ to build
-xml trees. However, xist_'s implementation is somewhat heavy
-because it has additional goals like transformations and
-supporting many namespaces. But its basic idea is very easy.
-
-.. _`uses python class objects`: http://www.livinglogic.de/Python/xist/Howto.html
-
-generating arbitrary xml structures
------------------------------------
-
-With ``py.xml.Namespace`` you have the basis
-to generate custom xml-fragments on the fly::
-
- class ns(py.xml.Namespace):
- "my custom xml namespace"
- doc = ns.books(
- ns.book(
- ns.author("May Day"),
- ns.title("python for java programmers"),),
- ns.book(
- ns.author("why"),
- ns.title("Java for Python programmers"),),
- publisher="N.N",
- )
- print doc.unicode(indent=2).encode('utf8')
-
-will give you this representation::
-
- <books publisher="N.N">
- <book>
- <author>May Day</author>
- <title>python for java programmers</title></book>
- <book>
- <author>why</author>
- <title>Java for Python programmers</title></book></books>
-
-In a sentence: positional arguments are child-tags and
-keyword-arguments are attributes.
-
-On a side note, you'll see that the unicode-serializer
-supports a nice indentation style which keeps your generated
-html readable, basically through emulating python's white
-space significance by putting closing-tags rightmost and
-almost invisible at first glance :-)
-
-basic example for generating html
----------------------------------
-
-Consider this example::
-
- from py.xml import html # html namespace
-
- paras = "First Para", "Second para"
-
- doc = html.html(
- html.head(
- html.meta(name="Content-Type", value="text/html; charset=latin1")),
- html.body(
- [html.p(p) for p in paras]))
-
- print unicode(doc).encode('latin1')
-
-Again, tags are objects which contain tags and have attributes.
-More exactly, Tags inherit from the list type and thus can be
-manipulated as list objects. They additionally support a default
-way to represent themselves as a serialized unicode object.
-
-If you happen to look at the py.xml implementation you'll
-note that the tag/namespace implementation consumes some 50 lines
-with another 50 lines for the unicode serialization code.
-
-CSS-styling your html Tags
---------------------------
-
-One aspect where many of the huge python xml/html generation
-frameworks utterly fail is a clean and convenient integration
-of CSS styling. Often, developers are left alone with keeping
-CSS style definitions in sync with some style files
-represented as strings (often in a separate .css file). Not
-only is this hard to debug but the missing abstractions make
-it hard to modify the styling of your tags or to choose custom
-style representations (inline, html.head or external). Add the
-Browers usual tolerance of messyness and errors in Style
-references and welcome to hell, known as the domain of
-developing web applications :-)
-
-By contrast, consider this CSS styling example::
-
- class my(html):
- "my initial custom style"
- class body(html.body):
- style = html.Style(font_size = "120%")
-
- class h2(html.h2):
- style = html.Style(background = "grey")
-
- class p(html.p):
- style = html.Style(font_weight="bold")
-
- doc = my.html(
- my.head(),
- my.body(
- my.h2("hello world"),
- my.p("bold as bold can")
- )
- )
-
- print doc.unicode(indent=2)
-
-This will give you a small'n mean self contained
-represenation by default::
-
- <html>
- <head/>
- <body style="font-size: 120%">
- <h2 style="background: grey">hello world</h2>
- <p style="font-weight: bold">bold as bold can</p></body></html>
-
-Most importantly, note that the inline-styling is just an
-implementation detail of the unicode serialization code.
-You can easily modify the serialization to put your styling into the
-``html.head`` or in a separate file and autogenerate CSS-class
-names or ids.
-
-Hey, you could even write tests that you are using correct
-styles suitable for specific browser requirements. Did i mention
-that the ability to easily write tests for your generated
-html and its serialization could help to develop _stable_ user
-interfaces?
-
-More to come ...
-----------------
-
-For now, i don't think we should strive to offer much more
-than the above. However, it is probably not hard to offer
-*partial serialization* to allow generating maybe hundreds of
-complex html documents per second. Basically we would allow
-putting callables both as Tag content and as values of
-attributes. A slightly more advanced Serialization would then
-produce a list of unicode objects intermingled with callables.
-At HTTP-Request time the callables would get called to
-complete the probably request-specific serialization of
-your Tags. Hum, it's probably harder to explain this than to
-actually code it :-)
-
-.. _`py.test`: test/index.html
+====================================================
+py.xml: simple pythonic xml/html file generation
+====================================================
+
+Motivation
+==========
+
+There is a plethora of frameworks and libraries for generating
+xml and html trees. However, many of them are large, have a
+steep learning curve and are often hard to debug. Not to
+speak of the fact that they are frameworks to begin with.
+
+.. _xist: http://www.livinglogic.de/Python/xist/index.html
+
+a pythonic object model, please
+================================
+
+The py lib offers a pythonic way to generate xml/html, based on
+ideas from xist_ which `uses python class objects`_ to build
+xml trees. However, xist_'s implementation is somewhat heavy
+because it has additional goals like transformations and
+supporting many namespaces. But its basic idea is very easy.
+
+.. _`uses python class objects`: http://www.livinglogic.de/Python/xist/Howto.html
+
+generating arbitrary xml structures
+-----------------------------------
+
+With ``py.xml.Namespace`` you have the basis
+to generate custom xml-fragments on the fly::
+
+ class ns(py.xml.Namespace):
+ "my custom xml namespace"
+ doc = ns.books(
+ ns.book(
+ ns.author("May Day"),
+ ns.title("python for java programmers"),),
+ ns.book(
+ ns.author("why"),
+ ns.title("Java for Python programmers"),),
+ publisher="N.N",
+ )
+ print doc.unicode(indent=2).encode('utf8')
+
+will give you this representation::
+
+ <books publisher="N.N">
+ <book>
+ <author>May Day</author>
+ <title>python for java programmers</title></book>
+ <book>
+ <author>why</author>
+ <title>Java for Python programmers</title></book></books>
+
+In a sentence: positional arguments are child-tags and
+keyword-arguments are attributes.
+
+On a side note, you'll see that the unicode serializer
+supports a nice indentation style which keeps your generated
+html readable, basically by emulating python's whitespace
+significance: closing tags are put rightmost, where they are
+almost invisible at first glance :-)
+
+basic example for generating html
+---------------------------------
+
+Consider this example::
+
+ from py.xml import html # html namespace
+
+ paras = "First Para", "Second para"
+
+ doc = html.html(
+ html.head(
+ html.meta(name="Content-Type", value="text/html; charset=latin1")),
+ html.body(
+ [html.p(p) for p in paras]))
+
+ print unicode(doc).encode('latin1')
+
+Again, tags are objects which contain tags and have attributes.
+More exactly, Tags inherit from the list type and thus can be
+manipulated as list objects. They additionally support a default
+way to represent themselves as a serialized unicode object.
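+
+For instance, since a Tag is just a list of its children, you can
+keep working on the ``doc`` from the example above with ordinary
+list operations (a small sketch added here for illustration)::
+
+  body = doc[1]                        # second positional child: the html.body tag
+  body.append(html.p("Third para"))    # add one more paragraph
+  print unicode(doc).encode('latin1')  # serialize again, now with three paragraphs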
+
+If you happen to look at the py.xml implementation you'll
+note that the tag/namespace implementation consumes some 50 lines
+with another 50 lines for the unicode serialization code.
+
+CSS-styling your html Tags
+--------------------------
+
+One aspect where many of the huge python xml/html generation
+frameworks utterly fail is a clean and convenient integration
+of CSS styling. Often, developers are left alone with keeping
+CSS style definitions in sync with some style files
+represented as strings (often in a separate .css file). Not
+only is this hard to debug but the missing abstractions make
+it hard to modify the styling of your tags or to choose custom
+style representations (inline, html.head or external). Add the
+browsers' usual tolerance of messiness and errors in style
+references and welcome to hell, known as the domain of
+developing web applications :-)
+
+By contrast, consider this CSS styling example::
+
+ class my(html):
+ "my initial custom style"
+ class body(html.body):
+ style = html.Style(font_size = "120%")
+
+ class h2(html.h2):
+ style = html.Style(background = "grey")
+
+ class p(html.p):
+ style = html.Style(font_weight="bold")
+
+ doc = my.html(
+ my.head(),
+ my.body(
+ my.h2("hello world"),
+ my.p("bold as bold can")
+ )
+ )
+
+ print doc.unicode(indent=2)
+
+This will give you a small'n mean, self-contained
+representation by default::
+
+ <html>
+ <head/>
+ <body style="font-size: 120%">
+ <h2 style="background: grey">hello world</h2>
+ <p style="font-weight: bold">bold as bold can</p></body></html>
+
+Most importantly, note that the inline-styling is just an
+implementation detail of the unicode serialization code.
+You can easily modify the serialization to put your styling into the
+``html.head`` or into a separate file and autogenerate CSS class
+names or ids.
+
+Hey, you could even write tests that check you are using correct
+styles suitable for specific browser requirements. Did I mention
+that the ability to easily write tests for your generated
+html and its serialization could help to develop _stable_ user
+interfaces?
+
+More to come ...
+----------------
+
+For now, I don't think we should strive to offer much more
+than the above. However, it is probably not hard to offer
+*partial serialization* to allow generating maybe hundreds of
+complex html documents per second. Basically we would allow
+putting callables both as Tag content and as values of
+attributes. A slightly more advanced serialization would then
+produce a list of unicode objects intermingled with callables.
+At HTTP-request time the callables would get called to
+complete the probably request-specific serialization of
+your Tags. Hum, it's probably harder to explain this than to
+actually code it :-)
+
+.. _`py.test`: test/index.html
diff --git a/py.egg-info/PKG-INFO b/py.egg-info/PKG-INFO
index fadff4c..2bb58ea 100644
--- a/py.egg-info/PKG-INFO
+++ b/py.egg-info/PKG-INFO
@@ -1,46 +1,46 @@
-Metadata-Version: 1.1
-Name: py
-Version: 1.4.32
-Summary: library with cross-python path, ini-parsing, io, code, log facilities
-Home-page: http://pylib.readthedocs.org/
-Author: holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others
-Author-email: pytest-dev@python.org
-License: MIT license
-Description: .. image:: https://img.shields.io/pypi/pyversions/pytest.svg
- :target: https://pypi.org/project/py
- .. image:: https://img.shields.io/travis/pytest-dev/py.svg
- :target: https://travis-ci.org/pytest-dev/py
-
- The py lib is a Python development support library featuring
- the following tools and modules:
-
- * ``py.path``: uniform local and svn path objects
- * ``py.apipkg``: explicit API control and lazy-importing
- * ``py.iniconfig``: easy parsing of .ini files
- * ``py.code``: dynamic code generation and introspection
-
- NOTE: prior to the 1.4 release this distribution used to
- contain py.test which is now its own package, see http://pytest.org
-
- For questions and more information please visit http://pylib.readthedocs.org
-
- Bugs and issues: https://github.com/pytest-dev/py
-
- Authors: Holger Krekel and others, 2004-2016
-
-Platform: unix
-Platform: linux
-Platform: osx
-Platform: cygwin
-Platform: win32
-Classifier: Development Status :: 6 - Mature
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Topic :: Software Development :: Testing
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
+Metadata-Version: 1.1
+Name: py
+Version: 1.4.33
+Summary: library with cross-python path, ini-parsing, io, code, log facilities
+Home-page: http://py.readthedocs.io/
+Author: holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others
+Author-email: pytest-dev@python.org
+License: MIT license
+Description: .. image:: https://img.shields.io/pypi/pyversions/pytest.svg
+ :target: https://pypi.org/project/py
+ .. image:: https://img.shields.io/travis/pytest-dev/py.svg
+ :target: https://travis-ci.org/pytest-dev/py
+
+ The py lib is a Python development support library featuring
+ the following tools and modules:
+
+ * ``py.path``: uniform local and svn path objects
+ * ``py.apipkg``: explicit API control and lazy-importing
+ * ``py.iniconfig``: easy parsing of .ini files
+ * ``py.code``: dynamic code generation and introspection
+
+ NOTE: prior to the 1.4 release this distribution used to
+ contain py.test which is now its own package, see http://pytest.org
+
+ For questions and more information please visit http://py.readthedocs.org
+
+ Bugs and issues: https://github.com/pytest-dev/py
+
+ Authors: Holger Krekel and others, 2004-2017
+
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: cygwin
+Platform: win32
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
diff --git a/py.egg-info/SOURCES.txt b/py.egg-info/SOURCES.txt
index 253a26b..577d87f 100644
--- a/py.egg-info/SOURCES.txt
+++ b/py.egg-info/SOURCES.txt
@@ -83,106 +83,34 @@ py/_process/cmdexec.py
py/_process/forkedfunc.py
py/_process/killproc.py
testing/conftest.py
-testing/conftest.pyc
testing/test_iniconfig.py
-testing/__pycache__/conftest.cpython-27-PYTEST.pyc
-testing/__pycache__/conftest.cpython-35-PYTEST.pyc
-testing/__pycache__/conftest.cpython-35.pyc
-testing/__pycache__/test_iniconfig.cpython-27-PYTEST.pyc
-testing/__pycache__/test_iniconfig.cpython-35-PYTEST.pyc
testing/code/test_assertion.py
testing/code/test_code.py
testing/code/test_excinfo.py
testing/code/test_source.py
-testing/code/__pycache__/test_assertion.cpython-27-PYTEST.pyc
-testing/code/__pycache__/test_assertion.cpython-35-PYTEST.pyc
-testing/code/__pycache__/test_code.cpython-27-PYTEST.pyc
-testing/code/__pycache__/test_code.cpython-35-PYTEST.pyc
-testing/code/__pycache__/test_excinfo.cpython-27-PYTEST.pyc
-testing/code/__pycache__/test_excinfo.cpython-35-PYTEST.pyc
-testing/code/__pycache__/test_source.cpython-27-PYTEST.pyc
-testing/code/__pycache__/test_source.cpython-35-PYTEST.pyc
testing/io_/__init__.py
-testing/io_/__init__.pyc
testing/io_/test_capture.py
testing/io_/test_saferepr.py
testing/io_/test_terminalwriter.py
-testing/io_/__pycache__/__init__.cpython-35.pyc
-testing/io_/__pycache__/test_capture.cpython-27-PYTEST.pyc
-testing/io_/__pycache__/test_capture.cpython-35-PYTEST.pyc
-testing/io_/__pycache__/test_saferepr.cpython-27-PYTEST.pyc
-testing/io_/__pycache__/test_saferepr.cpython-35-PYTEST.pyc
-testing/io_/__pycache__/test_terminalwriter.cpython-27-PYTEST.pyc
-testing/io_/__pycache__/test_terminalwriter.cpython-35-PYTEST.pyc
testing/log/__init__.py
-testing/log/__init__.pyc
testing/log/test_log.py
testing/log/test_warning.py
-testing/log/__pycache__/__init__.cpython-35.pyc
-testing/log/__pycache__/test_log.cpython-27-PYTEST.pyc
-testing/log/__pycache__/test_log.cpython-35-PYTEST.pyc
-testing/log/__pycache__/test_warning.cpython-27-PYTEST.pyc
-testing/log/__pycache__/test_warning.cpython-35-PYTEST.pyc
-testing/path/__init__.pyc
testing/path/common.py
-testing/path/common.pyc
testing/path/conftest.py
-testing/path/conftest.pyc
testing/path/repotest.dump
testing/path/svntestbase.py
-testing/path/svntestbase.pyc
testing/path/test_cacheutil.py
testing/path/test_local.py
testing/path/test_svnauth.py
testing/path/test_svnurl.py
testing/path/test_svnwc.py
-testing/path/__pycache__/__init__.cpython-35.pyc
-testing/path/__pycache__/common.cpython-27-PYTEST.pyc
-testing/path/__pycache__/common.cpython-35-PYTEST.pyc
-testing/path/__pycache__/common.cpython-35.pyc
-testing/path/__pycache__/conftest.cpython-27-PYTEST.pyc
-testing/path/__pycache__/conftest.cpython-35-PYTEST.pyc
-testing/path/__pycache__/conftest.cpython-35.pyc
-testing/path/__pycache__/svntestbase.cpython-27-PYTEST.pyc
-testing/path/__pycache__/svntestbase.cpython-35-PYTEST.pyc
-testing/path/__pycache__/svntestbase.cpython-35.pyc
-testing/path/__pycache__/test_cacheutil.cpython-27-PYTEST.pyc
-testing/path/__pycache__/test_cacheutil.cpython-35-PYTEST.pyc
-testing/path/__pycache__/test_local.cpython-27-PYTEST.pyc
-testing/path/__pycache__/test_local.cpython-35-PYTEST.pyc
-testing/path/__pycache__/test_svnauth.cpython-27-PYTEST.pyc
-testing/path/__pycache__/test_svnauth.cpython-35-PYTEST.pyc
-testing/path/__pycache__/test_svnurl.cpython-27-PYTEST.pyc
-testing/path/__pycache__/test_svnurl.cpython-35-PYTEST.pyc
-testing/path/__pycache__/test_svnwc.cpython-27-PYTEST.pyc
-testing/path/__pycache__/test_svnwc.cpython-35-PYTEST.pyc
testing/process/__init__.py
-testing/process/__init__.pyc
testing/process/test_cmdexec.py
testing/process/test_forkedfunc.py
testing/process/test_killproc.py
-testing/process/__pycache__/__init__.cpython-35.pyc
-testing/process/__pycache__/test_cmdexec.cpython-27-PYTEST.pyc
-testing/process/__pycache__/test_cmdexec.cpython-35-PYTEST.pyc
-testing/process/__pycache__/test_forkedfunc.cpython-27-PYTEST.pyc
-testing/process/__pycache__/test_forkedfunc.cpython-35-PYTEST.pyc
-testing/process/__pycache__/test_killproc.cpython-27-PYTEST.pyc
-testing/process/__pycache__/test_killproc.cpython-35-PYTEST.pyc
testing/root/__init__.py
-testing/root/__init__.pyc
testing/root/test_builtin.py
testing/root/test_error.py
testing/root/test_py_imports.py
testing/root/test_std.py
-testing/root/test_xmlgen.py
-testing/root/__pycache__/__init__.cpython-35.pyc
-testing/root/__pycache__/test_builtin.cpython-27-PYTEST.pyc
-testing/root/__pycache__/test_builtin.cpython-35-PYTEST.pyc
-testing/root/__pycache__/test_error.cpython-27-PYTEST.pyc
-testing/root/__pycache__/test_error.cpython-35-PYTEST.pyc
-testing/root/__pycache__/test_py_imports.cpython-27-PYTEST.pyc
-testing/root/__pycache__/test_py_imports.cpython-35-PYTEST.pyc
-testing/root/__pycache__/test_std.cpython-27-PYTEST.pyc
-testing/root/__pycache__/test_std.cpython-35-PYTEST.pyc
-testing/root/__pycache__/test_xmlgen.cpython-27-PYTEST.pyc
-testing/root/__pycache__/test_xmlgen.cpython-35-PYTEST.pyc \ No newline at end of file
+testing/root/test_xmlgen.py \ No newline at end of file
diff --git a/py.egg-info/not-zip-safe b/py.egg-info/not-zip-safe
index 8b13789..d3f5a12 100644
--- a/py.egg-info/not-zip-safe
+++ b/py.egg-info/not-zip-safe
@@ -1 +1 @@
-
+
diff --git a/py/__init__.py b/py/__init__.py
index c2273a2..a694ffe 100644
--- a/py/__init__.py
+++ b/py/__init__.py
@@ -1,150 +1,152 @@
-"""
-py.test and pylib: rapid testing and development utils
-
-this module uses apipkg.py for lazy-loading sub modules
-and classes. The initpkg-dictionary below specifies
-name->value mappings where value can be another namespace
-dictionary or an import path.
-
-(c) Holger Krekel and others, 2004-2014
-"""
-__version__ = '1.4.32'
-
-from py import _apipkg
-
-# so that py.error.* instances are picklable
-import sys
-sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error')
-
-_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={
- # access to all standard lib modules
- 'std': '._std:std',
- # access to all posix errno's as classes
- 'error': '._error:error',
-
- '_pydir' : '.__metainfo:pydir',
- 'version': 'py:__version__', # backward compatibility
-
- # pytest-2.0 has a flat namespace, we use alias modules
- # to keep old references compatible
- 'test' : 'pytest',
- 'test.collect' : 'pytest',
- 'test.cmdline' : 'pytest',
-
- # hook into the top-level standard library
- 'process' : {
- '__doc__' : '._process:__doc__',
- 'cmdexec' : '._process.cmdexec:cmdexec',
- 'kill' : '._process.killproc:kill',
- 'ForkedFunc' : '._process.forkedfunc:ForkedFunc',
- },
-
- 'apipkg' : {
- 'initpkg' : '._apipkg:initpkg',
- 'ApiModule' : '._apipkg:ApiModule',
- },
-
- 'iniconfig' : {
- 'IniConfig' : '._iniconfig:IniConfig',
- 'ParseError' : '._iniconfig:ParseError',
- },
-
- 'path' : {
- '__doc__' : '._path:__doc__',
- 'svnwc' : '._path.svnwc:SvnWCCommandPath',
- 'svnurl' : '._path.svnurl:SvnCommandPath',
- 'local' : '._path.local:LocalPath',
- 'SvnAuth' : '._path.svnwc:SvnAuth',
- },
-
- # python inspection/code-generation API
- 'code' : {
- '__doc__' : '._code:__doc__',
- 'compile' : '._code.source:compile_',
- 'Source' : '._code.source:Source',
- 'Code' : '._code.code:Code',
- 'Frame' : '._code.code:Frame',
- 'ExceptionInfo' : '._code.code:ExceptionInfo',
- 'Traceback' : '._code.code:Traceback',
- 'getfslineno' : '._code.source:getfslineno',
- 'getrawcode' : '._code.code:getrawcode',
- 'patch_builtins' : '._code.code:patch_builtins',
- 'unpatch_builtins' : '._code.code:unpatch_builtins',
- '_AssertionError' : '._code.assertion:AssertionError',
- '_reinterpret_old' : '._code.assertion:reinterpret_old',
- '_reinterpret' : '._code.assertion:reinterpret',
- '_reprcompare' : '._code.assertion:_reprcompare',
- '_format_explanation' : '._code.assertion:_format_explanation',
- },
-
- # backports and additions of builtins
- 'builtin' : {
- '__doc__' : '._builtin:__doc__',
- 'enumerate' : '._builtin:enumerate',
- 'reversed' : '._builtin:reversed',
- 'sorted' : '._builtin:sorted',
- 'any' : '._builtin:any',
- 'all' : '._builtin:all',
- 'set' : '._builtin:set',
- 'frozenset' : '._builtin:frozenset',
- 'BaseException' : '._builtin:BaseException',
- 'GeneratorExit' : '._builtin:GeneratorExit',
- '_sysex' : '._builtin:_sysex',
- 'print_' : '._builtin:print_',
- '_reraise' : '._builtin:_reraise',
- '_tryimport' : '._builtin:_tryimport',
- 'exec_' : '._builtin:exec_',
- '_basestring' : '._builtin:_basestring',
- '_totext' : '._builtin:_totext',
- '_isbytes' : '._builtin:_isbytes',
- '_istext' : '._builtin:_istext',
- '_getimself' : '._builtin:_getimself',
- '_getfuncdict' : '._builtin:_getfuncdict',
- '_getcode' : '._builtin:_getcode',
- 'builtins' : '._builtin:builtins',
- 'execfile' : '._builtin:execfile',
- 'callable' : '._builtin:callable',
- 'bytes' : '._builtin:bytes',
- 'text' : '._builtin:text',
- },
-
- # input-output helping
- 'io' : {
- '__doc__' : '._io:__doc__',
- 'dupfile' : '._io.capture:dupfile',
- 'TextIO' : '._io.capture:TextIO',
- 'BytesIO' : '._io.capture:BytesIO',
- 'FDCapture' : '._io.capture:FDCapture',
- 'StdCapture' : '._io.capture:StdCapture',
- 'StdCaptureFD' : '._io.capture:StdCaptureFD',
- 'TerminalWriter' : '._io.terminalwriter:TerminalWriter',
- 'ansi_print' : '._io.terminalwriter:ansi_print',
- 'get_terminal_width' : '._io.terminalwriter:get_terminal_width',
- 'saferepr' : '._io.saferepr:saferepr',
- },
-
- # small and mean xml/html generation
- 'xml' : {
- '__doc__' : '._xmlgen:__doc__',
- 'html' : '._xmlgen:html',
- 'Tag' : '._xmlgen:Tag',
- 'raw' : '._xmlgen:raw',
- 'Namespace' : '._xmlgen:Namespace',
- 'escape' : '._xmlgen:escape',
- },
-
- 'log' : {
- # logging API ('producers' and 'consumers' connected via keywords)
- '__doc__' : '._log:__doc__',
- '_apiwarn' : '._log.warning:_apiwarn',
- 'Producer' : '._log.log:Producer',
- 'setconsumer' : '._log.log:setconsumer',
- '_setstate' : '._log.log:setstate',
- '_getstate' : '._log.log:getstate',
- 'Path' : '._log.log:Path',
- 'STDOUT' : '._log.log:STDOUT',
- 'STDERR' : '._log.log:STDERR',
- 'Syslog' : '._log.log:Syslog',
- },
-
-})
+"""
+pylib: rapid testing and development utils
+
+this module uses apipkg.py for lazy-loading sub modules
+and classes. The initpkg-dictionary below specifies
+name->value mappings where value can be another namespace
+dictionary or an import path.
+
+(c) Holger Krekel and others, 2004-2014
+"""
+__version__ = '1.4.33'
+
+from py import _apipkg
+
+# so that py.error.* instances are picklable
+import sys
+sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error')
+import py.error # "Dereference" it now just to be safe (issue110)
+
+
+_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={
+ # access to all standard lib modules
+ 'std': '._std:std',
+ # access to all posix errno's as classes
+ 'error': '._error:error',
+
+ '_pydir' : '.__metainfo:pydir',
+ 'version': 'py:__version__', # backward compatibility
+
+ # pytest-2.0 has a flat namespace, we use alias modules
+ # to keep old references compatible
+ 'test' : 'pytest',
+ 'test.collect' : 'pytest',
+ 'test.cmdline' : 'pytest',
+
+ # hook into the top-level standard library
+ 'process' : {
+ '__doc__' : '._process:__doc__',
+ 'cmdexec' : '._process.cmdexec:cmdexec',
+ 'kill' : '._process.killproc:kill',
+ 'ForkedFunc' : '._process.forkedfunc:ForkedFunc',
+ },
+
+ 'apipkg' : {
+ 'initpkg' : '._apipkg:initpkg',
+ 'ApiModule' : '._apipkg:ApiModule',
+ },
+
+ 'iniconfig' : {
+ 'IniConfig' : '._iniconfig:IniConfig',
+ 'ParseError' : '._iniconfig:ParseError',
+ },
+
+ 'path' : {
+ '__doc__' : '._path:__doc__',
+ 'svnwc' : '._path.svnwc:SvnWCCommandPath',
+ 'svnurl' : '._path.svnurl:SvnCommandPath',
+ 'local' : '._path.local:LocalPath',
+ 'SvnAuth' : '._path.svnwc:SvnAuth',
+ },
+
+ # python inspection/code-generation API
+ 'code' : {
+ '__doc__' : '._code:__doc__',
+ 'compile' : '._code.source:compile_',
+ 'Source' : '._code.source:Source',
+ 'Code' : '._code.code:Code',
+ 'Frame' : '._code.code:Frame',
+ 'ExceptionInfo' : '._code.code:ExceptionInfo',
+ 'Traceback' : '._code.code:Traceback',
+ 'getfslineno' : '._code.source:getfslineno',
+ 'getrawcode' : '._code.code:getrawcode',
+ 'patch_builtins' : '._code.code:patch_builtins',
+ 'unpatch_builtins' : '._code.code:unpatch_builtins',
+ '_AssertionError' : '._code.assertion:AssertionError',
+ '_reinterpret_old' : '._code.assertion:reinterpret_old',
+ '_reinterpret' : '._code.assertion:reinterpret',
+ '_reprcompare' : '._code.assertion:_reprcompare',
+ '_format_explanation' : '._code.assertion:_format_explanation',
+ },
+
+ # backports and additions of builtins
+ 'builtin' : {
+ '__doc__' : '._builtin:__doc__',
+ 'enumerate' : '._builtin:enumerate',
+ 'reversed' : '._builtin:reversed',
+ 'sorted' : '._builtin:sorted',
+ 'any' : '._builtin:any',
+ 'all' : '._builtin:all',
+ 'set' : '._builtin:set',
+ 'frozenset' : '._builtin:frozenset',
+ 'BaseException' : '._builtin:BaseException',
+ 'GeneratorExit' : '._builtin:GeneratorExit',
+ '_sysex' : '._builtin:_sysex',
+ 'print_' : '._builtin:print_',
+ '_reraise' : '._builtin:_reraise',
+ '_tryimport' : '._builtin:_tryimport',
+ 'exec_' : '._builtin:exec_',
+ '_basestring' : '._builtin:_basestring',
+ '_totext' : '._builtin:_totext',
+ '_isbytes' : '._builtin:_isbytes',
+ '_istext' : '._builtin:_istext',
+ '_getimself' : '._builtin:_getimself',
+ '_getfuncdict' : '._builtin:_getfuncdict',
+ '_getcode' : '._builtin:_getcode',
+ 'builtins' : '._builtin:builtins',
+ 'execfile' : '._builtin:execfile',
+ 'callable' : '._builtin:callable',
+ 'bytes' : '._builtin:bytes',
+ 'text' : '._builtin:text',
+ },
+
+ # input-output helping
+ 'io' : {
+ '__doc__' : '._io:__doc__',
+ 'dupfile' : '._io.capture:dupfile',
+ 'TextIO' : '._io.capture:TextIO',
+ 'BytesIO' : '._io.capture:BytesIO',
+ 'FDCapture' : '._io.capture:FDCapture',
+ 'StdCapture' : '._io.capture:StdCapture',
+ 'StdCaptureFD' : '._io.capture:StdCaptureFD',
+ 'TerminalWriter' : '._io.terminalwriter:TerminalWriter',
+ 'ansi_print' : '._io.terminalwriter:ansi_print',
+ 'get_terminal_width' : '._io.terminalwriter:get_terminal_width',
+ 'saferepr' : '._io.saferepr:saferepr',
+ },
+
+ # small and mean xml/html generation
+ 'xml' : {
+ '__doc__' : '._xmlgen:__doc__',
+ 'html' : '._xmlgen:html',
+ 'Tag' : '._xmlgen:Tag',
+ 'raw' : '._xmlgen:raw',
+ 'Namespace' : '._xmlgen:Namespace',
+ 'escape' : '._xmlgen:escape',
+ },
+
+ 'log' : {
+ # logging API ('producers' and 'consumers' connected via keywords)
+ '__doc__' : '._log:__doc__',
+ '_apiwarn' : '._log.warning:_apiwarn',
+ 'Producer' : '._log.log:Producer',
+ 'setconsumer' : '._log.log:setconsumer',
+ '_setstate' : '._log.log:setstate',
+ '_getstate' : '._log.log:getstate',
+ 'Path' : '._log.log:Path',
+ 'STDOUT' : '._log.log:STDOUT',
+ 'STDERR' : '._log.log:STDERR',
+ 'Syslog' : '._log.log:Syslog',
+ },
+
+})
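+
+# Illustration only (not executed): once initpkg() above has replaced this
+# module in sys.modules, the exported names are imported lazily on first
+# attribute access, for example:
+#
+#   import py
+#   p = py.path.local('/tmp')       # first access imports py._path.local
+#   log = py.log.Producer('demo')   # first access imports py._log.log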
diff --git a/py/__metainfo.py b/py/__metainfo.py
index 12581eb..067806c 100644
--- a/py/__metainfo.py
+++ b/py/__metainfo.py
@@ -1,2 +1,2 @@
-import py
-pydir = py.path.local(py.__file__).dirpath()
+import py
+pydir = py.path.local(py.__file__).dirpath()
diff --git a/py/_apipkg.py b/py/_apipkg.py
index a73b8f6..42bd29b 100644
--- a/py/_apipkg.py
+++ b/py/_apipkg.py
@@ -1,181 +1,181 @@
-"""
-apipkg: control the exported namespace of a python package.
-
-see http://pypi.python.org/pypi/apipkg
-
-(c) holger krekel, 2009 - MIT license
-"""
-import os
-import sys
-from types import ModuleType
-
-__version__ = '1.3.dev'
-
-def _py_abspath(path):
- """
- special version of abspath
- that will leave paths from jython jars alone
- """
- if path.startswith('__pyclasspath__'):
-
- return path
- else:
- return os.path.abspath(path)
-
-def initpkg(pkgname, exportdefs, attr=dict()):
- """ initialize given package from the export definitions. """
- oldmod = sys.modules.get(pkgname)
- d = {}
- f = getattr(oldmod, '__file__', None)
- if f:
- f = _py_abspath(f)
- d['__file__'] = f
- if hasattr(oldmod, '__version__'):
- d['__version__'] = oldmod.__version__
- if hasattr(oldmod, '__loader__'):
- d['__loader__'] = oldmod.__loader__
- if hasattr(oldmod, '__path__'):
- d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
- if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
- d['__doc__'] = oldmod.__doc__
- d.update(attr)
- if hasattr(oldmod, "__dict__"):
- oldmod.__dict__.update(d)
- mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
- sys.modules[pkgname] = mod
-
-def importobj(modpath, attrname):
- module = __import__(modpath, None, None, ['__doc__'])
- if not attrname:
- return module
-
- retval = module
- names = attrname.split(".")
- for x in names:
- retval = getattr(retval, x)
- return retval
-
-class ApiModule(ModuleType):
- def __docget(self):
- try:
- return self.__doc
- except AttributeError:
- if '__doc__' in self.__map__:
- return self.__makeattr('__doc__')
- def __docset(self, value):
- self.__doc = value
- __doc__ = property(__docget, __docset)
-
- def __init__(self, name, importspec, implprefix=None, attr=None):
- self.__name__ = name
- self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
- self.__map__ = {}
- self.__implprefix__ = implprefix or name
- if attr:
- for name, val in attr.items():
- # print "setting", self.__name__, name, val
- setattr(self, name, val)
- for name, importspec in importspec.items():
- if isinstance(importspec, dict):
- subname = '%s.%s' % (self.__name__, name)
- apimod = ApiModule(subname, importspec, implprefix)
- sys.modules[subname] = apimod
- setattr(self, name, apimod)
- else:
- parts = importspec.split(':')
- modpath = parts.pop(0)
- attrname = parts and parts[0] or ""
- if modpath[0] == '.':
- modpath = implprefix + modpath
-
- if not attrname:
- subname = '%s.%s' % (self.__name__, name)
- apimod = AliasModule(subname, modpath)
- sys.modules[subname] = apimod
- if '.' not in name:
- setattr(self, name, apimod)
- else:
- self.__map__[name] = (modpath, attrname)
-
- def __repr__(self):
- l = []
- if hasattr(self, '__version__'):
- l.append("version=" + repr(self.__version__))
- if hasattr(self, '__file__'):
- l.append('from ' + repr(self.__file__))
- if l:
- return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
- return '<ApiModule %r>' % (self.__name__,)
-
- def __makeattr(self, name):
- """lazily compute value for name or raise AttributeError if unknown."""
- # print "makeattr", self.__name__, name
- target = None
- if '__onfirstaccess__' in self.__map__:
- target = self.__map__.pop('__onfirstaccess__')
- importobj(*target)()
- try:
- modpath, attrname = self.__map__[name]
- except KeyError:
- if target is not None and name != '__onfirstaccess__':
- # retry, onfirstaccess might have set attrs
- return getattr(self, name)
- raise AttributeError(name)
- else:
- result = importobj(modpath, attrname)
- setattr(self, name, result)
- try:
- del self.__map__[name]
- except KeyError:
- pass # in a recursive-import situation a double-del can happen
- return result
-
- __getattr__ = __makeattr
-
- def __dict__(self):
- # force all the content of the module to be loaded when __dict__ is read
- dictdescr = ModuleType.__dict__['__dict__']
- dict = dictdescr.__get__(self)
- if dict is not None:
- hasattr(self, 'some')
- for name in self.__all__:
- try:
- self.__makeattr(name)
- except AttributeError:
- pass
- return dict
- __dict__ = property(__dict__)
-
-
-def AliasModule(modname, modpath, attrname=None):
- mod = []
-
- def getmod():
- if not mod:
- x = importobj(modpath, None)
- if attrname is not None:
- x = getattr(x, attrname)
- mod.append(x)
- return mod[0]
-
- class AliasModule(ModuleType):
-
- def __repr__(self):
- x = modpath
- if attrname:
- x += "." + attrname
- return '<AliasModule %r for %r>' % (modname, x)
-
- def __getattribute__(self, name):
- try:
- return getattr(getmod(), name)
- except ImportError:
- return None
-
- def __setattr__(self, name, value):
- setattr(getmod(), name, value)
-
- def __delattr__(self, name):
- delattr(getmod(), name)
-
- return AliasModule(str(modname))
+"""
+apipkg: control the exported namespace of a python package.
+
+see http://pypi.python.org/pypi/apipkg
+
+(c) holger krekel, 2009 - MIT license
+"""
+import os
+import sys
+from types import ModuleType
+
+__version__ = '1.3.dev'
+
+def _py_abspath(path):
+ """
+ special version of abspath
+ that will leave paths from jython jars alone
+ """
+ if path.startswith('__pyclasspath__'):
+
+ return path
+ else:
+ return os.path.abspath(path)
+
+def initpkg(pkgname, exportdefs, attr=dict()):
+ """ initialize given package from the export definitions. """
+ oldmod = sys.modules.get(pkgname)
+ d = {}
+ f = getattr(oldmod, '__file__', None)
+ if f:
+ f = _py_abspath(f)
+ d['__file__'] = f
+ if hasattr(oldmod, '__version__'):
+ d['__version__'] = oldmod.__version__
+ if hasattr(oldmod, '__loader__'):
+ d['__loader__'] = oldmod.__loader__
+ if hasattr(oldmod, '__path__'):
+ d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
+ if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
+ d['__doc__'] = oldmod.__doc__
+ d.update(attr)
+ if hasattr(oldmod, "__dict__"):
+ oldmod.__dict__.update(d)
+ mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
+ sys.modules[pkgname] = mod
+
+def importobj(modpath, attrname):
+ module = __import__(modpath, None, None, ['__doc__'])
+ if not attrname:
+ return module
+
+ retval = module
+ names = attrname.split(".")
+ for x in names:
+ retval = getattr(retval, x)
+ return retval
+
+class ApiModule(ModuleType):
+ def __docget(self):
+ try:
+ return self.__doc
+ except AttributeError:
+ if '__doc__' in self.__map__:
+ return self.__makeattr('__doc__')
+ def __docset(self, value):
+ self.__doc = value
+ __doc__ = property(__docget, __docset)
+
+ def __init__(self, name, importspec, implprefix=None, attr=None):
+ self.__name__ = name
+ self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
+ self.__map__ = {}
+ self.__implprefix__ = implprefix or name
+ if attr:
+ for name, val in attr.items():
+ # print "setting", self.__name__, name, val
+ setattr(self, name, val)
+ for name, importspec in importspec.items():
+ if isinstance(importspec, dict):
+ subname = '%s.%s' % (self.__name__, name)
+ apimod = ApiModule(subname, importspec, implprefix)
+ sys.modules[subname] = apimod
+ setattr(self, name, apimod)
+ else:
+ parts = importspec.split(':')
+ modpath = parts.pop(0)
+ attrname = parts and parts[0] or ""
+ if modpath[0] == '.':
+ modpath = implprefix + modpath
+
+ if not attrname:
+ subname = '%s.%s' % (self.__name__, name)
+ apimod = AliasModule(subname, modpath)
+ sys.modules[subname] = apimod
+ if '.' not in name:
+ setattr(self, name, apimod)
+ else:
+ self.__map__[name] = (modpath, attrname)
+
+ def __repr__(self):
+ l = []
+ if hasattr(self, '__version__'):
+ l.append("version=" + repr(self.__version__))
+ if hasattr(self, '__file__'):
+ l.append('from ' + repr(self.__file__))
+ if l:
+ return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
+ return '<ApiModule %r>' % (self.__name__,)
+
+ def __makeattr(self, name):
+ """lazily compute value for name or raise AttributeError if unknown."""
+ # print "makeattr", self.__name__, name
+ target = None
+ if '__onfirstaccess__' in self.__map__:
+ target = self.__map__.pop('__onfirstaccess__')
+ importobj(*target)()
+ try:
+ modpath, attrname = self.__map__[name]
+ except KeyError:
+ if target is not None and name != '__onfirstaccess__':
+ # retry, onfirstaccess might have set attrs
+ return getattr(self, name)
+ raise AttributeError(name)
+ else:
+ result = importobj(modpath, attrname)
+ setattr(self, name, result)
+ try:
+ del self.__map__[name]
+ except KeyError:
+ pass # in a recursive-import situation a double-del can happen
+ return result
+
+ __getattr__ = __makeattr
+
+ def __dict__(self):
+ # force all the content of the module to be loaded when __dict__ is read
+ dictdescr = ModuleType.__dict__['__dict__']
+ dict = dictdescr.__get__(self)
+ if dict is not None:
+ hasattr(self, 'some')
+ for name in self.__all__:
+ try:
+ self.__makeattr(name)
+ except AttributeError:
+ pass
+ return dict
+ __dict__ = property(__dict__)
+
+
+def AliasModule(modname, modpath, attrname=None):
+ mod = []
+
+ def getmod():
+ if not mod:
+ x = importobj(modpath, None)
+ if attrname is not None:
+ x = getattr(x, attrname)
+ mod.append(x)
+ return mod[0]
+
+ class AliasModule(ModuleType):
+
+ def __repr__(self):
+ x = modpath
+ if attrname:
+ x += "." + attrname
+ return '<AliasModule %r for %r>' % (modname, x)
+
+ def __getattribute__(self, name):
+ try:
+ return getattr(getmod(), name)
+ except ImportError:
+ return None
+
+ def __setattr__(self, name, value):
+ setattr(getmod(), name, value)
+
+ def __delattr__(self, name):
+ delattr(getmod(), name)
+
+ return AliasModule(str(modname))
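
The _apipkg module above installs a lazy package namespace: initpkg() swaps the package's module object for an ApiModule whose entries are "modpath:attrname" strings resolved on first attribute access, nested dicts become nested ApiModules, and bare module paths become AliasModules. The snippet below is only a hedged illustration of the exportdefs format; 'mypkg._impl' and the exported names are made up, and it assumes the standalone apipkg distribution (the same code vendored here as py/_apipkg.py).

    # illustrative only -- '._impl' submodules and exported names are hypothetical
    import apipkg

    apipkg.initpkg(__name__, exportdefs={
        'open_file': '._impl.io:open_file',   # resolved lazily on first access
        'util': {                             # nested dict -> nested ApiModule
            'Timer': '._impl.util:Timer',
        },
        'io': '._impl.io',                    # no attrname -> AliasModule
    })
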
diff --git a/py/_builtin.py b/py/_builtin.py
index 52ee9d7..6199afa 100644
--- a/py/_builtin.py
+++ b/py/_builtin.py
@@ -1,248 +1,248 @@
-import sys
-
-try:
- reversed = reversed
-except NameError:
- def reversed(sequence):
- """reversed(sequence) -> reverse iterator over values of the sequence
-
- Return a reverse iterator
- """
- if hasattr(sequence, '__reversed__'):
- return sequence.__reversed__()
- if not hasattr(sequence, '__getitem__'):
- raise TypeError("argument to reversed() must be a sequence")
- return reversed_iterator(sequence)
-
- class reversed_iterator(object):
-
- def __init__(self, seq):
- self.seq = seq
- self.remaining = len(seq)
-
- def __iter__(self):
- return self
-
- def next(self):
- i = self.remaining
- if i > 0:
- i -= 1
- item = self.seq[i]
- self.remaining = i
- return item
- raise StopIteration
-
- def __length_hint__(self):
- return self.remaining
-
-try:
- any = any
-except NameError:
- def any(iterable):
- for x in iterable:
- if x:
- return True
- return False
-
-try:
- all = all
-except NameError:
- def all(iterable):
- for x in iterable:
- if not x:
- return False
- return True
-
-try:
- sorted = sorted
-except NameError:
- builtin_cmp = cmp # need to use cmp as keyword arg
-
- def sorted(iterable, cmp=None, key=None, reverse=0):
- use_cmp = None
- if key is not None:
- if cmp is None:
- def use_cmp(x, y):
- return builtin_cmp(x[0], y[0])
- else:
- def use_cmp(x, y):
- return cmp(x[0], y[0])
- l = [(key(element), element) for element in iterable]
- else:
- if cmp is not None:
- use_cmp = cmp
- l = list(iterable)
- if use_cmp is not None:
- l.sort(use_cmp)
- else:
- l.sort()
- if reverse:
- l.reverse()
- if key is not None:
- return [element for (_, element) in l]
- return l
-
-try:
- set, frozenset = set, frozenset
-except NameError:
- from sets import set, frozenset
-
-# pass through
-enumerate = enumerate
-
-try:
- BaseException = BaseException
-except NameError:
- BaseException = Exception
-
-try:
- GeneratorExit = GeneratorExit
-except NameError:
- class GeneratorExit(Exception):
- """ This exception is never raised, it is there to make it possible to
- write code compatible with CPython 2.5 even in lower CPython
- versions."""
- pass
- GeneratorExit.__module__ = 'exceptions'
-
-_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
-
-try:
- callable = callable
-except NameError:
- def callable(obj):
- return hasattr(obj, "__call__")
-
-if sys.version_info >= (3, 0):
- exec ("print_ = print ; exec_=exec")
- import builtins
-
- # some backward compatibility helpers
- _basestring = str
- def _totext(obj, encoding=None, errors=None):
- if isinstance(obj, bytes):
- if errors is None:
- obj = obj.decode(encoding)
- else:
- obj = obj.decode(encoding, errors)
- elif not isinstance(obj, str):
- obj = str(obj)
- return obj
-
- def _isbytes(x):
- return isinstance(x, bytes)
- def _istext(x):
- return isinstance(x, str)
-
- text = str
- bytes = bytes
-
-
- def _getimself(function):
- return getattr(function, '__self__', None)
-
- def _getfuncdict(function):
- return getattr(function, "__dict__", None)
-
- def _getcode(function):
- return getattr(function, "__code__", None)
-
- def execfile(fn, globs=None, locs=None):
- if globs is None:
- back = sys._getframe(1)
- globs = back.f_globals
- locs = back.f_locals
- del back
- elif locs is None:
- locs = globs
- fp = open(fn, "r")
- try:
- source = fp.read()
- finally:
- fp.close()
- co = compile(source, fn, "exec", dont_inherit=True)
- exec_(co, globs, locs)
-
-else:
- import __builtin__ as builtins
- _totext = unicode
- _basestring = basestring
- text = unicode
- bytes = str
- execfile = execfile
- callable = callable
- def _isbytes(x):
- return isinstance(x, str)
- def _istext(x):
- return isinstance(x, unicode)
-
- def _getimself(function):
- return getattr(function, 'im_self', None)
-
- def _getfuncdict(function):
- return getattr(function, "__dict__", None)
-
- def _getcode(function):
- try:
- return getattr(function, "__code__")
- except AttributeError:
- return getattr(function, "func_code", None)
-
- def print_(*args, **kwargs):
- """ minimal backport of py3k print statement. """
- sep = ' '
- if 'sep' in kwargs:
- sep = kwargs.pop('sep')
- end = '\n'
- if 'end' in kwargs:
- end = kwargs.pop('end')
- file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
- if kwargs:
- args = ", ".join([str(x) for x in kwargs])
- raise TypeError("invalid keyword arguments: %s" % args)
- at_start = True
- for x in args:
- if not at_start:
- file.write(sep)
- file.write(str(x))
- at_start = False
- file.write(end)
-
- def exec_(obj, globals=None, locals=None):
- """ minimal backport of py3k exec statement. """
- __tracebackhide__ = True
- if globals is None:
- frame = sys._getframe(1)
- globals = frame.f_globals
- if locals is None:
- locals = frame.f_locals
- elif locals is None:
- locals = globals
- exec2(obj, globals, locals)
-
-if sys.version_info >= (3, 0):
- def _reraise(cls, val, tb):
- __tracebackhide__ = True
- assert hasattr(val, '__traceback__')
- raise cls.with_traceback(val, tb)
-else:
- exec ("""
-def _reraise(cls, val, tb):
- __tracebackhide__ = True
- raise cls, val, tb
-def exec2(obj, globals, locals):
- __tracebackhide__ = True
- exec obj in globals, locals
-""")
-
-def _tryimport(*names):
- """ return the first successfully imported module. """
- assert names
- for name in names:
- try:
- __import__(name)
- except ImportError:
- excinfo = sys.exc_info()
- else:
- return sys.modules[name]
- _reraise(*excinfo)
+import sys
+
+try:
+ reversed = reversed
+except NameError:
+ def reversed(sequence):
+ """reversed(sequence) -> reverse iterator over values of the sequence
+
+ Return a reverse iterator
+ """
+ if hasattr(sequence, '__reversed__'):
+ return sequence.__reversed__()
+ if not hasattr(sequence, '__getitem__'):
+ raise TypeError("argument to reversed() must be a sequence")
+ return reversed_iterator(sequence)
+
+ class reversed_iterator(object):
+
+ def __init__(self, seq):
+ self.seq = seq
+ self.remaining = len(seq)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ i = self.remaining
+ if i > 0:
+ i -= 1
+ item = self.seq[i]
+ self.remaining = i
+ return item
+ raise StopIteration
+
+ def __length_hint__(self):
+ return self.remaining
+
+try:
+ any = any
+except NameError:
+ def any(iterable):
+ for x in iterable:
+ if x:
+ return True
+ return False
+
+try:
+ all = all
+except NameError:
+ def all(iterable):
+ for x in iterable:
+ if not x:
+ return False
+ return True
+
+try:
+ sorted = sorted
+except NameError:
+ builtin_cmp = cmp # need to use cmp as keyword arg
+
+ def sorted(iterable, cmp=None, key=None, reverse=0):
+ use_cmp = None
+ if key is not None:
+ if cmp is None:
+ def use_cmp(x, y):
+ return builtin_cmp(x[0], y[0])
+ else:
+ def use_cmp(x, y):
+ return cmp(x[0], y[0])
+ l = [(key(element), element) for element in iterable]
+ else:
+ if cmp is not None:
+ use_cmp = cmp
+ l = list(iterable)
+ if use_cmp is not None:
+ l.sort(use_cmp)
+ else:
+ l.sort()
+ if reverse:
+ l.reverse()
+ if key is not None:
+ return [element for (_, element) in l]
+ return l
+
+try:
+ set, frozenset = set, frozenset
+except NameError:
+ from sets import set, frozenset
+
+# pass through
+enumerate = enumerate
+
+try:
+ BaseException = BaseException
+except NameError:
+ BaseException = Exception
+
+try:
+ GeneratorExit = GeneratorExit
+except NameError:
+ class GeneratorExit(Exception):
+ """ This exception is never raised, it is there to make it possible to
+ write code compatible with CPython 2.5 even in lower CPython
+ versions."""
+ pass
+ GeneratorExit.__module__ = 'exceptions'
+
+_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return hasattr(obj, "__call__")
+
+if sys.version_info >= (3, 0):
+ exec ("print_ = print ; exec_=exec")
+ import builtins
+
+ # some backward compatibility helpers
+ _basestring = str
+ def _totext(obj, encoding=None, errors=None):
+ if isinstance(obj, bytes):
+ if errors is None:
+ obj = obj.decode(encoding)
+ else:
+ obj = obj.decode(encoding, errors)
+ elif not isinstance(obj, str):
+ obj = str(obj)
+ return obj
+
+ def _isbytes(x):
+ return isinstance(x, bytes)
+ def _istext(x):
+ return isinstance(x, str)
+
+ text = str
+ bytes = bytes
+
+
+ def _getimself(function):
+ return getattr(function, '__self__', None)
+
+ def _getfuncdict(function):
+ return getattr(function, "__dict__", None)
+
+ def _getcode(function):
+ return getattr(function, "__code__", None)
+
+ def execfile(fn, globs=None, locs=None):
+ if globs is None:
+ back = sys._getframe(1)
+ globs = back.f_globals
+ locs = back.f_locals
+ del back
+ elif locs is None:
+ locs = globs
+ fp = open(fn, "r")
+ try:
+ source = fp.read()
+ finally:
+ fp.close()
+ co = compile(source, fn, "exec", dont_inherit=True)
+ exec_(co, globs, locs)
+
+else:
+ import __builtin__ as builtins
+ _totext = unicode
+ _basestring = basestring
+ text = unicode
+ bytes = str
+ execfile = execfile
+ callable = callable
+ def _isbytes(x):
+ return isinstance(x, str)
+ def _istext(x):
+ return isinstance(x, unicode)
+
+ def _getimself(function):
+ return getattr(function, 'im_self', None)
+
+ def _getfuncdict(function):
+ return getattr(function, "__dict__", None)
+
+ def _getcode(function):
+ try:
+ return getattr(function, "__code__")
+ except AttributeError:
+ return getattr(function, "func_code", None)
+
+ def print_(*args, **kwargs):
+ """ minimal backport of py3k print statement. """
+ sep = ' '
+ if 'sep' in kwargs:
+ sep = kwargs.pop('sep')
+ end = '\n'
+ if 'end' in kwargs:
+ end = kwargs.pop('end')
+ file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
+ if kwargs:
+ args = ", ".join([str(x) for x in kwargs])
+ raise TypeError("invalid keyword arguments: %s" % args)
+ at_start = True
+ for x in args:
+ if not at_start:
+ file.write(sep)
+ file.write(str(x))
+ at_start = False
+ file.write(end)
+
+ def exec_(obj, globals=None, locals=None):
+ """ minimal backport of py3k exec statement. """
+ __tracebackhide__ = True
+ if globals is None:
+ frame = sys._getframe(1)
+ globals = frame.f_globals
+ if locals is None:
+ locals = frame.f_locals
+ elif locals is None:
+ locals = globals
+ exec2(obj, globals, locals)
+
+if sys.version_info >= (3, 0):
+ def _reraise(cls, val, tb):
+ __tracebackhide__ = True
+ assert hasattr(val, '__traceback__')
+ raise cls.with_traceback(val, tb)
+else:
+ exec ("""
+def _reraise(cls, val, tb):
+ __tracebackhide__ = True
+ raise cls, val, tb
+def exec2(obj, globals, locals):
+ __tracebackhide__ = True
+ exec obj in globals, locals
+""")
+
+def _tryimport(*names):
+ """ return the first successfully imported module. """
+ assert names
+ for name in names:
+ try:
+ __import__(name)
+ except ImportError:
+ excinfo = sys.exc_info()
+ else:
+ return sys.modules[name]
+ _reraise(*excinfo)
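
py/_builtin.py above collects small Python 2/3 compatibility shims (print_, exec_, _totext, _tryimport and friends) that the rest of the py package relies on. A minimal usage sketch, assuming only that py is importable, pulls the helpers straight from the private module shown in this hunk:

    # sketch only; these names are defined in py/_builtin.py above
    from py._builtin import print_, exec_, _totext, _tryimport

    print_("answer:", 42, sep=" ")             # print-as-a-function on 2.x and 3.x
    text = _totext(b"hi", "ascii")             # bytes -> text on either line
    namespace = {}
    exec_("x = 1 + 1", namespace)              # exec-as-a-function backport
    json = _tryimport("simplejson", "json")    # first module that imports cleanly
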
diff --git a/py/_code/__init__.py b/py/_code/__init__.py
index f15acf8..152ff58 100644
--- a/py/_code/__init__.py
+++ b/py/_code/__init__.py
@@ -1 +1 @@
-""" python inspection/code generation API """
+""" python inspection/code generation API """
diff --git a/py/_code/_assertionnew.py b/py/_code/_assertionnew.py
index afb1b31..e9b7050 100644
--- a/py/_code/_assertionnew.py
+++ b/py/_code/_assertionnew.py
@@ -1,339 +1,339 @@
-"""
-Find intermediate evaluation results in assert statements through builtin AST.
-This should replace _assertionold.py eventually.
-"""
-
-import sys
-import ast
-
-import py
-from py._code.assertion import _format_explanation, BuiltinAssertionError
-
-
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
- # See http://bugs.jython.org/issue1497
- _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
- "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
- "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
- "List", "Tuple")
- _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
- "AugAssign", "Print", "For", "While", "If", "With", "Raise",
- "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
- "Exec", "Global", "Expr", "Pass", "Break", "Continue")
- _expr_nodes = set(getattr(ast, name) for name in _exprs)
- _stmt_nodes = set(getattr(ast, name) for name in _stmts)
- def _is_ast_expr(node):
- return node.__class__ in _expr_nodes
- def _is_ast_stmt(node):
- return node.__class__ in _stmt_nodes
-else:
- def _is_ast_expr(node):
- return isinstance(node, ast.expr)
- def _is_ast_stmt(node):
- return isinstance(node, ast.stmt)
-
-
-class Failure(Exception):
- """Error found while interpreting AST."""
-
- def __init__(self, explanation=""):
- self.cause = sys.exc_info()
- self.explanation = explanation
-
-
-def interpret(source, frame, should_fail=False):
- mod = ast.parse(source)
- visitor = DebugInterpreter(frame)
- try:
- visitor.visit(mod)
- except Failure:
- failure = sys.exc_info()[1]
- return getfailure(failure)
- if should_fail:
- return ("(assertion failed, but when it was re-run for "
- "printing intermediate values, it did not fail. Suggestions: "
- "compute assert expression before the assert or use --no-assert)")
-
-def run(offending_line, frame=None):
- if frame is None:
- frame = py.code.Frame(sys._getframe(1))
- return interpret(offending_line, frame)
-
-def getfailure(failure):
- explanation = _format_explanation(failure.explanation)
- value = failure.cause[1]
- if str(value):
- lines = explanation.splitlines()
- if not lines:
- lines.append("")
- lines[0] += " << %s" % (value,)
- explanation = "\n".join(lines)
- text = "%s: %s" % (failure.cause[0].__name__, explanation)
- if text.startswith("AssertionError: assert "):
- text = text[16:]
- return text
-
-
-operator_map = {
- ast.BitOr : "|",
- ast.BitXor : "^",
- ast.BitAnd : "&",
- ast.LShift : "<<",
- ast.RShift : ">>",
- ast.Add : "+",
- ast.Sub : "-",
- ast.Mult : "*",
- ast.Div : "/",
- ast.FloorDiv : "//",
- ast.Mod : "%",
- ast.Eq : "==",
- ast.NotEq : "!=",
- ast.Lt : "<",
- ast.LtE : "<=",
- ast.Gt : ">",
- ast.GtE : ">=",
- ast.Pow : "**",
- ast.Is : "is",
- ast.IsNot : "is not",
- ast.In : "in",
- ast.NotIn : "not in"
-}
-
-unary_map = {
- ast.Not : "not %s",
- ast.Invert : "~%s",
- ast.USub : "-%s",
- ast.UAdd : "+%s"
-}
-
-
-class DebugInterpreter(ast.NodeVisitor):
- """Interpret AST nodes to gleam useful debugging information. """
-
- def __init__(self, frame):
- self.frame = frame
-
- def generic_visit(self, node):
- # Fallback when we don't have a special implementation.
- if _is_ast_expr(node):
- mod = ast.Expression(node)
- co = self._compile(mod)
- try:
- result = self.frame.eval(co)
- except Exception:
- raise Failure()
- explanation = self.frame.repr(result)
- return explanation, result
- elif _is_ast_stmt(node):
- mod = ast.Module([node])
- co = self._compile(mod, "exec")
- try:
- self.frame.exec_(co)
- except Exception:
- raise Failure()
- return None, None
- else:
- raise AssertionError("can't handle %s" %(node,))
-
- def _compile(self, source, mode="eval"):
- return compile(source, "<assertion interpretation>", mode)
-
- def visit_Expr(self, expr):
- return self.visit(expr.value)
-
- def visit_Module(self, mod):
- for stmt in mod.body:
- self.visit(stmt)
-
- def visit_Name(self, name):
- explanation, result = self.generic_visit(name)
- # See if the name is local.
- source = "%r in locals() is not globals()" % (name.id,)
- co = self._compile(source)
- try:
- local = self.frame.eval(co)
- except Exception:
- # have to assume it isn't
- local = False
- if not local:
- return name.id, result
- return explanation, result
-
- def visit_Compare(self, comp):
- left = comp.left
- left_explanation, left_result = self.visit(left)
- for op, next_op in zip(comp.ops, comp.comparators):
- next_explanation, next_result = self.visit(next_op)
- op_symbol = operator_map[op.__class__]
- explanation = "%s %s %s" % (left_explanation, op_symbol,
- next_explanation)
- source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
- co = self._compile(source)
- try:
- result = self.frame.eval(co, __exprinfo_left=left_result,
- __exprinfo_right=next_result)
- except Exception:
- raise Failure(explanation)
- try:
- if not result:
- break
- except KeyboardInterrupt:
- raise
- except:
- break
- left_explanation, left_result = next_explanation, next_result
-
- rcomp = py.code._reprcompare
- if rcomp:
- res = rcomp(op_symbol, left_result, next_result)
- if res:
- explanation = res
- return explanation, result
-
- def visit_BoolOp(self, boolop):
- is_or = isinstance(boolop.op, ast.Or)
- explanations = []
- for operand in boolop.values:
- explanation, result = self.visit(operand)
- explanations.append(explanation)
- if result == is_or:
- break
- name = is_or and " or " or " and "
- explanation = "(" + name.join(explanations) + ")"
- return explanation, result
-
- def visit_UnaryOp(self, unary):
- pattern = unary_map[unary.op.__class__]
- operand_explanation, operand_result = self.visit(unary.operand)
- explanation = pattern % (operand_explanation,)
- co = self._compile(pattern % ("__exprinfo_expr",))
- try:
- result = self.frame.eval(co, __exprinfo_expr=operand_result)
- except Exception:
- raise Failure(explanation)
- return explanation, result
-
- def visit_BinOp(self, binop):
- left_explanation, left_result = self.visit(binop.left)
- right_explanation, right_result = self.visit(binop.right)
- symbol = operator_map[binop.op.__class__]
- explanation = "(%s %s %s)" % (left_explanation, symbol,
- right_explanation)
- source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
- co = self._compile(source)
- try:
- result = self.frame.eval(co, __exprinfo_left=left_result,
- __exprinfo_right=right_result)
- except Exception:
- raise Failure(explanation)
- return explanation, result
-
- def visit_Call(self, call):
- func_explanation, func = self.visit(call.func)
- arg_explanations = []
- ns = {"__exprinfo_func" : func}
- arguments = []
- for arg in call.args:
- arg_explanation, arg_result = self.visit(arg)
- arg_name = "__exprinfo_%s" % (len(ns),)
- ns[arg_name] = arg_result
- arguments.append(arg_name)
- arg_explanations.append(arg_explanation)
- for keyword in call.keywords:
- arg_explanation, arg_result = self.visit(keyword.value)
- arg_name = "__exprinfo_%s" % (len(ns),)
- ns[arg_name] = arg_result
- keyword_source = "%s=%%s" % (keyword.arg)
- arguments.append(keyword_source % (arg_name,))
- arg_explanations.append(keyword_source % (arg_explanation,))
- if call.starargs:
- arg_explanation, arg_result = self.visit(call.starargs)
- arg_name = "__exprinfo_star"
- ns[arg_name] = arg_result
- arguments.append("*%s" % (arg_name,))
- arg_explanations.append("*%s" % (arg_explanation,))
- if call.kwargs:
- arg_explanation, arg_result = self.visit(call.kwargs)
- arg_name = "__exprinfo_kwds"
- ns[arg_name] = arg_result
- arguments.append("**%s" % (arg_name,))
- arg_explanations.append("**%s" % (arg_explanation,))
- args_explained = ", ".join(arg_explanations)
- explanation = "%s(%s)" % (func_explanation, args_explained)
- args = ", ".join(arguments)
- source = "__exprinfo_func(%s)" % (args,)
- co = self._compile(source)
- try:
- result = self.frame.eval(co, **ns)
- except Exception:
- raise Failure(explanation)
- pattern = "%s\n{%s = %s\n}"
- rep = self.frame.repr(result)
- explanation = pattern % (rep, rep, explanation)
- return explanation, result
-
- def _is_builtin_name(self, name):
- pattern = "%r not in globals() and %r not in locals()"
- source = pattern % (name.id, name.id)
- co = self._compile(source)
- try:
- return self.frame.eval(co)
- except Exception:
- return False
-
- def visit_Attribute(self, attr):
- if not isinstance(attr.ctx, ast.Load):
- return self.generic_visit(attr)
- source_explanation, source_result = self.visit(attr.value)
- explanation = "%s.%s" % (source_explanation, attr.attr)
- source = "__exprinfo_expr.%s" % (attr.attr,)
- co = self._compile(source)
- try:
- result = self.frame.eval(co, __exprinfo_expr=source_result)
- except Exception:
- raise Failure(explanation)
- explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
- self.frame.repr(result),
- source_explanation, attr.attr)
- # Check if the attr is from an instance.
- source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
- source = source % (attr.attr,)
- co = self._compile(source)
- try:
- from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
- except Exception:
- from_instance = True
- if from_instance:
- rep = self.frame.repr(result)
- pattern = "%s\n{%s = %s\n}"
- explanation = pattern % (rep, rep, explanation)
- return explanation, result
-
- def visit_Assert(self, assrt):
- test_explanation, test_result = self.visit(assrt.test)
- if test_explanation.startswith("False\n{False =") and \
- test_explanation.endswith("\n"):
- test_explanation = test_explanation[15:-2]
- explanation = "assert %s" % (test_explanation,)
- if not test_result:
- try:
- raise BuiltinAssertionError
- except Exception:
- raise Failure(explanation)
- return explanation, test_result
-
- def visit_Assign(self, assign):
- value_explanation, value_result = self.visit(assign.value)
- explanation = "... = %s" % (value_explanation,)
- name = ast.Name("__exprinfo_expr", ast.Load(),
- lineno=assign.value.lineno,
- col_offset=assign.value.col_offset)
- new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
- col_offset=assign.col_offset)
- mod = ast.Module([new_assign])
- co = self._compile(mod, "exec")
- try:
- self.frame.exec_(co, __exprinfo_expr=value_result)
- except Exception:
- raise Failure(explanation)
- return explanation, value_result
+"""
+Find intermediate evaluation results in assert statements through builtin AST.
+This should replace _assertionold.py eventually.
+"""
+
+import sys
+import ast
+
+import py
+from py._code.assertion import _format_explanation, BuiltinAssertionError
+
+
+if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+ # See http://bugs.jython.org/issue1497
+ _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+ "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+ "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+ "List", "Tuple")
+ _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+ "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+ "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+ "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+ _expr_nodes = set(getattr(ast, name) for name in _exprs)
+ _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+ def _is_ast_expr(node):
+ return node.__class__ in _expr_nodes
+ def _is_ast_stmt(node):
+ return node.__class__ in _stmt_nodes
+else:
+ def _is_ast_expr(node):
+ return isinstance(node, ast.expr)
+ def _is_ast_stmt(node):
+ return isinstance(node, ast.stmt)
+
+
+class Failure(Exception):
+ """Error found while interpreting AST."""
+
+ def __init__(self, explanation=""):
+ self.cause = sys.exc_info()
+ self.explanation = explanation
+
+
+def interpret(source, frame, should_fail=False):
+ mod = ast.parse(source)
+ visitor = DebugInterpreter(frame)
+ try:
+ visitor.visit(mod)
+ except Failure:
+ failure = sys.exc_info()[1]
+ return getfailure(failure)
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --no-assert)")
+
+def run(offending_line, frame=None):
+ if frame is None:
+ frame = py.code.Frame(sys._getframe(1))
+ return interpret(offending_line, frame)
+
+def getfailure(failure):
+ explanation = _format_explanation(failure.explanation)
+ value = failure.cause[1]
+ if str(value):
+ lines = explanation.splitlines()
+ if not lines:
+ lines.append("")
+ lines[0] += " << %s" % (value,)
+ explanation = "\n".join(lines)
+ text = "%s: %s" % (failure.cause[0].__name__, explanation)
+ if text.startswith("AssertionError: assert "):
+ text = text[16:]
+ return text
+
+
+operator_map = {
+ ast.BitOr : "|",
+ ast.BitXor : "^",
+ ast.BitAnd : "&",
+ ast.LShift : "<<",
+ ast.RShift : ">>",
+ ast.Add : "+",
+ ast.Sub : "-",
+ ast.Mult : "*",
+ ast.Div : "/",
+ ast.FloorDiv : "//",
+ ast.Mod : "%",
+ ast.Eq : "==",
+ ast.NotEq : "!=",
+ ast.Lt : "<",
+ ast.LtE : "<=",
+ ast.Gt : ">",
+ ast.GtE : ">=",
+ ast.Pow : "**",
+ ast.Is : "is",
+ ast.IsNot : "is not",
+ ast.In : "in",
+ ast.NotIn : "not in"
+}
+
+unary_map = {
+ ast.Not : "not %s",
+ ast.Invert : "~%s",
+ ast.USub : "-%s",
+ ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+ """Interpret AST nodes to gleam useful debugging information. """
+
+ def __init__(self, frame):
+ self.frame = frame
+
+ def generic_visit(self, node):
+ # Fallback when we don't have a special implementation.
+ if _is_ast_expr(node):
+ mod = ast.Expression(node)
+ co = self._compile(mod)
+ try:
+ result = self.frame.eval(co)
+ except Exception:
+ raise Failure()
+ explanation = self.frame.repr(result)
+ return explanation, result
+ elif _is_ast_stmt(node):
+ mod = ast.Module([node])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co)
+ except Exception:
+ raise Failure()
+ return None, None
+ else:
+ raise AssertionError("can't handle %s" %(node,))
+
+ def _compile(self, source, mode="eval"):
+ return compile(source, "<assertion interpretation>", mode)
+
+ def visit_Expr(self, expr):
+ return self.visit(expr.value)
+
+ def visit_Module(self, mod):
+ for stmt in mod.body:
+ self.visit(stmt)
+
+ def visit_Name(self, name):
+ explanation, result = self.generic_visit(name)
+ # See if the name is local.
+ source = "%r in locals() is not globals()" % (name.id,)
+ co = self._compile(source)
+ try:
+ local = self.frame.eval(co)
+ except Exception:
+ # have to assume it isn't
+ local = False
+ if not local:
+ return name.id, result
+ return explanation, result
+
+ def visit_Compare(self, comp):
+ left = comp.left
+ left_explanation, left_result = self.visit(left)
+ for op, next_op in zip(comp.ops, comp.comparators):
+ next_explanation, next_result = self.visit(next_op)
+ op_symbol = operator_map[op.__class__]
+ explanation = "%s %s %s" % (left_explanation, op_symbol,
+ next_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=next_result)
+ except Exception:
+ raise Failure(explanation)
+ try:
+ if not result:
+ break
+ except KeyboardInterrupt:
+ raise
+ except:
+ break
+ left_explanation, left_result = next_explanation, next_result
+
+ rcomp = py.code._reprcompare
+ if rcomp:
+ res = rcomp(op_symbol, left_result, next_result)
+ if res:
+ explanation = res
+ return explanation, result
+
+ def visit_BoolOp(self, boolop):
+ is_or = isinstance(boolop.op, ast.Or)
+ explanations = []
+ for operand in boolop.values:
+ explanation, result = self.visit(operand)
+ explanations.append(explanation)
+ if result == is_or:
+ break
+ name = is_or and " or " or " and "
+ explanation = "(" + name.join(explanations) + ")"
+ return explanation, result
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_explanation, operand_result = self.visit(unary.operand)
+ explanation = pattern % (operand_explanation,)
+ co = self._compile(pattern % ("__exprinfo_expr",))
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=operand_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_BinOp(self, binop):
+ left_explanation, left_result = self.visit(binop.left)
+ right_explanation, right_result = self.visit(binop.right)
+ symbol = operator_map[binop.op.__class__]
+ explanation = "(%s %s %s)" % (left_explanation, symbol,
+ right_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=right_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_Call(self, call):
+ func_explanation, func = self.visit(call.func)
+ arg_explanations = []
+ ns = {"__exprinfo_func" : func}
+ arguments = []
+ for arg in call.args:
+ arg_explanation, arg_result = self.visit(arg)
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ arguments.append(arg_name)
+ arg_explanations.append(arg_explanation)
+ for keyword in call.keywords:
+ arg_explanation, arg_result = self.visit(keyword.value)
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ keyword_source = "%s=%%s" % (keyword.arg)
+ arguments.append(keyword_source % (arg_name,))
+ arg_explanations.append(keyword_source % (arg_explanation,))
+ if call.starargs:
+ arg_explanation, arg_result = self.visit(call.starargs)
+ arg_name = "__exprinfo_star"
+ ns[arg_name] = arg_result
+ arguments.append("*%s" % (arg_name,))
+ arg_explanations.append("*%s" % (arg_explanation,))
+ if call.kwargs:
+ arg_explanation, arg_result = self.visit(call.kwargs)
+ arg_name = "__exprinfo_kwds"
+ ns[arg_name] = arg_result
+ arguments.append("**%s" % (arg_name,))
+ arg_explanations.append("**%s" % (arg_explanation,))
+ args_explained = ", ".join(arg_explanations)
+ explanation = "%s(%s)" % (func_explanation, args_explained)
+ args = ", ".join(arguments)
+ source = "__exprinfo_func(%s)" % (args,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, **ns)
+ except Exception:
+ raise Failure(explanation)
+ pattern = "%s\n{%s = %s\n}"
+ rep = self.frame.repr(result)
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def _is_builtin_name(self, name):
+ pattern = "%r not in globals() and %r not in locals()"
+ source = pattern % (name.id, name.id)
+ co = self._compile(source)
+ try:
+ return self.frame.eval(co)
+ except Exception:
+ return False
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ source_explanation, source_result = self.visit(attr.value)
+ explanation = "%s.%s" % (source_explanation, attr.attr)
+ source = "__exprinfo_expr.%s" % (attr.attr,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ raise Failure(explanation)
+ explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+ self.frame.repr(result),
+ source_explanation, attr.attr)
+ # Check if the attr is from an instance.
+ source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+ source = source % (attr.attr,)
+ co = self._compile(source)
+ try:
+ from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ from_instance = True
+ if from_instance:
+ rep = self.frame.repr(result)
+ pattern = "%s\n{%s = %s\n}"
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def visit_Assert(self, assrt):
+ test_explanation, test_result = self.visit(assrt.test)
+ if test_explanation.startswith("False\n{False =") and \
+ test_explanation.endswith("\n"):
+ test_explanation = test_explanation[15:-2]
+ explanation = "assert %s" % (test_explanation,)
+ if not test_result:
+ try:
+ raise BuiltinAssertionError
+ except Exception:
+ raise Failure(explanation)
+ return explanation, test_result
+
+ def visit_Assign(self, assign):
+ value_explanation, value_result = self.visit(assign.value)
+ explanation = "... = %s" % (value_explanation,)
+ name = ast.Name("__exprinfo_expr", ast.Load(),
+ lineno=assign.value.lineno,
+ col_offset=assign.value.col_offset)
+ new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+ col_offset=assign.col_offset)
+ mod = ast.Module([new_assign])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co, __exprinfo_expr=value_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, value_result
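
py/_code/_assertionnew.py re-runs the source of a failing assert through the ast module, evaluating each sub-expression in the original frame to build an explanation such as "assert 1 == 2". The driver below is a hedged sketch only; it assumes an old CPython where ast.Module(body) takes a single argument, as this file itself does, and uses py.code.Frame from the same release.

    # illustrative only; exercises interpret() defined above
    import sys
    import py
    from py._code import _assertionnew

    def demo():
        x, y = 1, 2
        try:
            assert x == y
        except AssertionError:
            frame = py.code.Frame(sys._getframe())
            # re-interpret the failing line in that frame to recover the
            # intermediate values, e.g. "assert 1 == 2"
            print(_assertionnew.interpret("assert x == y", frame))

    demo()
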
diff --git a/py/_code/_assertionold.py b/py/_code/_assertionold.py
index 4e81fb3..98786e4 100644
--- a/py/_code/_assertionold.py
+++ b/py/_code/_assertionold.py
@@ -1,555 +1,555 @@
-import py
-import sys, inspect
-from compiler import parse, ast, pycodegen
-from py._code.assertion import BuiltinAssertionError, _format_explanation
-
-passthroughex = py.builtin._sysex
-
-class Failure:
- def __init__(self, node):
- self.exc, self.value, self.tb = sys.exc_info()
- self.node = node
-
-class View(object):
- """View base class.
-
- If C is a subclass of View, then C(x) creates a proxy object around
- the object x. The actual class of the proxy is not C in general,
- but a *subclass* of C determined by the rules below. To avoid confusion
- we call view class the class of the proxy (a subclass of C, so of View)
- and object class the class of x.
-
- Attributes and methods not found in the proxy are automatically read on x.
- Other operations like setting attributes are performed on the proxy, as
- determined by its view class. The object x is available from the proxy
- as its __obj__ attribute.
-
- The view class selection is determined by the __view__ tuples and the
- optional __viewkey__ method. By default, the selected view class is the
- most specific subclass of C whose __view__ mentions the class of x.
- If no such subclass is found, the search proceeds with the parent
- object classes. For example, C(True) will first look for a subclass
- of C with __view__ = (..., bool, ...) and only if it doesn't find any
- look for one with __view__ = (..., int, ...), and then ..., object,...
- If everything fails the class C itself is considered to be the default.
-
- Alternatively, the view class selection can be driven by another aspect
- of the object x, instead of the class of x, by overriding __viewkey__.
- See last example at the end of this module.
- """
-
- _viewcache = {}
- __view__ = ()
-
- def __new__(rootclass, obj, *args, **kwds):
- self = object.__new__(rootclass)
- self.__obj__ = obj
- self.__rootclass__ = rootclass
- key = self.__viewkey__()
- try:
- self.__class__ = self._viewcache[key]
- except KeyError:
- self.__class__ = self._selectsubclass(key)
- return self
-
- def __getattr__(self, attr):
- # attributes not found in the normal hierarchy rooted on View
- # are looked up in the object's real class
- return getattr(self.__obj__, attr)
-
- def __viewkey__(self):
- return self.__obj__.__class__
-
- def __matchkey__(self, key, subclasses):
- if inspect.isclass(key):
- keys = inspect.getmro(key)
- else:
- keys = [key]
- for key in keys:
- result = [C for C in subclasses if key in C.__view__]
- if result:
- return result
- return []
-
- def _selectsubclass(self, key):
- subclasses = list(enumsubclasses(self.__rootclass__))
- for C in subclasses:
- if not isinstance(C.__view__, tuple):
- C.__view__ = (C.__view__,)
- choices = self.__matchkey__(key, subclasses)
- if not choices:
- return self.__rootclass__
- elif len(choices) == 1:
- return choices[0]
- else:
- # combine the multiple choices
- return type('?', tuple(choices), {})
-
- def __repr__(self):
- return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
-
-
-def enumsubclasses(cls):
- for subcls in cls.__subclasses__():
- for subsubclass in enumsubclasses(subcls):
- yield subsubclass
- yield cls
-
-
-class Interpretable(View):
- """A parse tree node with a few extra methods."""
- explanation = None
-
- def is_builtin(self, frame):
- return False
-
- def eval(self, frame):
- # fall-back for unknown expression nodes
- try:
- expr = ast.Expression(self.__obj__)
- expr.filename = '<eval>'
- self.__obj__.filename = '<eval>'
- co = pycodegen.ExpressionCodeGenerator(expr).getCode()
- result = frame.eval(co)
- except passthroughex:
- raise
- except:
- raise Failure(self)
- self.result = result
- self.explanation = self.explanation or frame.repr(self.result)
-
- def run(self, frame):
- # fall-back for unknown statement nodes
- try:
- expr = ast.Module(None, ast.Stmt([self.__obj__]))
- expr.filename = '<run>'
- co = pycodegen.ModuleCodeGenerator(expr).getCode()
- frame.exec_(co)
- except passthroughex:
- raise
- except:
- raise Failure(self)
-
- def nice_explanation(self):
- return _format_explanation(self.explanation)
-
-
-class Name(Interpretable):
- __view__ = ast.Name
-
- def is_local(self, frame):
- source = '%r in locals() is not globals()' % self.name
- try:
- return frame.is_true(frame.eval(source))
- except passthroughex:
- raise
- except:
- return False
-
- def is_global(self, frame):
- source = '%r in globals()' % self.name
- try:
- return frame.is_true(frame.eval(source))
- except passthroughex:
- raise
- except:
- return False
-
- def is_builtin(self, frame):
- source = '%r not in locals() and %r not in globals()' % (
- self.name, self.name)
- try:
- return frame.is_true(frame.eval(source))
- except passthroughex:
- raise
- except:
- return False
-
- def eval(self, frame):
- super(Name, self).eval(frame)
- if not self.is_local(frame):
- self.explanation = self.name
-
-class Compare(Interpretable):
- __view__ = ast.Compare
-
- def eval(self, frame):
- expr = Interpretable(self.expr)
- expr.eval(frame)
- for operation, expr2 in self.ops:
- if hasattr(self, 'result'):
- # shortcutting in chained expressions
- if not frame.is_true(self.result):
- break
- expr2 = Interpretable(expr2)
- expr2.eval(frame)
- self.explanation = "%s %s %s" % (
- expr.explanation, operation, expr2.explanation)
- source = "__exprinfo_left %s __exprinfo_right" % operation
- try:
- self.result = frame.eval(source,
- __exprinfo_left=expr.result,
- __exprinfo_right=expr2.result)
- except passthroughex:
- raise
- except:
- raise Failure(self)
- expr = expr2
-
-class And(Interpretable):
- __view__ = ast.And
-
- def eval(self, frame):
- explanations = []
- for expr in self.nodes:
- expr = Interpretable(expr)
- expr.eval(frame)
- explanations.append(expr.explanation)
- self.result = expr.result
- if not frame.is_true(expr.result):
- break
- self.explanation = '(' + ' and '.join(explanations) + ')'
-
-class Or(Interpretable):
- __view__ = ast.Or
-
- def eval(self, frame):
- explanations = []
- for expr in self.nodes:
- expr = Interpretable(expr)
- expr.eval(frame)
- explanations.append(expr.explanation)
- self.result = expr.result
- if frame.is_true(expr.result):
- break
- self.explanation = '(' + ' or '.join(explanations) + ')'
-
-
-# == Unary operations ==
-keepalive = []
-for astclass, astpattern in {
- ast.Not : 'not __exprinfo_expr',
- ast.Invert : '(~__exprinfo_expr)',
- }.items():
-
- class UnaryArith(Interpretable):
- __view__ = astclass
-
- def eval(self, frame, astpattern=astpattern):
- expr = Interpretable(self.expr)
- expr.eval(frame)
- self.explanation = astpattern.replace('__exprinfo_expr',
- expr.explanation)
- try:
- self.result = frame.eval(astpattern,
- __exprinfo_expr=expr.result)
- except passthroughex:
- raise
- except:
- raise Failure(self)
-
- keepalive.append(UnaryArith)
-
-# == Binary operations ==
-for astclass, astpattern in {
- ast.Add : '(__exprinfo_left + __exprinfo_right)',
- ast.Sub : '(__exprinfo_left - __exprinfo_right)',
- ast.Mul : '(__exprinfo_left * __exprinfo_right)',
- ast.Div : '(__exprinfo_left / __exprinfo_right)',
- ast.Mod : '(__exprinfo_left % __exprinfo_right)',
- ast.Power : '(__exprinfo_left ** __exprinfo_right)',
- }.items():
-
- class BinaryArith(Interpretable):
- __view__ = astclass
-
- def eval(self, frame, astpattern=astpattern):
- left = Interpretable(self.left)
- left.eval(frame)
- right = Interpretable(self.right)
- right.eval(frame)
- self.explanation = (astpattern
- .replace('__exprinfo_left', left .explanation)
- .replace('__exprinfo_right', right.explanation))
- try:
- self.result = frame.eval(astpattern,
- __exprinfo_left=left.result,
- __exprinfo_right=right.result)
- except passthroughex:
- raise
- except:
- raise Failure(self)
-
- keepalive.append(BinaryArith)
-
-
-class CallFunc(Interpretable):
- __view__ = ast.CallFunc
-
- def is_bool(self, frame):
- source = 'isinstance(__exprinfo_value, bool)'
- try:
- return frame.is_true(frame.eval(source,
- __exprinfo_value=self.result))
- except passthroughex:
- raise
- except:
- return False
-
- def eval(self, frame):
- node = Interpretable(self.node)
- node.eval(frame)
- explanations = []
- vars = {'__exprinfo_fn': node.result}
- source = '__exprinfo_fn('
- for a in self.args:
- if isinstance(a, ast.Keyword):
- keyword = a.name
- a = a.expr
- else:
- keyword = None
- a = Interpretable(a)
- a.eval(frame)
- argname = '__exprinfo_%d' % len(vars)
- vars[argname] = a.result
- if keyword is None:
- source += argname + ','
- explanations.append(a.explanation)
- else:
- source += '%s=%s,' % (keyword, argname)
- explanations.append('%s=%s' % (keyword, a.explanation))
- if self.star_args:
- star_args = Interpretable(self.star_args)
- star_args.eval(frame)
- argname = '__exprinfo_star'
- vars[argname] = star_args.result
- source += '*' + argname + ','
- explanations.append('*' + star_args.explanation)
- if self.dstar_args:
- dstar_args = Interpretable(self.dstar_args)
- dstar_args.eval(frame)
- argname = '__exprinfo_kwds'
- vars[argname] = dstar_args.result
- source += '**' + argname + ','
- explanations.append('**' + dstar_args.explanation)
- self.explanation = "%s(%s)" % (
- node.explanation, ', '.join(explanations))
- if source.endswith(','):
- source = source[:-1]
- source += ')'
- try:
- self.result = frame.eval(source, **vars)
- except passthroughex:
- raise
- except:
- raise Failure(self)
- if not node.is_builtin(frame) or not self.is_bool(frame):
- r = frame.repr(self.result)
- self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
-
-class Getattr(Interpretable):
- __view__ = ast.Getattr
-
- def eval(self, frame):
- expr = Interpretable(self.expr)
- expr.eval(frame)
- source = '__exprinfo_expr.%s' % self.attrname
- try:
- self.result = frame.eval(source, __exprinfo_expr=expr.result)
- except passthroughex:
- raise
- except:
- raise Failure(self)
- self.explanation = '%s.%s' % (expr.explanation, self.attrname)
- # if the attribute comes from the instance, its value is interesting
- source = ('hasattr(__exprinfo_expr, "__dict__") and '
- '%r in __exprinfo_expr.__dict__' % self.attrname)
- try:
- from_instance = frame.is_true(
- frame.eval(source, __exprinfo_expr=expr.result))
- except passthroughex:
- raise
- except:
- from_instance = True
- if from_instance:
- r = frame.repr(self.result)
- self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
-
-# == Re-interpretation of full statements ==
-
-class Assert(Interpretable):
- __view__ = ast.Assert
-
- def run(self, frame):
- test = Interpretable(self.test)
- test.eval(frame)
- # simplify 'assert False where False = ...'
- if (test.explanation.startswith('False\n{False = ') and
- test.explanation.endswith('\n}')):
- test.explanation = test.explanation[15:-2]
- # print the result as 'assert <explanation>'
- self.result = test.result
- self.explanation = 'assert ' + test.explanation
- if not frame.is_true(test.result):
- try:
- raise BuiltinAssertionError
- except passthroughex:
- raise
- except:
- raise Failure(self)
-
-class Assign(Interpretable):
- __view__ = ast.Assign
-
- def run(self, frame):
- expr = Interpretable(self.expr)
- expr.eval(frame)
- self.result = expr.result
- self.explanation = '... = ' + expr.explanation
- # fall-back-run the rest of the assignment
- ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
- mod = ast.Module(None, ast.Stmt([ass]))
- mod.filename = '<run>'
- co = pycodegen.ModuleCodeGenerator(mod).getCode()
- try:
- frame.exec_(co, __exprinfo_expr=expr.result)
- except passthroughex:
- raise
- except:
- raise Failure(self)
-
-class Discard(Interpretable):
- __view__ = ast.Discard
-
- def run(self, frame):
- expr = Interpretable(self.expr)
- expr.eval(frame)
- self.result = expr.result
- self.explanation = expr.explanation
-
-class Stmt(Interpretable):
- __view__ = ast.Stmt
-
- def run(self, frame):
- for stmt in self.nodes:
- stmt = Interpretable(stmt)
- stmt.run(frame)
-
-
-def report_failure(e):
- explanation = e.node.nice_explanation()
- if explanation:
- explanation = ", in: " + explanation
- else:
- explanation = ""
- sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
-
-def check(s, frame=None):
- if frame is None:
- frame = sys._getframe(1)
- frame = py.code.Frame(frame)
- expr = parse(s, 'eval')
- assert isinstance(expr, ast.Expression)
- node = Interpretable(expr.node)
- try:
- node.eval(frame)
- except passthroughex:
- raise
- except Failure:
- e = sys.exc_info()[1]
- report_failure(e)
- else:
- if not frame.is_true(node.result):
- sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
-
-
-###########################################################
-# API / Entry points
-# #########################################################
-
-def interpret(source, frame, should_fail=False):
- module = Interpretable(parse(source, 'exec').node)
- #print "got module", module
- if isinstance(frame, py.std.types.FrameType):
- frame = py.code.Frame(frame)
- try:
- module.run(frame)
- except Failure:
- e = sys.exc_info()[1]
- return getfailure(e)
- except passthroughex:
- raise
- except:
- import traceback
- traceback.print_exc()
- if should_fail:
- return ("(assertion failed, but when it was re-run for "
- "printing intermediate values, it did not fail. Suggestions: "
- "compute assert expression before the assert or use --nomagic)")
- else:
- return None
-
-def getmsg(excinfo):
- if isinstance(excinfo, tuple):
- excinfo = py.code.ExceptionInfo(excinfo)
- #frame, line = gettbline(tb)
- #frame = py.code.Frame(frame)
- #return interpret(line, frame)
-
- tb = excinfo.traceback[-1]
- source = str(tb.statement).strip()
- x = interpret(source, tb.frame, should_fail=True)
- if not isinstance(x, str):
- raise TypeError("interpret returned non-string %r" % (x,))
- return x
-
-def getfailure(e):
- explanation = e.node.nice_explanation()
- if str(e.value):
- lines = explanation.split('\n')
- lines[0] += " << %s" % (e.value,)
- explanation = '\n'.join(lines)
- text = "%s: %s" % (e.exc.__name__, explanation)
- if text.startswith('AssertionError: assert '):
- text = text[16:]
- return text
-
-def run(s, frame=None):
- if frame is None:
- frame = sys._getframe(1)
- frame = py.code.Frame(frame)
- module = Interpretable(parse(s, 'exec').node)
- try:
- module.run(frame)
- except Failure:
- e = sys.exc_info()[1]
- report_failure(e)
-
-
-if __name__ == '__main__':
- # example:
- def f():
- return 5
- def g():
- return 3
- def h(x):
- return 'never'
- check("f() * g() == 5")
- check("not f()")
- check("not (f() and g() or 0)")
- check("f() == g()")
- i = 4
- check("i == f()")
- check("len(f()) == 0")
- check("isinstance(2+3+4, float)")
-
- run("x = i")
- check("x == 5")
-
- run("assert not f(), 'oops'")
- run("a, b, c = 1, 2")
- run("a, b, c = f()")
-
- check("max([f(),g()]) == 4")
- check("'hello'[g()] == 'h'")
- run("'guk%d' % h(f())")
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from py._code.assertion import BuiltinAssertionError, _format_explanation
+
+passthroughex = py.builtin._sysex
+
+class Failure:
+ def __init__(self, node):
+ self.exc, self.value, self.tb = sys.exc_info()
+ self.node = node
+
+class View(object):
+ """View base class.
+
+ If C is a subclass of View, then C(x) creates a proxy object around
+ the object x. The actual class of the proxy is not C in general,
+ but a *subclass* of C determined by the rules below. To avoid confusion
+ we call view class the class of the proxy (a subclass of C, so of View)
+ and object class the class of x.
+
+ Attributes and methods not found in the proxy are automatically read on x.
+ Other operations like setting attributes are performed on the proxy, as
+ determined by its view class. The object x is available from the proxy
+ as its __obj__ attribute.
+
+ The view class selection is determined by the __view__ tuples and the
+ optional __viewkey__ method. By default, the selected view class is the
+ most specific subclass of C whose __view__ mentions the class of x.
+ If no such subclass is found, the search proceeds with the parent
+ object classes. For example, C(True) will first look for a subclass
+ of C with __view__ = (..., bool, ...) and only if it doesn't find any
+ look for one with __view__ = (..., int, ...), and then ..., object,...
+ If everything fails the class C itself is considered to be the default.
+
+ Alternatively, the view class selection can be driven by another aspect
+ of the object x, instead of the class of x, by overriding __viewkey__.
+ See last example at the end of this module.
+ """
+
+ _viewcache = {}
+ __view__ = ()
+
+ def __new__(rootclass, obj, *args, **kwds):
+ self = object.__new__(rootclass)
+ self.__obj__ = obj
+ self.__rootclass__ = rootclass
+ key = self.__viewkey__()
+ try:
+ self.__class__ = self._viewcache[key]
+ except KeyError:
+ self.__class__ = self._selectsubclass(key)
+ return self
+
+ def __getattr__(self, attr):
+ # attributes not found in the normal hierarchy rooted on View
+ # are looked up in the object's real class
+ return getattr(self.__obj__, attr)
+
+ def __viewkey__(self):
+ return self.__obj__.__class__
+
+ def __matchkey__(self, key, subclasses):
+ if inspect.isclass(key):
+ keys = inspect.getmro(key)
+ else:
+ keys = [key]
+ for key in keys:
+ result = [C for C in subclasses if key in C.__view__]
+ if result:
+ return result
+ return []
+
+ def _selectsubclass(self, key):
+ subclasses = list(enumsubclasses(self.__rootclass__))
+ for C in subclasses:
+ if not isinstance(C.__view__, tuple):
+ C.__view__ = (C.__view__,)
+ choices = self.__matchkey__(key, subclasses)
+ if not choices:
+ return self.__rootclass__
+ elif len(choices) == 1:
+ return choices[0]
+ else:
+ # combine the multiple choices
+ return type('?', tuple(choices), {})
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
+def enumsubclasses(cls):
+ for subcls in cls.__subclasses__():
+ for subsubclass in enumsubclasses(subcls):
+ yield subsubclass
+ yield cls
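# Illustrative sketch (not part of py/_code/_assertionold.py): the dispatch
# described in the View docstring picks the most specific subclass whose
# __view__ tuple mentions the wrapped object's class, walking the object's
# MRO if needed.  NumView and BoolView are hypothetical names.
class NumView(View):
    __view__ = (int,)

class BoolView(NumView):
    __view__ = (bool,)

assert type(NumView(True)) is BoolView     # bool is more specific than int
assert type(NumView(7)) is NumView         # a plain int falls back to NumView
assert NumView(7).bit_length() == 3        # unknown attributes proxy to the int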
+
+
+class Interpretable(View):
+ """A parse tree node with a few extra methods."""
+ explanation = None
+
+ def is_builtin(self, frame):
+ return False
+
+ def eval(self, frame):
+ # fall-back for unknown expression nodes
+ try:
+ expr = ast.Expression(self.__obj__)
+ expr.filename = '<eval>'
+ self.__obj__.filename = '<eval>'
+ co = pycodegen.ExpressionCodeGenerator(expr).getCode()
+ result = frame.eval(co)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ self.result = result
+ self.explanation = self.explanation or frame.repr(self.result)
+
+ def run(self, frame):
+ # fall-back for unknown statement nodes
+ try:
+ expr = ast.Module(None, ast.Stmt([self.__obj__]))
+ expr.filename = '<run>'
+ co = pycodegen.ModuleCodeGenerator(expr).getCode()
+ frame.exec_(co)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ def nice_explanation(self):
+ return _format_explanation(self.explanation)
+
+
+class Name(Interpretable):
+ __view__ = ast.Name
+
+ def is_local(self, frame):
+ source = '%r in locals() is not globals()' % self.name
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def is_global(self, frame):
+ source = '%r in globals()' % self.name
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def is_builtin(self, frame):
+ source = '%r not in locals() and %r not in globals()' % (
+ self.name, self.name)
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def eval(self, frame):
+ super(Name, self).eval(frame)
+ if not self.is_local(frame):
+ self.explanation = self.name
+
+class Compare(Interpretable):
+ __view__ = ast.Compare
+
+ def eval(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ for operation, expr2 in self.ops:
+ if hasattr(self, 'result'):
+ # shortcutting in chained expressions
+ if not frame.is_true(self.result):
+ break
+ expr2 = Interpretable(expr2)
+ expr2.eval(frame)
+ self.explanation = "%s %s %s" % (
+ expr.explanation, operation, expr2.explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % operation
+ try:
+ self.result = frame.eval(source,
+ __exprinfo_left=expr.result,
+ __exprinfo_right=expr2.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ expr = expr2
+
+class And(Interpretable):
+ __view__ = ast.And
+
+ def eval(self, frame):
+ explanations = []
+ for expr in self.nodes:
+ expr = Interpretable(expr)
+ expr.eval(frame)
+ explanations.append(expr.explanation)
+ self.result = expr.result
+ if not frame.is_true(expr.result):
+ break
+ self.explanation = '(' + ' and '.join(explanations) + ')'
+
+class Or(Interpretable):
+ __view__ = ast.Or
+
+ def eval(self, frame):
+ explanations = []
+ for expr in self.nodes:
+ expr = Interpretable(expr)
+ expr.eval(frame)
+ explanations.append(expr.explanation)
+ self.result = expr.result
+ if frame.is_true(expr.result):
+ break
+ self.explanation = '(' + ' or '.join(explanations) + ')'
+
+
+# == Unary operations ==
+keepalive = []
+for astclass, astpattern in {
+ ast.Not : 'not __exprinfo_expr',
+ ast.Invert : '(~__exprinfo_expr)',
+ }.items():
+
+ class UnaryArith(Interpretable):
+ __view__ = astclass
+
+ def eval(self, frame, astpattern=astpattern):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.explanation = astpattern.replace('__exprinfo_expr',
+ expr.explanation)
+ try:
+ self.result = frame.eval(astpattern,
+ __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ keepalive.append(UnaryArith)
+
+# == Binary operations ==
+for astclass, astpattern in {
+ ast.Add : '(__exprinfo_left + __exprinfo_right)',
+ ast.Sub : '(__exprinfo_left - __exprinfo_right)',
+ ast.Mul : '(__exprinfo_left * __exprinfo_right)',
+ ast.Div : '(__exprinfo_left / __exprinfo_right)',
+ ast.Mod : '(__exprinfo_left % __exprinfo_right)',
+ ast.Power : '(__exprinfo_left ** __exprinfo_right)',
+ }.items():
+
+ class BinaryArith(Interpretable):
+ __view__ = astclass
+
+ def eval(self, frame, astpattern=astpattern):
+ left = Interpretable(self.left)
+ left.eval(frame)
+ right = Interpretable(self.right)
+ right.eval(frame)
+ self.explanation = (astpattern
+ .replace('__exprinfo_left', left .explanation)
+ .replace('__exprinfo_right', right.explanation))
+ try:
+ self.result = frame.eval(astpattern,
+ __exprinfo_left=left.result,
+ __exprinfo_right=right.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ keepalive.append(BinaryArith)
+
+
+class CallFunc(Interpretable):
+ __view__ = ast.CallFunc
+
+ def is_bool(self, frame):
+ source = 'isinstance(__exprinfo_value, bool)'
+ try:
+ return frame.is_true(frame.eval(source,
+ __exprinfo_value=self.result))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def eval(self, frame):
+ node = Interpretable(self.node)
+ node.eval(frame)
+ explanations = []
+ vars = {'__exprinfo_fn': node.result}
+ source = '__exprinfo_fn('
+ for a in self.args:
+ if isinstance(a, ast.Keyword):
+ keyword = a.name
+ a = a.expr
+ else:
+ keyword = None
+ a = Interpretable(a)
+ a.eval(frame)
+ argname = '__exprinfo_%d' % len(vars)
+ vars[argname] = a.result
+ if keyword is None:
+ source += argname + ','
+ explanations.append(a.explanation)
+ else:
+ source += '%s=%s,' % (keyword, argname)
+ explanations.append('%s=%s' % (keyword, a.explanation))
+ if self.star_args:
+ star_args = Interpretable(self.star_args)
+ star_args.eval(frame)
+ argname = '__exprinfo_star'
+ vars[argname] = star_args.result
+ source += '*' + argname + ','
+ explanations.append('*' + star_args.explanation)
+ if self.dstar_args:
+ dstar_args = Interpretable(self.dstar_args)
+ dstar_args.eval(frame)
+ argname = '__exprinfo_kwds'
+ vars[argname] = dstar_args.result
+ source += '**' + argname + ','
+ explanations.append('**' + dstar_args.explanation)
+ self.explanation = "%s(%s)" % (
+ node.explanation, ', '.join(explanations))
+ if source.endswith(','):
+ source = source[:-1]
+ source += ')'
+ try:
+ self.result = frame.eval(source, **vars)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ if not node.is_builtin(frame) or not self.is_bool(frame):
+ r = frame.repr(self.result)
+ self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+class Getattr(Interpretable):
+ __view__ = ast.Getattr
+
+ def eval(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ source = '__exprinfo_expr.%s' % self.attrname
+ try:
+ self.result = frame.eval(source, __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ self.explanation = '%s.%s' % (expr.explanation, self.attrname)
+ # if the attribute comes from the instance, its value is interesting
+ source = ('hasattr(__exprinfo_expr, "__dict__") and '
+ '%r in __exprinfo_expr.__dict__' % self.attrname)
+ try:
+ from_instance = frame.is_true(
+ frame.eval(source, __exprinfo_expr=expr.result))
+ except passthroughex:
+ raise
+ except:
+ from_instance = True
+ if from_instance:
+ r = frame.repr(self.result)
+ self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
+class Assert(Interpretable):
+ __view__ = ast.Assert
+
+ def run(self, frame):
+ test = Interpretable(self.test)
+ test.eval(frame)
+ # simplify 'assert False where False = ...'
+ if (test.explanation.startswith('False\n{False = ') and
+ test.explanation.endswith('\n}')):
+ test.explanation = test.explanation[15:-2]
+ # print the result as 'assert <explanation>'
+ self.result = test.result
+ self.explanation = 'assert ' + test.explanation
+ if not frame.is_true(test.result):
+ try:
+ raise BuiltinAssertionError
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+class Assign(Interpretable):
+ __view__ = ast.Assign
+
+ def run(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.result = expr.result
+ self.explanation = '... = ' + expr.explanation
+ # fall-back-run the rest of the assignment
+ ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
+ mod = ast.Module(None, ast.Stmt([ass]))
+ mod.filename = '<run>'
+ co = pycodegen.ModuleCodeGenerator(mod).getCode()
+ try:
+ frame.exec_(co, __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+class Discard(Interpretable):
+ __view__ = ast.Discard
+
+ def run(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.result = expr.result
+ self.explanation = expr.explanation
+
+class Stmt(Interpretable):
+ __view__ = ast.Stmt
+
+ def run(self, frame):
+ for stmt in self.nodes:
+ stmt = Interpretable(stmt)
+ stmt.run(frame)
+
+
+def report_failure(e):
+ explanation = e.node.nice_explanation()
+ if explanation:
+ explanation = ", in: " + explanation
+ else:
+ explanation = ""
+ sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
+
+def check(s, frame=None):
+ if frame is None:
+ frame = sys._getframe(1)
+ frame = py.code.Frame(frame)
+ expr = parse(s, 'eval')
+ assert isinstance(expr, ast.Expression)
+ node = Interpretable(expr.node)
+ try:
+ node.eval(frame)
+ except passthroughex:
+ raise
+ except Failure:
+ e = sys.exc_info()[1]
+ report_failure(e)
+ else:
+ if not frame.is_true(node.result):
+ sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
+def interpret(source, frame, should_fail=False):
+ module = Interpretable(parse(source, 'exec').node)
+ #print "got module", module
+ if isinstance(frame, py.std.types.FrameType):
+ frame = py.code.Frame(frame)
+ try:
+ module.run(frame)
+ except Failure:
+ e = sys.exc_info()[1]
+ return getfailure(e)
+ except passthroughex:
+ raise
+ except:
+ import traceback
+ traceback.print_exc()
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --nomagic)")
+ else:
+ return None
+
+def getmsg(excinfo):
+ if isinstance(excinfo, tuple):
+ excinfo = py.code.ExceptionInfo(excinfo)
+ #frame, line = gettbline(tb)
+ #frame = py.code.Frame(frame)
+ #return interpret(line, frame)
+
+ tb = excinfo.traceback[-1]
+ source = str(tb.statement).strip()
+ x = interpret(source, tb.frame, should_fail=True)
+ if not isinstance(x, str):
+ raise TypeError("interpret returned non-string %r" % (x,))
+ return x
+
+def getfailure(e):
+ explanation = e.node.nice_explanation()
+ if str(e.value):
+ lines = explanation.split('\n')
+ lines[0] += " << %s" % (e.value,)
+ explanation = '\n'.join(lines)
+ text = "%s: %s" % (e.exc.__name__, explanation)
+ if text.startswith('AssertionError: assert '):
+ text = text[16:]
+ return text
+
+def run(s, frame=None):
+ if frame is None:
+ frame = sys._getframe(1)
+ frame = py.code.Frame(frame)
+ module = Interpretable(parse(s, 'exec').node)
+ try:
+ module.run(frame)
+ except Failure:
+ e = sys.exc_info()[1]
+ report_failure(e)
+
+
+if __name__ == '__main__':
+ # example:
+ def f():
+ return 5
+ def g():
+ return 3
+ def h(x):
+ return 'never'
+ check("f() * g() == 5")
+ check("not f()")
+ check("not (f() and g() or 0)")
+ check("f() == g()")
+ i = 4
+ check("i == f()")
+ check("len(f()) == 0")
+ check("isinstance(2+3+4, float)")
+
+ run("x = i")
+ check("x == 5")
+
+ run("assert not f(), 'oops'")
+ run("a, b, c = 1, 2")
+ run("a, b, c = f()")
+
+ check("max([f(),g()]) == 4")
+ check("'hello'[g()] == 'h'")
+ run("'guk%d' % h(f())")
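A minimal usage sketch for the entry points above (a sketch only: it assumes Python 2, since _assertionold builds on the old compiler package, and must be run from a file so the failing statement's source can be found; failing() is an illustrative helper, not part of the module):

import sys
from py._code._assertionold import getmsg

def failing():
    left, right = 3, 4
    # a plain assert; getmsg() re-runs the statement to explain it
    assert left == right

try:
    failing()
except AssertionError:
    # getmsg() also accepts a raw sys.exc_info() tuple; it locates the
    # failing statement via py.code.Traceback and calls interpret() on it
    print(getmsg(sys.exc_info()))
    # prints roughly: assert 3 == 4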
diff --git a/py/_code/_py2traceback.py b/py/_code/_py2traceback.py
index d65e27c..227cb96 100644
--- a/py/_code/_py2traceback.py
+++ b/py/_code/_py2traceback.py
@@ -1,79 +1,79 @@
-# copied from python-2.7.3's traceback.py
-# CHANGES:
-# - some_str is replaced, trying to create unicode strings
-#
-import types
-
-def format_exception_only(etype, value):
- """Format the exception part of a traceback.
-
- The arguments are the exception type and value such as given by
- sys.last_type and sys.last_value. The return value is a list of
- strings, each ending in a newline.
-
- Normally, the list contains a single string; however, for
- SyntaxError exceptions, it contains several lines that (when
- printed) display detailed information about where the syntax
- error occurred.
-
- The message indicating which exception occurred is always the last
- string in the list.
-
- """
-
- # An instance should not have a meaningful value parameter, but
- # sometimes does, particularly for string exceptions, such as
- # >>> raise string1, string2 # deprecated
- #
- # Clear these out first because issubtype(string1, SyntaxError)
- # would throw another exception and mask the original problem.
- if (isinstance(etype, BaseException) or
- isinstance(etype, types.InstanceType) or
- etype is None or type(etype) is str):
- return [_format_final_exc_line(etype, value)]
-
- stype = etype.__name__
-
- if not issubclass(etype, SyntaxError):
- return [_format_final_exc_line(stype, value)]
-
- # It was a syntax error; show exactly where the problem was found.
- lines = []
- try:
- msg, (filename, lineno, offset, badline) = value.args
- except Exception:
- pass
- else:
- filename = filename or "<string>"
- lines.append(' File "%s", line %d\n' % (filename, lineno))
- if badline is not None:
- lines.append(' %s\n' % badline.strip())
- if offset is not None:
- caretspace = badline.rstrip('\n')[:offset].lstrip()
- # non-space whitespace (likes tabs) must be kept for alignment
- caretspace = ((c.isspace() and c or ' ') for c in caretspace)
- # only three spaces to account for offset1 == pos 0
- lines.append(' %s^\n' % ''.join(caretspace))
- value = msg
-
- lines.append(_format_final_exc_line(stype, value))
- return lines
-
-def _format_final_exc_line(etype, value):
- """Return a list of a single line -- normal case for format_exception_only"""
- valuestr = _some_str(value)
- if value is None or not valuestr:
- line = "%s\n" % etype
- else:
- line = "%s: %s\n" % (etype, valuestr)
- return line
-
-def _some_str(value):
- try:
- return unicode(value)
- except Exception:
- try:
- return str(value)
- except Exception:
- pass
- return '<unprintable %s object>' % type(value).__name__
+# copied from python-2.7.3's traceback.py
+# CHANGES:
+# - some_str is replaced, trying to create unicode strings
+#
+import types
+
+def format_exception_only(etype, value):
+ """Format the exception part of a traceback.
+
+ The arguments are the exception type and value such as given by
+ sys.last_type and sys.last_value. The return value is a list of
+ strings, each ending in a newline.
+
+ Normally, the list contains a single string; however, for
+ SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax
+ error occurred.
+
+ The message indicating which exception occurred is always the last
+ string in the list.
+
+ """
+
+ # An instance should not have a meaningful value parameter, but
+ # sometimes does, particularly for string exceptions, such as
+ # >>> raise string1, string2 # deprecated
+ #
+ # Clear these out first because issubtype(string1, SyntaxError)
+ # would throw another exception and mask the original problem.
+ if (isinstance(etype, BaseException) or
+ isinstance(etype, types.InstanceType) or
+ etype is None or type(etype) is str):
+ return [_format_final_exc_line(etype, value)]
+
+ stype = etype.__name__
+
+ if not issubclass(etype, SyntaxError):
+ return [_format_final_exc_line(stype, value)]
+
+ # It was a syntax error; show exactly where the problem was found.
+ lines = []
+ try:
+ msg, (filename, lineno, offset, badline) = value.args
+ except Exception:
+ pass
+ else:
+ filename = filename or "<string>"
+ lines.append(' File "%s", line %d\n' % (filename, lineno))
+ if badline is not None:
+ lines.append(' %s\n' % badline.strip())
+ if offset is not None:
+ caretspace = badline.rstrip('\n')[:offset].lstrip()
+ # non-space whitespace (like tabs) must be kept for alignment
+ caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+ # only three spaces to account for offset1 == pos 0
+ lines.append(' %s^\n' % ''.join(caretspace))
+ value = msg
+
+ lines.append(_format_final_exc_line(stype, value))
+ return lines
+
+def _format_final_exc_line(etype, value):
+ """Return a list of a single line -- normal case for format_exception_only"""
+ valuestr = _some_str(value)
+ if value is None or not valuestr:
+ line = "%s\n" % etype
+ else:
+ line = "%s: %s\n" % (etype, valuestr)
+ return line
+
+def _some_str(value):
+ try:
+ return unicode(value)
+ except Exception:
+ try:
+ return str(value)
+ except Exception:
+ pass
+ return '<unprintable %s object>' % type(value).__name__
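A brief sketch of how this Python-2 fallback is exercised (assumptions: Python 2 only, and the '<demo>' filename is purely illustrative; exact wording and caret placement depend on the interpreter):

import sys
from py._code._py2traceback import format_exception_only

try:
    compile("1 +", "<demo>", "eval")   # provoke a SyntaxError
except SyntaxError:
    etype, value = sys.exc_info()[:2]
    # returns a list of newline-terminated strings, including the
    # offending line and a caret marker for SyntaxError
    for line in format_exception_only(etype, value):
        sys.stdout.write(line)
# prints roughly:
#   File "<demo>", line 1
#     1 +
#        ^
# SyntaxError: unexpected EOF while parsing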
diff --git a/py/_code/assertion.py b/py/_code/assertion.py
index 4ce80c7..3bcb8cd 100644
--- a/py/_code/assertion.py
+++ b/py/_code/assertion.py
@@ -1,94 +1,94 @@
-import sys
-import py
-
-BuiltinAssertionError = py.builtin.builtins.AssertionError
-
-_reprcompare = None # if set, will be called by assert reinterp for comparison ops
-
-def _format_explanation(explanation):
- """This formats an explanation
-
- Normally all embedded newlines are escaped, however there are
- three exceptions: \n{, \n} and \n~. The first two are intended
- cover nested explanations, see function and attribute explanations
- for examples (.visit_Call(), visit_Attribute()). The last one is
- for when one explanation needs to span multiple lines, e.g. when
- displaying diffs.
- """
- raw_lines = (explanation or '').split('\n')
- # escape newlines not followed by {, } and ~
- lines = [raw_lines[0]]
- for l in raw_lines[1:]:
- if l.startswith('{') or l.startswith('}') or l.startswith('~'):
- lines.append(l)
- else:
- lines[-1] += '\\n' + l
-
- result = lines[:1]
- stack = [0]
- stackcnt = [0]
- for line in lines[1:]:
- if line.startswith('{'):
- if stackcnt[-1]:
- s = 'and '
- else:
- s = 'where '
- stack.append(len(result))
- stackcnt[-1] += 1
- stackcnt.append(0)
- result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
- elif line.startswith('}'):
- assert line.startswith('}')
- stack.pop()
- stackcnt.pop()
- result[stack[-1]] += line[1:]
- else:
- assert line.startswith('~')
- result.append(' '*len(stack) + line[1:])
- assert len(stack) == 1
- return '\n'.join(result)
-
-
-class AssertionError(BuiltinAssertionError):
- def __init__(self, *args):
- BuiltinAssertionError.__init__(self, *args)
- if args:
- try:
- self.msg = str(args[0])
- except py.builtin._sysex:
- raise
- except:
- self.msg = "<[broken __repr__] %s at %0xd>" %(
- args[0].__class__, id(args[0]))
- else:
- f = py.code.Frame(sys._getframe(1))
- try:
- source = f.code.fullsource
- if source is not None:
- try:
- source = source.getstatement(f.lineno, assertion=True)
- except IndexError:
- source = None
- else:
- source = str(source.deindent()).strip()
- except py.error.ENOENT:
- source = None
- # this can also occur during reinterpretation, when the
- # co_filename is set to "<run>".
- if source:
- self.msg = reinterpret(source, f, should_fail=True)
- else:
- self.msg = "<could not determine information>"
- if not self.args:
- self.args = (self.msg,)
-
-if sys.version_info > (3, 0):
- AssertionError.__module__ = "builtins"
- reinterpret_old = "old reinterpretation not available for py3"
-else:
- from py._code._assertionold import interpret as reinterpret_old
-if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
- from py._code._assertionnew import interpret as reinterpret
-else:
- reinterpret = reinterpret_old
-
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+_reprcompare = None # if set, will be called by assert reinterp for comparison ops
+
+def _format_explanation(explanation):
+ """This formats an explanation
+
+ Normally all embedded newlines are escaped, however there are
+ three exceptions: \n{, \n} and \n~. The first two are intended
+ to cover nested explanations, see function and attribute explanations
+ for examples (.visit_Call(), visit_Attribute()). The last one is
+ for when one explanation needs to span multiple lines, e.g. when
+ displaying diffs.
+ """
+ raw_lines = (explanation or '').split('\n')
+ # escape newlines not followed by {, } and ~
+ lines = [raw_lines[0]]
+ for l in raw_lines[1:]:
+ if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+ lines.append(l)
+ else:
+ lines[-1] += '\\n' + l
+
+ result = lines[:1]
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith('{'):
+ if stackcnt[-1]:
+ s = 'and '
+ else:
+ s = 'where '
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ elif line.startswith('}'):
+ assert line.startswith('}')
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ else:
+ assert line.startswith('~')
+ result.append(' '*len(stack) + line[1:])
+ assert len(stack) == 1
+ return '\n'.join(result)
+
+
+class AssertionError(BuiltinAssertionError):
+ def __init__(self, *args):
+ BuiltinAssertionError.__init__(self, *args)
+ if args:
+ try:
+ self.msg = str(args[0])
+ except py.builtin._sysex:
+ raise
+ except:
+ self.msg = "<[broken __repr__] %s at 0x%x>" %(
+ args[0].__class__, id(args[0]))
+ else:
+ f = py.code.Frame(sys._getframe(1))
+ try:
+ source = f.code.fullsource
+ if source is not None:
+ try:
+ source = source.getstatement(f.lineno, assertion=True)
+ except IndexError:
+ source = None
+ else:
+ source = str(source.deindent()).strip()
+ except py.error.ENOENT:
+ source = None
+ # this can also occur during reinterpretation, when the
+ # co_filename is set to "<run>".
+ if source:
+ self.msg = reinterpret(source, f, should_fail=True)
+ else:
+ self.msg = "<could not determine information>"
+ if not self.args:
+ self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+ AssertionError.__module__ = "builtins"
+ reinterpret_old = "old reinterpretation not available for py3"
+else:
+ from py._code._assertionold import interpret as reinterpret_old
+if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
+ from py._code._assertionnew import interpret as reinterpret
+else:
+ reinterpret = reinterpret_old
+
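For illustration, a small sketch of the explanation markup that _format_explanation() consumes: nested "\n{ ... \n}" blocks become indented "where ..." lines (the explanation string below is hand-written, mimicking what the re-interpretation code produces):

from py._code.assertion import _format_explanation

# "{x = f()\n}" is a nested explanation for the sub-expression x
explanation = "assert 42 == x\n{x = f()\n}"
print(_format_explanation(explanation))
# prints roughly:
# assert 42 == x
#  + where x = f()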
diff --git a/py/_code/code.py b/py/_code/code.py
index f14c562..9a64707 100644
--- a/py/_code/code.py
+++ b/py/_code/code.py
@@ -1,787 +1,787 @@
-import py
-import sys
-from inspect import CO_VARARGS, CO_VARKEYWORDS
-
-builtin_repr = repr
-
-reprlib = py.builtin._tryimport('repr', 'reprlib')
-
-if sys.version_info[0] >= 3:
- from traceback import format_exception_only
-else:
- from py._code._py2traceback import format_exception_only
-
-class Code(object):
- """ wrapper around Python code objects """
- def __init__(self, rawcode):
- if not hasattr(rawcode, "co_filename"):
- rawcode = py.code.getrawcode(rawcode)
- try:
- self.filename = rawcode.co_filename
- self.firstlineno = rawcode.co_firstlineno - 1
- self.name = rawcode.co_name
- except AttributeError:
- raise TypeError("not a code object: %r" %(rawcode,))
- self.raw = rawcode
-
- def __eq__(self, other):
- return self.raw == other.raw
-
- def __ne__(self, other):
- return not self == other
-
- @property
- def path(self):
- """ return a path object pointing to source code (note that it
- might not point to an actually existing file). """
- p = py.path.local(self.raw.co_filename)
- # maybe don't try this checking
- if not p.check():
- # XXX maybe try harder like the weird logic
- # in the standard lib [linecache.updatecache] does?
- p = self.raw.co_filename
- return p
-
- @property
- def fullsource(self):
- """ return a py.code.Source object for the full source file of the code
- """
- from py._code import source
- full, _ = source.findsource(self.raw)
- return full
-
- def source(self):
- """ return a py.code.Source object for the code object's source only
- """
- # return source only for that part of code
- return py.code.Source(self.raw)
-
- def getargs(self, var=False):
- """ return a tuple with the argument names for the code object
-
- if 'var' is set True also return the names of the variable and
- keyword arguments when present
- """
- # handfull shortcut for getting args
- raw = self.raw
- argcount = raw.co_argcount
- if var:
- argcount += raw.co_flags & CO_VARARGS
- argcount += raw.co_flags & CO_VARKEYWORDS
- return raw.co_varnames[:argcount]
-
-class Frame(object):
- """Wrapper around a Python frame holding f_locals and f_globals
- in which expressions can be evaluated."""
-
- def __init__(self, frame):
- self.lineno = frame.f_lineno - 1
- self.f_globals = frame.f_globals
- self.f_locals = frame.f_locals
- self.raw = frame
- self.code = py.code.Code(frame.f_code)
-
- @property
- def statement(self):
- """ statement this frame is at """
- if self.code.fullsource is None:
- return py.code.Source("")
- return self.code.fullsource.getstatement(self.lineno)
-
- def eval(self, code, **vars):
- """ evaluate 'code' in the frame
-
- 'vars' are optional additional local variables
-
- returns the result of the evaluation
- """
- f_locals = self.f_locals.copy()
- f_locals.update(vars)
- return eval(code, self.f_globals, f_locals)
-
- def exec_(self, code, **vars):
- """ exec 'code' in the frame
-
- 'vars' are optiona; additional local variables
- """
- f_locals = self.f_locals.copy()
- f_locals.update(vars)
- py.builtin.exec_(code, self.f_globals, f_locals )
-
- def repr(self, object):
- """ return a 'safe' (non-recursive, one-line) string repr for 'object'
- """
- return py.io.saferepr(object)
-
- def is_true(self, object):
- return object
-
- def getargs(self, var=False):
- """ return a list of tuples (name, value) for all arguments
-
- if 'var' is set True also include the variable and keyword
- arguments when present
- """
- retval = []
- for arg in self.code.getargs(var):
- try:
- retval.append((arg, self.f_locals[arg]))
- except KeyError:
- pass # this can occur when using Psyco
- return retval
-
-class TracebackEntry(object):
- """ a single entry in a traceback """
-
- _repr_style = None
- exprinfo = None
-
- def __init__(self, rawentry):
- self._rawentry = rawentry
- self.lineno = rawentry.tb_lineno - 1
-
- def set_repr_style(self, mode):
- assert mode in ("short", "long")
- self._repr_style = mode
-
- @property
- def frame(self):
- return py.code.Frame(self._rawentry.tb_frame)
-
- @property
- def relline(self):
- return self.lineno - self.frame.code.firstlineno
-
- def __repr__(self):
- return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
-
- @property
- def statement(self):
- """ py.code.Source object for the current statement """
- source = self.frame.code.fullsource
- return source.getstatement(self.lineno)
-
- @property
- def path(self):
- """ path to the source code """
- return self.frame.code.path
-
- def getlocals(self):
- return self.frame.f_locals
- locals = property(getlocals, None, None, "locals of underlaying frame")
-
- def reinterpret(self):
- """Reinterpret the failing statement and returns a detailed information
- about what operations are performed."""
- if self.exprinfo is None:
- source = str(self.statement).strip()
- x = py.code._reinterpret(source, self.frame, should_fail=True)
- if not isinstance(x, str):
- raise TypeError("interpret returned non-string %r" % (x,))
- self.exprinfo = x
- return self.exprinfo
-
- def getfirstlinesource(self):
- # on Jython this firstlineno can be -1 apparently
- return max(self.frame.code.firstlineno, 0)
-
- def getsource(self, astcache=None):
- """ return failing source code. """
- # we use the passed in astcache to not reparse asttrees
- # within exception info printing
- from py._code.source import getstatementrange_ast
- source = self.frame.code.fullsource
- if source is None:
- return None
- key = astnode = None
- if astcache is not None:
- key = self.frame.code.path
- if key is not None:
- astnode = astcache.get(key, None)
- start = self.getfirstlinesource()
- try:
- astnode, _, end = getstatementrange_ast(self.lineno, source,
- astnode=astnode)
- except SyntaxError:
- end = self.lineno + 1
- else:
- if key is not None:
- astcache[key] = astnode
- return source[start:end]
-
- source = property(getsource)
-
- def ishidden(self):
- """ return True if the current frame has a var __tracebackhide__
- resolving to True
-
- mostly for internal use
- """
- try:
- return self.frame.f_locals['__tracebackhide__']
- except KeyError:
- try:
- return self.frame.f_globals['__tracebackhide__']
- except KeyError:
- return False
-
- def __str__(self):
- try:
- fn = str(self.path)
- except py.error.Error:
- fn = '???'
- name = self.frame.code.name
- try:
- line = str(self.statement).lstrip()
- except KeyboardInterrupt:
- raise
- except:
- line = "???"
- return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
-
- def name(self):
- return self.frame.code.raw.co_name
- name = property(name, None, None, "co_name of underlaying code")
-
-class Traceback(list):
- """ Traceback objects encapsulate and offer higher level
- access to Traceback entries.
- """
- Entry = TracebackEntry
- def __init__(self, tb):
- """ initialize from given python traceback object. """
- if hasattr(tb, 'tb_next'):
- def f(cur):
- while cur is not None:
- yield self.Entry(cur)
- cur = cur.tb_next
- list.__init__(self, f(tb))
- else:
- list.__init__(self, tb)
-
- def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
- """ return a Traceback instance wrapping part of this Traceback
-
- by provding any combination of path, lineno and firstlineno, the
- first frame to start the to-be-returned traceback is determined
-
- this allows cutting the first part of a Traceback instance e.g.
- for formatting reasons (removing some uninteresting bits that deal
- with handling of the exception/traceback)
- """
- for x in self:
- code = x.frame.code
- codepath = code.path
- if ((path is None or codepath == path) and
- (excludepath is None or not hasattr(codepath, 'relto') or
- not codepath.relto(excludepath)) and
- (lineno is None or x.lineno == lineno) and
- (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
- return Traceback(x._rawentry)
- return self
-
- def __getitem__(self, key):
- val = super(Traceback, self).__getitem__(key)
- if isinstance(key, type(slice(0))):
- val = self.__class__(val)
- return val
-
- def filter(self, fn=lambda x: not x.ishidden()):
- """ return a Traceback instance with certain items removed
-
- fn is a function that gets a single argument, a TracebackItem
- instance, and should return True when the item should be added
- to the Traceback, False when not
-
- by default this removes all the TracebackItems which are hidden
- (see ishidden() above)
- """
- return Traceback(filter(fn, self))
-
- def getcrashentry(self):
- """ return last non-hidden traceback entry that lead
- to the exception of a traceback.
- """
- for i in range(-1, -len(self)-1, -1):
- entry = self[i]
- if not entry.ishidden():
- return entry
- return self[-1]
-
- def recursionindex(self):
- """ return the index of the frame/TracebackItem where recursion
- originates if appropriate, None if no recursion occurred
- """
- cache = {}
- for i, entry in enumerate(self):
- # id for the code.raw is needed to work around
- # the strange metaprogramming in the decorator lib from pypi
- # which generates code objects that have hash/value equality
- #XXX needs a test
- key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
- #print "checking for recursion at", key
- l = cache.setdefault(key, [])
- if l:
- f = entry.frame
- loc = f.f_locals
- for otherloc in l:
- if f.is_true(f.eval(co_equal,
- __recursioncache_locals_1=loc,
- __recursioncache_locals_2=otherloc)):
- return i
- l.append(entry.frame.f_locals)
- return None
-
-co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
- '?', 'eval')
-
-class ExceptionInfo(object):
- """ wraps sys.exc_info() objects and offers
- help for navigating the traceback.
- """
- _striptext = ''
- def __init__(self, tup=None, exprinfo=None):
- if tup is None:
- tup = sys.exc_info()
- if exprinfo is None and isinstance(tup[1], AssertionError):
- exprinfo = getattr(tup[1], 'msg', None)
- if exprinfo is None:
- exprinfo = str(tup[1])
- if exprinfo and exprinfo.startswith('assert '):
- self._striptext = 'AssertionError: '
- self._excinfo = tup
- #: the exception class
- self.type = tup[0]
- #: the exception instance
- self.value = tup[1]
- #: the exception raw traceback
- self.tb = tup[2]
- #: the exception type name
- self.typename = self.type.__name__
- #: the exception traceback (py.code.Traceback instance)
- self.traceback = py.code.Traceback(self.tb)
-
- def __repr__(self):
- return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
-
- def exconly(self, tryshort=False):
- """ return the exception as a string
-
- when 'tryshort' resolves to True, and the exception is a
- py.code._AssertionError, only the actual exception part of
- the exception representation is returned (so 'AssertionError: ' is
- removed from the beginning)
- """
- lines = format_exception_only(self.type, self.value)
- text = ''.join(lines)
- text = text.rstrip()
- if tryshort:
- if text.startswith(self._striptext):
- text = text[len(self._striptext):]
- return text
-
- def errisinstance(self, exc):
- """ return True if the exception is an instance of exc """
- return isinstance(self.value, exc)
-
- def _getreprcrash(self):
- exconly = self.exconly(tryshort=True)
- entry = self.traceback.getcrashentry()
- path, lineno = entry.frame.code.raw.co_filename, entry.lineno
- return ReprFileLocation(path, lineno+1, exconly)
-
- def getrepr(self, showlocals=False, style="long",
- abspath=False, tbfilter=True, funcargs=False):
- """ return str()able representation of this exception info.
- showlocals: show locals per traceback entry
- style: long|short|no|native traceback style
- tbfilter: hide entries (where __tracebackhide__ is true)
-
- in case of style==native, tbfilter and showlocals is ignored.
- """
- if style == 'native':
- return ReprExceptionInfo(ReprTracebackNative(
- py.std.traceback.format_exception(
- self.type,
- self.value,
- self.traceback[0]._rawentry,
- )), self._getreprcrash())
-
- fmt = FormattedExcinfo(showlocals=showlocals, style=style,
- abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
- return fmt.repr_excinfo(self)
-
- def __str__(self):
- entry = self.traceback[-1]
- loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
- return str(loc)
-
- def __unicode__(self):
- entry = self.traceback[-1]
- loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
- return unicode(loc)
-
-
-class FormattedExcinfo(object):
- """ presenting information about failing Functions and Generators. """
- # for traceback entries
- flow_marker = ">"
- fail_marker = "E"
-
- def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
- self.showlocals = showlocals
- self.style = style
- self.tbfilter = tbfilter
- self.funcargs = funcargs
- self.abspath = abspath
- self.astcache = {}
-
- def _getindent(self, source):
- # figure out indent for given source
- try:
- s = str(source.getstatement(len(source)-1))
- except KeyboardInterrupt:
- raise
- except:
- try:
- s = str(source[-1])
- except KeyboardInterrupt:
- raise
- except:
- return 0
- return 4 + (len(s) - len(s.lstrip()))
-
- def _getentrysource(self, entry):
- source = entry.getsource(self.astcache)
- if source is not None:
- source = source.deindent()
- return source
-
- def _saferepr(self, obj):
- return py.io.saferepr(obj)
-
- def repr_args(self, entry):
- if self.funcargs:
- args = []
- for argname, argvalue in entry.frame.getargs(var=True):
- args.append((argname, self._saferepr(argvalue)))
- return ReprFuncArgs(args)
-
- def get_source(self, source, line_index=-1, excinfo=None, short=False):
- """ return formatted and marked up source lines. """
- lines = []
- if source is None or line_index >= len(source.lines):
- source = py.code.Source("???")
- line_index = 0
- if line_index < 0:
- line_index += len(source)
- space_prefix = " "
- if short:
- lines.append(space_prefix + source.lines[line_index].strip())
- else:
- for line in source.lines[:line_index]:
- lines.append(space_prefix + line)
- lines.append(self.flow_marker + " " + source.lines[line_index])
- for line in source.lines[line_index+1:]:
- lines.append(space_prefix + line)
- if excinfo is not None:
- indent = 4 if short else self._getindent(source)
- lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
- return lines
-
- def get_exconly(self, excinfo, indent=4, markall=False):
- lines = []
- indent = " " * indent
- # get the real exception information out
- exlines = excinfo.exconly(tryshort=True).split('\n')
- failindent = self.fail_marker + indent[1:]
- for line in exlines:
- lines.append(failindent + line)
- if not markall:
- failindent = indent
- return lines
-
- def repr_locals(self, locals):
- if self.showlocals:
- lines = []
- keys = [loc for loc in locals if loc[0] != "@"]
- keys.sort()
- for name in keys:
- value = locals[name]
- if name == '__builtins__':
- lines.append("__builtins__ = <builtins>")
- else:
- # This formatting could all be handled by the
- # _repr() function, which is only reprlib.Repr in
- # disguise, so is very configurable.
- str_repr = self._saferepr(value)
- #if len(str_repr) < 70 or not isinstance(value,
- # (list, tuple, dict)):
- lines.append("%-10s = %s" %(name, str_repr))
- #else:
- # self._line("%-10s =\\" % (name,))
- # # XXX
- # py.std.pprint.pprint(value, stream=self.excinfowriter)
- return ReprLocals(lines)
-
- def repr_traceback_entry(self, entry, excinfo=None):
- source = self._getentrysource(entry)
- if source is None:
- source = py.code.Source("???")
- line_index = 0
- else:
- # entry.getfirstlinesource() can be -1, should be 0 on jython
- line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
-
- lines = []
- style = entry._repr_style
- if style is None:
- style = self.style
- if style in ("short", "long"):
- short = style == "short"
- reprargs = self.repr_args(entry) if not short else None
- s = self.get_source(source, line_index, excinfo, short=short)
- lines.extend(s)
- if short:
- message = "in %s" %(entry.name)
- else:
- message = excinfo and excinfo.typename or ""
- path = self._makepath(entry.path)
- filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
- localsrepr = None
- if not short:
- localsrepr = self.repr_locals(entry.locals)
- return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
- if excinfo:
- lines.extend(self.get_exconly(excinfo, indent=4))
- return ReprEntry(lines, None, None, None, style)
-
- def _makepath(self, path):
- if not self.abspath:
- try:
- np = py.path.local().bestrelpath(path)
- except OSError:
- return path
- if len(np) < len(str(path)):
- path = np
- return path
-
- def repr_traceback(self, excinfo):
- traceback = excinfo.traceback
- if self.tbfilter:
- traceback = traceback.filter()
- recursionindex = None
- if excinfo.errisinstance(RuntimeError):
- if "maximum recursion depth exceeded" in str(excinfo.value):
- recursionindex = traceback.recursionindex()
- last = traceback[-1]
- entries = []
- extraline = None
- for index, entry in enumerate(traceback):
- einfo = (last == entry) and excinfo or None
- reprentry = self.repr_traceback_entry(entry, einfo)
- entries.append(reprentry)
- if index == recursionindex:
- extraline = "!!! Recursion detected (same locals & position)"
- break
- return ReprTraceback(entries, extraline, style=self.style)
-
- def repr_excinfo(self, excinfo):
- reprtraceback = self.repr_traceback(excinfo)
- reprcrash = excinfo._getreprcrash()
- return ReprExceptionInfo(reprtraceback, reprcrash)
-
-class TerminalRepr:
- def __str__(self):
- s = self.__unicode__()
- if sys.version_info[0] < 3:
- s = s.encode('utf-8')
- return s
-
- def __unicode__(self):
- # FYI this is called from pytest-xdist's serialization of exception
- # information.
- io = py.io.TextIO()
- tw = py.io.TerminalWriter(file=io)
- self.toterminal(tw)
- return io.getvalue().strip()
-
- def __repr__(self):
- return "<%s instance at %0x>" %(self.__class__, id(self))
-
-
-class ReprExceptionInfo(TerminalRepr):
- def __init__(self, reprtraceback, reprcrash):
- self.reprtraceback = reprtraceback
- self.reprcrash = reprcrash
- self.sections = []
-
- def addsection(self, name, content, sep="-"):
- self.sections.append((name, content, sep))
-
- def toterminal(self, tw):
- self.reprtraceback.toterminal(tw)
- for name, content, sep in self.sections:
- tw.sep(sep, name)
- tw.line(content)
-
-class ReprTraceback(TerminalRepr):
- entrysep = "_ "
-
- def __init__(self, reprentries, extraline, style):
- self.reprentries = reprentries
- self.extraline = extraline
- self.style = style
-
- def toterminal(self, tw):
- # the entries might have different styles
- last_style = None
- for i, entry in enumerate(self.reprentries):
- if entry.style == "long":
- tw.line("")
- entry.toterminal(tw)
- if i < len(self.reprentries) - 1:
- next_entry = self.reprentries[i+1]
- if entry.style == "long" or \
- entry.style == "short" and next_entry.style == "long":
- tw.sep(self.entrysep)
-
- if self.extraline:
- tw.line(self.extraline)
-
-class ReprTracebackNative(ReprTraceback):
- def __init__(self, tblines):
- self.style = "native"
- self.reprentries = [ReprEntryNative(tblines)]
- self.extraline = None
-
-class ReprEntryNative(TerminalRepr):
- style = "native"
-
- def __init__(self, tblines):
- self.lines = tblines
-
- def toterminal(self, tw):
- tw.write("".join(self.lines))
-
-class ReprEntry(TerminalRepr):
- localssep = "_ "
-
- def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
- self.lines = lines
- self.reprfuncargs = reprfuncargs
- self.reprlocals = reprlocals
- self.reprfileloc = filelocrepr
- self.style = style
-
- def toterminal(self, tw):
- if self.style == "short":
- self.reprfileloc.toterminal(tw)
- for line in self.lines:
- red = line.startswith("E ")
- tw.line(line, bold=True, red=red)
- #tw.line("")
- return
- if self.reprfuncargs:
- self.reprfuncargs.toterminal(tw)
- for line in self.lines:
- red = line.startswith("E ")
- tw.line(line, bold=True, red=red)
- if self.reprlocals:
- #tw.sep(self.localssep, "Locals")
- tw.line("")
- self.reprlocals.toterminal(tw)
- if self.reprfileloc:
- if self.lines:
- tw.line("")
- self.reprfileloc.toterminal(tw)
-
- def __str__(self):
- return "%s\n%s\n%s" % ("\n".join(self.lines),
- self.reprlocals,
- self.reprfileloc)
-
-class ReprFileLocation(TerminalRepr):
- def __init__(self, path, lineno, message):
- self.path = str(path)
- self.lineno = lineno
- self.message = message
-
- def toterminal(self, tw):
- # filename and lineno output for each entry,
- # using an output format that most editors unterstand
- msg = self.message
- i = msg.find("\n")
- if i != -1:
- msg = msg[:i]
- tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
-
-class ReprLocals(TerminalRepr):
- def __init__(self, lines):
- self.lines = lines
-
- def toterminal(self, tw):
- for line in self.lines:
- tw.line(line)
-
-class ReprFuncArgs(TerminalRepr):
- def __init__(self, args):
- self.args = args
-
- def toterminal(self, tw):
- if self.args:
- linesofar = ""
- for name, value in self.args:
- ns = "%s = %s" %(name, value)
- if len(ns) + len(linesofar) + 2 > tw.fullwidth:
- if linesofar:
- tw.line(linesofar)
- linesofar = ns
- else:
- if linesofar:
- linesofar += ", " + ns
- else:
- linesofar = ns
- if linesofar:
- tw.line(linesofar)
- tw.line("")
-
-
-
-oldbuiltins = {}
-
-def patch_builtins(assertion=True, compile=True):
- """ put compile and AssertionError builtins to Python's builtins. """
- if assertion:
- from py._code import assertion
- l = oldbuiltins.setdefault('AssertionError', [])
- l.append(py.builtin.builtins.AssertionError)
- py.builtin.builtins.AssertionError = assertion.AssertionError
- if compile:
- l = oldbuiltins.setdefault('compile', [])
- l.append(py.builtin.builtins.compile)
- py.builtin.builtins.compile = py.code.compile
-
-def unpatch_builtins(assertion=True, compile=True):
- """ remove compile and AssertionError builtins from Python builtins. """
- if assertion:
- py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
- if compile:
- py.builtin.builtins.compile = oldbuiltins['compile'].pop()
-
-def getrawcode(obj, trycall=True):
- """ return code object for given function. """
- try:
- return obj.__code__
- except AttributeError:
- obj = getattr(obj, 'im_func', obj)
- obj = getattr(obj, 'func_code', obj)
- obj = getattr(obj, 'f_code', obj)
- obj = getattr(obj, '__code__', obj)
- if trycall and not hasattr(obj, 'co_firstlineno'):
- if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
- x = getrawcode(obj.__call__, trycall=False)
- if hasattr(x, 'co_firstlineno'):
- return x
- return obj
-
+import py
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+ from traceback import format_exception_only
+else:
+ from py._code._py2traceback import format_exception_only
+
+class Code(object):
+ """ wrapper around Python code objects """
+ def __init__(self, rawcode):
+ if not hasattr(rawcode, "co_filename"):
+ rawcode = py.code.getrawcode(rawcode)
+ try:
+ self.filename = rawcode.co_filename
+ self.firstlineno = rawcode.co_firstlineno - 1
+ self.name = rawcode.co_name
+ except AttributeError:
+ raise TypeError("not a code object: %r" %(rawcode,))
+ self.raw = rawcode
+
+ def __eq__(self, other):
+ return self.raw == other.raw
+
+ def __ne__(self, other):
+ return not self == other
+
+ @property
+ def path(self):
+ """ return a path object pointing to source code (note that it
+ might not point to an actually existing file). """
+ p = py.path.local(self.raw.co_filename)
+ # maybe don't try this checking
+ if not p.check():
+ # XXX maybe try harder like the weird logic
+ # in the standard lib [linecache.updatecache] does?
+ p = self.raw.co_filename
+ return p
+
+ @property
+ def fullsource(self):
+ """ return a py.code.Source object for the full source file of the code
+ """
+ from py._code import source
+ full, _ = source.findsource(self.raw)
+ return full
+
+ def source(self):
+ """ return a py.code.Source object for the code object's source only
+ """
+ # return source only for that part of code
+ return py.code.Source(self.raw)
+
+ def getargs(self, var=False):
+ """ return a tuple with the argument names for the code object
+
+ if 'var' is set True also return the names of the variable and
+ keyword arguments when present
+ """
+ # handy shortcut for getting args
+ raw = self.raw
+ argcount = raw.co_argcount
+ if var:
+ argcount += raw.co_flags & CO_VARARGS
+ argcount += raw.co_flags & CO_VARKEYWORDS
+ return raw.co_varnames[:argcount]
+
+class Frame(object):
+ """Wrapper around a Python frame holding f_locals and f_globals
+ in which expressions can be evaluated."""
+
+ def __init__(self, frame):
+ self.lineno = frame.f_lineno - 1
+ self.f_globals = frame.f_globals
+ self.f_locals = frame.f_locals
+ self.raw = frame
+ self.code = py.code.Code(frame.f_code)
+
+ @property
+ def statement(self):
+ """ statement this frame is at """
+ if self.code.fullsource is None:
+ return py.code.Source("")
+ return self.code.fullsource.getstatement(self.lineno)
+
+ def eval(self, code, **vars):
+ """ evaluate 'code' in the frame
+
+ 'vars' are optional additional local variables
+
+ returns the result of the evaluation
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ return eval(code, self.f_globals, f_locals)
+
+ def exec_(self, code, **vars):
+ """ exec 'code' in the frame
+
+ 'vars' are optional additional local variables
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ py.builtin.exec_(code, self.f_globals, f_locals )
+
+ def repr(self, object):
+ """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+ """
+ return py.io.saferepr(object)
+
+ def is_true(self, object):
+ return object
+
+ def getargs(self, var=False):
+ """ return a list of tuples (name, value) for all arguments
+
+ if 'var' is set True also include the variable and keyword
+ arguments when present
+ """
+ retval = []
+ for arg in self.code.getargs(var):
+ try:
+ retval.append((arg, self.f_locals[arg]))
+ except KeyError:
+ pass # this can occur when using Psyco
+ return retval
+
+class TracebackEntry(object):
+ """ a single entry in a traceback """
+
+ _repr_style = None
+ exprinfo = None
+
+ def __init__(self, rawentry):
+ self._rawentry = rawentry
+ self.lineno = rawentry.tb_lineno - 1
+
+ def set_repr_style(self, mode):
+ assert mode in ("short", "long")
+ self._repr_style = mode
+
+ @property
+ def frame(self):
+ return py.code.Frame(self._rawentry.tb_frame)
+
+ @property
+ def relline(self):
+ return self.lineno - self.frame.code.firstlineno
+
+ def __repr__(self):
+ return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
+
+ @property
+ def statement(self):
+ """ py.code.Source object for the current statement """
+ source = self.frame.code.fullsource
+ return source.getstatement(self.lineno)
+
+ @property
+ def path(self):
+ """ path to the source code """
+ return self.frame.code.path
+
+ def getlocals(self):
+ return self.frame.f_locals
+ locals = property(getlocals, None, None, "locals of underlying frame")
+
+ def reinterpret(self):
+ """Reinterpret the failing statement and return detailed information
+ about what operations are performed."""
+ if self.exprinfo is None:
+ source = str(self.statement).strip()
+ x = py.code._reinterpret(source, self.frame, should_fail=True)
+ if not isinstance(x, str):
+ raise TypeError("interpret returned non-string %r" % (x,))
+ self.exprinfo = x
+ return self.exprinfo
+
+ def getfirstlinesource(self):
+ # on Jython this firstlineno can be -1 apparently
+ return max(self.frame.code.firstlineno, 0)
+
+ def getsource(self, astcache=None):
+ """ return failing source code. """
+ # we use the passed in astcache to not reparse asttrees
+ # within exception info printing
+ from py._code.source import getstatementrange_ast
+ source = self.frame.code.fullsource
+ if source is None:
+ return None
+ key = astnode = None
+ if astcache is not None:
+ key = self.frame.code.path
+ if key is not None:
+ astnode = astcache.get(key, None)
+ start = self.getfirstlinesource()
+ try:
+ astnode, _, end = getstatementrange_ast(self.lineno, source,
+ astnode=astnode)
+ except SyntaxError:
+ end = self.lineno + 1
+ else:
+ if key is not None:
+ astcache[key] = astnode
+ return source[start:end]
+
+ source = property(getsource)
+
+ def ishidden(self):
+ """ return True if the current frame has a var __tracebackhide__
+ resolving to True
+
+ mostly for internal use
+ """
+ try:
+ return self.frame.f_locals['__tracebackhide__']
+ except KeyError:
+ try:
+ return self.frame.f_globals['__tracebackhide__']
+ except KeyError:
+ return False
+
+ def __str__(self):
+ try:
+ fn = str(self.path)
+ except py.error.Error:
+ fn = '???'
+ name = self.frame.code.name
+ try:
+ line = str(self.statement).lstrip()
+ except KeyboardInterrupt:
+ raise
+ except:
+ line = "???"
+ return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
+
+ def name(self):
+ return self.frame.code.raw.co_name
+ name = property(name, None, None, "co_name of underlying code")
+
+class Traceback(list):
+ """ Traceback objects encapsulate and offer higher level
+ access to Traceback entries.
+ """
+ Entry = TracebackEntry
+ def __init__(self, tb):
+ """ initialize from given python traceback object. """
+ if hasattr(tb, 'tb_next'):
+ def f(cur):
+ while cur is not None:
+ yield self.Entry(cur)
+ cur = cur.tb_next
+ list.__init__(self, f(tb))
+ else:
+ list.__init__(self, tb)
+
+ def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
+ """ return a Traceback instance wrapping part of this Traceback
+
+ by providing any combination of path, lineno and firstlineno, the
+ first frame to start the to-be-returned traceback is determined
+
+ this allows cutting the first part of a Traceback instance e.g.
+ for formatting reasons (removing some uninteresting bits that deal
+ with handling of the exception/traceback)
+ """
+ for x in self:
+ code = x.frame.code
+ codepath = code.path
+ if ((path is None or codepath == path) and
+ (excludepath is None or not hasattr(codepath, 'relto') or
+ not codepath.relto(excludepath)) and
+ (lineno is None or x.lineno == lineno) and
+ (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
+ return Traceback(x._rawentry)
+ return self
+
+ def __getitem__(self, key):
+ val = super(Traceback, self).__getitem__(key)
+ if isinstance(key, type(slice(0))):
+ val = self.__class__(val)
+ return val
+
+ def filter(self, fn=lambda x: not x.ishidden()):
+ """ return a Traceback instance with certain items removed
+
+ fn is a function that gets a single argument, a TracebackItem
+ instance, and should return True when the item should be added
+ to the Traceback, False when not
+
+ by default this removes all the TracebackItems which are hidden
+ (see ishidden() above)
+ """
+ return Traceback(filter(fn, self))
+
+ def getcrashentry(self):
+ """ return the last non-hidden traceback entry that led
+ to the exception of a traceback.
+ """
+ for i in range(-1, -len(self)-1, -1):
+ entry = self[i]
+ if not entry.ishidden():
+ return entry
+ return self[-1]
+
+ def recursionindex(self):
+ """ return the index of the frame/TracebackItem where recursion
+ originates if appropriate, None if no recursion occurred
+ """
+ cache = {}
+ for i, entry in enumerate(self):
+ # id for the code.raw is needed to work around
+ # the strange metaprogramming in the decorator lib from pypi
+ # which generates code objects that have hash/value equality
+ #XXX needs a test
+ key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
+ #print "checking for recursion at", key
+ l = cache.setdefault(key, [])
+ if l:
+ f = entry.frame
+ loc = f.f_locals
+ for otherloc in l:
+ if f.is_true(f.eval(co_equal,
+ __recursioncache_locals_1=loc,
+ __recursioncache_locals_2=otherloc)):
+ return i
+ l.append(entry.frame.f_locals)
+ return None
+
+co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
+ '?', 'eval')
+
+class ExceptionInfo(object):
+ """ wraps sys.exc_info() objects and offers
+ help for navigating the traceback.
+ """
+ _striptext = ''
+ def __init__(self, tup=None, exprinfo=None):
+ if tup is None:
+ tup = sys.exc_info()
+ if exprinfo is None and isinstance(tup[1], AssertionError):
+ exprinfo = getattr(tup[1], 'msg', None)
+ if exprinfo is None:
+ exprinfo = str(tup[1])
+ if exprinfo and exprinfo.startswith('assert '):
+ self._striptext = 'AssertionError: '
+ self._excinfo = tup
+ #: the exception class
+ self.type = tup[0]
+ #: the exception instance
+ self.value = tup[1]
+ #: the exception raw traceback
+ self.tb = tup[2]
+ #: the exception type name
+ self.typename = self.type.__name__
+ #: the exception traceback (py.code.Traceback instance)
+ self.traceback = py.code.Traceback(self.tb)
+
+ def __repr__(self):
+ return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+ def exconly(self, tryshort=False):
+ """ return the exception as a string
+
+ when 'tryshort' resolves to True, and the exception is a
+ py.code._AssertionError, only the actual exception part of
+ the exception representation is returned (so 'AssertionError: ' is
+ removed from the beginning)
+ """
+ lines = format_exception_only(self.type, self.value)
+ text = ''.join(lines)
+ text = text.rstrip()
+ if tryshort:
+ if text.startswith(self._striptext):
+ text = text[len(self._striptext):]
+ return text
+
+ def errisinstance(self, exc):
+ """ return True if the exception is an instance of exc """
+ return isinstance(self.value, exc)
+
+ def _getreprcrash(self):
+ exconly = self.exconly(tryshort=True)
+ entry = self.traceback.getcrashentry()
+ path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+ return ReprFileLocation(path, lineno+1, exconly)
+
+ def getrepr(self, showlocals=False, style="long",
+ abspath=False, tbfilter=True, funcargs=False):
+ """ return str()able representation of this exception info.
+ showlocals: show locals per traceback entry
+ style: long|short|no|native traceback style
+ tbfilter: hide entries (where __tracebackhide__ is true)
+
+ in case of style==native, tbfilter and showlocals are ignored.
+ """
+ if style == 'native':
+ return ReprExceptionInfo(ReprTracebackNative(
+ py.std.traceback.format_exception(
+ self.type,
+ self.value,
+ self.traceback[0]._rawentry,
+ )), self._getreprcrash())
+
+ fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+ abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+ return fmt.repr_excinfo(self)
+
+ def __str__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return str(loc)
+
+ def __unicode__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return unicode(loc)
+
+
+class FormattedExcinfo(object):
+ """ presenting information about failing Functions and Generators. """
+ # for traceback entries
+ flow_marker = ">"
+ fail_marker = "E"
+
+ def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
+ self.showlocals = showlocals
+ self.style = style
+ self.tbfilter = tbfilter
+ self.funcargs = funcargs
+ self.abspath = abspath
+ self.astcache = {}
+
+ def _getindent(self, source):
+ # figure out indent for given source
+ try:
+ s = str(source.getstatement(len(source)-1))
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ s = str(source[-1])
+ except KeyboardInterrupt:
+ raise
+ except:
+ return 0
+ return 4 + (len(s) - len(s.lstrip()))
+
+ def _getentrysource(self, entry):
+ source = entry.getsource(self.astcache)
+ if source is not None:
+ source = source.deindent()
+ return source
+
+ def _saferepr(self, obj):
+ return py.io.saferepr(obj)
+
+ def repr_args(self, entry):
+ if self.funcargs:
+ args = []
+ for argname, argvalue in entry.frame.getargs(var=True):
+ args.append((argname, self._saferepr(argvalue)))
+ return ReprFuncArgs(args)
+
+ def get_source(self, source, line_index=-1, excinfo=None, short=False):
+ """ return formatted and marked up source lines. """
+ lines = []
+ if source is None or line_index >= len(source.lines):
+ source = py.code.Source("???")
+ line_index = 0
+ if line_index < 0:
+ line_index += len(source)
+ space_prefix = " "
+ if short:
+ lines.append(space_prefix + source.lines[line_index].strip())
+ else:
+ for line in source.lines[:line_index]:
+ lines.append(space_prefix + line)
+ lines.append(self.flow_marker + " " + source.lines[line_index])
+ for line in source.lines[line_index+1:]:
+ lines.append(space_prefix + line)
+ if excinfo is not None:
+ indent = 4 if short else self._getindent(source)
+ lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+ return lines
+
+ def get_exconly(self, excinfo, indent=4, markall=False):
+ lines = []
+ indent = " " * indent
+ # get the real exception information out
+ exlines = excinfo.exconly(tryshort=True).split('\n')
+ failindent = self.fail_marker + indent[1:]
+ for line in exlines:
+ lines.append(failindent + line)
+ if not markall:
+ failindent = indent
+ return lines
+
+ def repr_locals(self, locals):
+ if self.showlocals:
+ lines = []
+ keys = [loc for loc in locals if loc[0] != "@"]
+ keys.sort()
+ for name in keys:
+ value = locals[name]
+ if name == '__builtins__':
+ lines.append("__builtins__ = <builtins>")
+ else:
+ # This formatting could all be handled by the
+ # _repr() function, which is only reprlib.Repr in
+ # disguise, so is very configurable.
+ str_repr = self._saferepr(value)
+ #if len(str_repr) < 70 or not isinstance(value,
+ # (list, tuple, dict)):
+ lines.append("%-10s = %s" %(name, str_repr))
+ #else:
+ # self._line("%-10s =\\" % (name,))
+ # # XXX
+ # py.std.pprint.pprint(value, stream=self.excinfowriter)
+ return ReprLocals(lines)
+
+ def repr_traceback_entry(self, entry, excinfo=None):
+ source = self._getentrysource(entry)
+ if source is None:
+ source = py.code.Source("???")
+ line_index = 0
+ else:
+ # entry.getfirstlinesource() can return -1 on Jython; clamp it to 0
+ line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
+
+ lines = []
+ style = entry._repr_style
+ if style is None:
+ style = self.style
+ if style in ("short", "long"):
+ short = style == "short"
+ reprargs = self.repr_args(entry) if not short else None
+ s = self.get_source(source, line_index, excinfo, short=short)
+ lines.extend(s)
+ if short:
+ message = "in %s" %(entry.name)
+ else:
+ message = excinfo and excinfo.typename or ""
+ path = self._makepath(entry.path)
+ filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
+ localsrepr = None
+ if not short:
+ localsrepr = self.repr_locals(entry.locals)
+ return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
+ if excinfo:
+ lines.extend(self.get_exconly(excinfo, indent=4))
+ return ReprEntry(lines, None, None, None, style)
+
+ def _makepath(self, path):
+ if not self.abspath:
+ try:
+ np = py.path.local().bestrelpath(path)
+ except OSError:
+ return path
+ if len(np) < len(str(path)):
+ path = np
+ return path
+
+ def repr_traceback(self, excinfo):
+ traceback = excinfo.traceback
+ if self.tbfilter:
+ traceback = traceback.filter()
+ recursionindex = None
+ if excinfo.errisinstance(RuntimeError):
+ if "maximum recursion depth exceeded" in str(excinfo.value):
+ recursionindex = traceback.recursionindex()
+ last = traceback[-1]
+ entries = []
+ extraline = None
+ for index, entry in enumerate(traceback):
+ einfo = (last == entry) and excinfo or None
+ reprentry = self.repr_traceback_entry(entry, einfo)
+ entries.append(reprentry)
+ if index == recursionindex:
+ extraline = "!!! Recursion detected (same locals & position)"
+ break
+ return ReprTraceback(entries, extraline, style=self.style)
+
+ def repr_excinfo(self, excinfo):
+ reprtraceback = self.repr_traceback(excinfo)
+ reprcrash = excinfo._getreprcrash()
+ return ReprExceptionInfo(reprtraceback, reprcrash)
+
+class TerminalRepr:
+ def __str__(self):
+ s = self.__unicode__()
+ if sys.version_info[0] < 3:
+ s = s.encode('utf-8')
+ return s
+
+ def __unicode__(self):
+ # FYI this is called from pytest-xdist's serialization of exception
+ # information.
+ io = py.io.TextIO()
+ tw = py.io.TerminalWriter(file=io)
+ self.toterminal(tw)
+ return io.getvalue().strip()
+
+ def __repr__(self):
+ return "<%s instance at %0x>" %(self.__class__, id(self))
+
+
+class ReprExceptionInfo(TerminalRepr):
+ def __init__(self, reprtraceback, reprcrash):
+ self.reprtraceback = reprtraceback
+ self.reprcrash = reprcrash
+ self.sections = []
+
+ def addsection(self, name, content, sep="-"):
+ self.sections.append((name, content, sep))
+
+ def toterminal(self, tw):
+ self.reprtraceback.toterminal(tw)
+ for name, content, sep in self.sections:
+ tw.sep(sep, name)
+ tw.line(content)
+
+class ReprTraceback(TerminalRepr):
+ entrysep = "_ "
+
+ def __init__(self, reprentries, extraline, style):
+ self.reprentries = reprentries
+ self.extraline = extraline
+ self.style = style
+
+ def toterminal(self, tw):
+ # the entries might have different styles
+ last_style = None
+ for i, entry in enumerate(self.reprentries):
+ if entry.style == "long":
+ tw.line("")
+ entry.toterminal(tw)
+ if i < len(self.reprentries) - 1:
+ next_entry = self.reprentries[i+1]
+ if entry.style == "long" or \
+ entry.style == "short" and next_entry.style == "long":
+ tw.sep(self.entrysep)
+
+ if self.extraline:
+ tw.line(self.extraline)
+
+class ReprTracebackNative(ReprTraceback):
+ def __init__(self, tblines):
+ self.style = "native"
+ self.reprentries = [ReprEntryNative(tblines)]
+ self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+ style = "native"
+
+ def __init__(self, tblines):
+ self.lines = tblines
+
+ def toterminal(self, tw):
+ tw.write("".join(self.lines))
+
+class ReprEntry(TerminalRepr):
+ localssep = "_ "
+
+ def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
+ self.lines = lines
+ self.reprfuncargs = reprfuncargs
+ self.reprlocals = reprlocals
+ self.reprfileloc = filelocrepr
+ self.style = style
+
+ def toterminal(self, tw):
+ if self.style == "short":
+ self.reprfileloc.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ #tw.line("")
+ return
+ if self.reprfuncargs:
+ self.reprfuncargs.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ if self.reprlocals:
+ #tw.sep(self.localssep, "Locals")
+ tw.line("")
+ self.reprlocals.toterminal(tw)
+ if self.reprfileloc:
+ if self.lines:
+ tw.line("")
+ self.reprfileloc.toterminal(tw)
+
+ def __str__(self):
+ return "%s\n%s\n%s" % ("\n".join(self.lines),
+ self.reprlocals,
+ self.reprfileloc)
+
+class ReprFileLocation(TerminalRepr):
+ def __init__(self, path, lineno, message):
+ self.path = str(path)
+ self.lineno = lineno
+ self.message = message
+
+ def toterminal(self, tw):
+ # filename and lineno output for each entry,
+ # using an output format that most editors understand
+ msg = self.message
+ i = msg.find("\n")
+ if i != -1:
+ msg = msg[:i]
+ tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+
+class ReprLocals(TerminalRepr):
+ def __init__(self, lines):
+ self.lines = lines
+
+ def toterminal(self, tw):
+ for line in self.lines:
+ tw.line(line)
+
+class ReprFuncArgs(TerminalRepr):
+ def __init__(self, args):
+ self.args = args
+
+ def toterminal(self, tw):
+ if self.args:
+ linesofar = ""
+ for name, value in self.args:
+ ns = "%s = %s" %(name, value)
+ if len(ns) + len(linesofar) + 2 > tw.fullwidth:
+ if linesofar:
+ tw.line(linesofar)
+ linesofar = ns
+ else:
+ if linesofar:
+ linesofar += ", " + ns
+ else:
+ linesofar = ns
+ if linesofar:
+ tw.line(linesofar)
+ tw.line("")
+
+
+
+oldbuiltins = {}
+
+def patch_builtins(assertion=True, compile=True):
+ """ put compile and AssertionError builtins to Python's builtins. """
+ if assertion:
+ from py._code import assertion
+ l = oldbuiltins.setdefault('AssertionError', [])
+ l.append(py.builtin.builtins.AssertionError)
+ py.builtin.builtins.AssertionError = assertion.AssertionError
+ if compile:
+ l = oldbuiltins.setdefault('compile', [])
+ l.append(py.builtin.builtins.compile)
+ py.builtin.builtins.compile = py.code.compile
+
+def unpatch_builtins(assertion=True, compile=True):
+ """ remove compile and AssertionError builtins from Python builtins. """
+ if assertion:
+ py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
+ if compile:
+ py.builtin.builtins.compile = oldbuiltins['compile'].pop()
+
+def getrawcode(obj, trycall=True):
+ """ return code object for given function. """
+ try:
+ return obj.__code__
+ except AttributeError:
+ obj = getattr(obj, 'im_func', obj)
+ obj = getattr(obj, 'func_code', obj)
+ obj = getattr(obj, 'f_code', obj)
+ obj = getattr(obj, '__code__', obj)
+ if trycall and not hasattr(obj, 'co_firstlineno'):
+ if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+ x = getrawcode(obj.__call__, trycall=False)
+ if hasattr(x, 'co_firstlineno'):
+ return x
+ return obj
+
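For orientation, a minimal sketch of how the formatting machinery above is typically reached through py.code.ExceptionInfo; the failing function is purely illustrative:

    import py

    def divide(a, b):
        return a / b

    try:
        divide(1, 0)
    except ZeroDivisionError:
        # with no argument, ExceptionInfo wraps the exception currently being handled
        excinfo = py.code.ExceptionInfo()
        # builds a FormattedExcinfo and returns a ReprExceptionInfo (a TerminalRepr)
        rep = excinfo.getrepr(showlocals=True, style="long")
        print(str(rep))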
diff --git a/py/_code/source.py b/py/_code/source.py
index c8b668b..59281b5 100644
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -1,411 +1,411 @@
-from __future__ import generators
-
-from bisect import bisect_right
-import sys
-import inspect, tokenize
-import py
-from types import ModuleType
-cpy_compile = compile
-
-try:
- import _ast
- from _ast import PyCF_ONLY_AST as _AST_FLAG
-except ImportError:
- _AST_FLAG = 0
- _ast = None
-
-
-class Source(object):
- """ a immutable object holding a source code fragment,
- possibly deindenting it.
- """
- _compilecounter = 0
- def __init__(self, *parts, **kwargs):
- self.lines = lines = []
- de = kwargs.get('deindent', True)
- rstrip = kwargs.get('rstrip', True)
- for part in parts:
- if not part:
- partlines = []
- if isinstance(part, Source):
- partlines = part.lines
- elif isinstance(part, (tuple, list)):
- partlines = [x.rstrip("\n") for x in part]
- elif isinstance(part, py.builtin._basestring):
- partlines = part.split('\n')
- if rstrip:
- while partlines:
- if partlines[-1].strip():
- break
- partlines.pop()
- else:
- partlines = getsource(part, deindent=de).lines
- if de:
- partlines = deindent(partlines)
- lines.extend(partlines)
-
- def __eq__(self, other):
- try:
- return self.lines == other.lines
- except AttributeError:
- if isinstance(other, str):
- return str(self) == other
- return False
-
- def __getitem__(self, key):
- if isinstance(key, int):
- return self.lines[key]
- else:
- if key.step not in (None, 1):
- raise IndexError("cannot slice a Source with a step")
- return self.__getslice__(key.start, key.stop)
-
- def __len__(self):
- return len(self.lines)
-
- def __getslice__(self, start, end):
- newsource = Source()
- newsource.lines = self.lines[start:end]
- return newsource
-
- def strip(self):
- """ return new source object with trailing
- and leading blank lines removed.
- """
- start, end = 0, len(self)
- while start < end and not self.lines[start].strip():
- start += 1
- while end > start and not self.lines[end-1].strip():
- end -= 1
- source = Source()
- source.lines[:] = self.lines[start:end]
- return source
-
- def putaround(self, before='', after='', indent=' ' * 4):
- """ return a copy of the source object with
- 'before' and 'after' wrapped around it.
- """
- before = Source(before)
- after = Source(after)
- newsource = Source()
- lines = [ (indent + line) for line in self.lines]
- newsource.lines = before.lines + lines + after.lines
- return newsource
-
- def indent(self, indent=' ' * 4):
- """ return a copy of the source object with
- all lines indented by the given indent-string.
- """
- newsource = Source()
- newsource.lines = [(indent+line) for line in self.lines]
- return newsource
-
- def getstatement(self, lineno, assertion=False):
- """ return Source statement which contains the
- given linenumber (counted from 0).
- """
- start, end = self.getstatementrange(lineno, assertion)
- return self[start:end]
-
- def getstatementrange(self, lineno, assertion=False):
- """ return (start, end) tuple which spans the minimal
- statement region which containing the given lineno.
- """
- if not (0 <= lineno < len(self)):
- raise IndexError("lineno out of range")
- ast, start, end = getstatementrange_ast(lineno, self)
- return start, end
-
- def deindent(self, offset=None):
- """ return a new source object deindented by offset.
- If offset is None then guess an indentation offset from
- the first non-blank line. Subsequent lines which have a
- lower indentation offset will be copied verbatim as
- they are assumed to be part of multilines.
- """
- # XXX maybe use the tokenizer to properly handle multiline
- # strings etc.pp?
- newsource = Source()
- newsource.lines[:] = deindent(self.lines, offset)
- return newsource
-
- def isparseable(self, deindent=True):
- """ return True if source is parseable, heuristically
- deindenting it by default.
- """
- try:
- import parser
- except ImportError:
- syntax_checker = lambda x: compile(x, 'asd', 'exec')
- else:
- syntax_checker = parser.suite
-
- if deindent:
- source = str(self.deindent())
- else:
- source = str(self)
- try:
- #compile(source+'\n', "x", "exec")
- syntax_checker(source+'\n')
- except KeyboardInterrupt:
- raise
- except Exception:
- return False
- else:
- return True
-
- def __str__(self):
- return "\n".join(self.lines)
-
- def compile(self, filename=None, mode='exec',
- flag=generators.compiler_flag,
- dont_inherit=0, _genframe=None):
- """ return compiled code object. if filename is None
- invent an artificial filename which displays
- the source/line position of the caller frame.
- """
- if not filename or py.path.local(filename).check(file=0):
- if _genframe is None:
- _genframe = sys._getframe(1) # the caller
- fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
- base = "<%d-codegen " % self._compilecounter
- self.__class__._compilecounter += 1
- if not filename:
- filename = base + '%s:%d>' % (fn, lineno)
- else:
- filename = base + '%r %s:%d>' % (filename, fn, lineno)
- source = "\n".join(self.lines) + '\n'
- try:
- co = cpy_compile(source, filename, mode, flag)
- except SyntaxError:
- ex = sys.exc_info()[1]
- # re-represent syntax errors from parsing python strings
- msglines = self.lines[:ex.lineno]
- if ex.offset:
- msglines.append(" "*ex.offset + '^')
- msglines.append("(code was compiled probably from here: %s)" % filename)
- newex = SyntaxError('\n'.join(msglines))
- newex.offset = ex.offset
- newex.lineno = ex.lineno
- newex.text = ex.text
- raise newex
- else:
- if flag & _AST_FLAG:
- return co
- lines = [(x + "\n") for x in self.lines]
- py.std.linecache.cache[filename] = (1, None, lines, filename)
- return co
-
-#
-# public API shortcut functions
-#
-
-def compile_(source, filename=None, mode='exec', flags=
- generators.compiler_flag, dont_inherit=0):
- """ compile the given source to a raw code object,
- and maintain an internal cache which allows later
- retrieval of the source code for the code object
- and any recursively created code objects.
- """
- if _ast is not None and isinstance(source, _ast.AST):
- # XXX should Source support having AST?
- return cpy_compile(source, filename, mode, flags, dont_inherit)
- _genframe = sys._getframe(1) # the caller
- s = Source(source)
- co = s.compile(filename, mode, flags, _genframe=_genframe)
- return co
-
-
-def getfslineno(obj):
- """ Return source location (path, lineno) for the given object.
- If the source cannot be determined return ("", -1)
- """
- try:
- code = py.code.Code(obj)
- except TypeError:
- try:
- fn = (py.std.inspect.getsourcefile(obj) or
- py.std.inspect.getfile(obj))
- except TypeError:
- return "", -1
-
- fspath = fn and py.path.local(fn) or None
- lineno = -1
- if fspath:
- try:
- _, lineno = findsource(obj)
- except IOError:
- pass
- else:
- fspath = code.path
- lineno = code.firstlineno
- assert isinstance(lineno, int)
- return fspath, lineno
-
-#
-# helper functions
-#
-
-def findsource(obj):
- try:
- sourcelines, lineno = py.std.inspect.findsource(obj)
- except py.builtin._sysex:
- raise
- except:
- return None, -1
- source = Source()
- source.lines = [line.rstrip() for line in sourcelines]
- return source, lineno
-
-def getsource(obj, **kwargs):
- obj = py.code.getrawcode(obj)
- try:
- strsrc = inspect.getsource(obj)
- except IndentationError:
- strsrc = "\"Buggy python version consider upgrading, cannot get source\""
- assert isinstance(strsrc, str)
- return Source(strsrc, **kwargs)
-
-def deindent(lines, offset=None):
- if offset is None:
- for line in lines:
- line = line.expandtabs()
- s = line.lstrip()
- if s:
- offset = len(line)-len(s)
- break
- else:
- offset = 0
- if offset == 0:
- return list(lines)
- newlines = []
- def readline_generator(lines):
- for line in lines:
- yield line + '\n'
- while True:
- yield ''
-
- it = readline_generator(lines)
-
- try:
- for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
- if sline > len(lines):
- break # End of input reached
- if sline > len(newlines):
- line = lines[sline - 1].expandtabs()
- if line.lstrip() and line[:offset].isspace():
- line = line[offset:] # Deindent
- newlines.append(line)
-
- for i in range(sline, eline):
- # Don't deindent continuing lines of
- # multiline tokens (i.e. multiline strings)
- newlines.append(lines[i])
- except (IndentationError, tokenize.TokenError):
- pass
- # Add any lines we didn't see. E.g. if an exception was raised.
- newlines.extend(lines[len(newlines):])
- return newlines
-
-
-def get_statement_startend2(lineno, node):
- import ast
- # flatten all statements and except handlers into one lineno-list
- # AST's line numbers start indexing at 1
- l = []
- for x in ast.walk(node):
- if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
- l.append(x.lineno - 1)
- for name in "finalbody", "orelse":
- val = getattr(x, name, None)
- if val:
- # treat the finally/orelse part as its own statement
- l.append(val[0].lineno - 1 - 1)
- l.sort()
- insert_index = bisect_right(l, lineno)
- start = l[insert_index - 1]
- if insert_index >= len(l):
- end = None
- else:
- end = l[insert_index]
- return start, end
-
-
-def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
- if astnode is None:
- content = str(source)
- if sys.version_info < (2,7):
- content += "\n"
- try:
- astnode = compile(content, "source", "exec", 1024) # 1024 for AST
- except ValueError:
- start, end = getstatementrange_old(lineno, source, assertion)
- return None, start, end
- start, end = get_statement_startend2(lineno, astnode)
- # we need to correct the end:
- # - ast-parsing strips comments
- # - there might be empty lines
- # - we might have lesser indented code blocks at the end
- if end is None:
- end = len(source.lines)
-
- if end > start + 1:
- # make sure we don't span differently indented code blocks
- # by using the BlockFinder helper used which inspect.getsource() uses itself
- block_finder = inspect.BlockFinder()
- # if we start with an indented line, put blockfinder to "started" mode
- block_finder.started = source.lines[start][0].isspace()
- it = ((x + "\n") for x in source.lines[start:end])
- try:
- for tok in tokenize.generate_tokens(lambda: next(it)):
- block_finder.tokeneater(*tok)
- except (inspect.EndOfBlock, IndentationError):
- end = block_finder.last + start
- except Exception:
- pass
-
- # the end might still point to a comment or empty line, correct it
- while end:
- line = source.lines[end - 1].lstrip()
- if line.startswith("#") or not line:
- end -= 1
- else:
- break
- return astnode, start, end
-
-
-def getstatementrange_old(lineno, source, assertion=False):
- """ return (start, end) tuple which spans the minimal
- statement region which containing the given lineno.
- raise an IndexError if no such statementrange can be found.
- """
- # XXX this logic is only used on python2.4 and below
- # 1. find the start of the statement
- from codeop import compile_command
- for start in range(lineno, -1, -1):
- if assertion:
- line = source.lines[start]
- # the following lines are not fully tested, change with care
- if 'super' in line and 'self' in line and '__init__' in line:
- raise IndexError("likely a subclass")
- if "assert" not in line and "raise" not in line:
- continue
- trylines = source.lines[start:lineno+1]
- # quick hack to prepare parsing an indented line with
- # compile_command() (which errors on "return" outside defs)
- trylines.insert(0, 'def xxx():')
- trysource = '\n '.join(trylines)
- # ^ space here
- try:
- compile_command(trysource)
- except (SyntaxError, OverflowError, ValueError):
- continue
-
- # 2. find the end of the statement
- for end in range(lineno+1, len(source)+1):
- trysource = source[start:end]
- if trysource.isparseable():
- return start, end
- raise SyntaxError("no valid source range around line %d " % (lineno,))
-
-
+from __future__ import generators
+
+from bisect import bisect_right
+import sys
+import inspect, tokenize
+import py
+from types import ModuleType
+cpy_compile = compile
+
+try:
+ import _ast
+ from _ast import PyCF_ONLY_AST as _AST_FLAG
+except ImportError:
+ _AST_FLAG = 0
+ _ast = None
+
+
+class Source(object):
+ """ a immutable object holding a source code fragment,
+ possibly deindenting it.
+ """
+ _compilecounter = 0
+ def __init__(self, *parts, **kwargs):
+ self.lines = lines = []
+ de = kwargs.get('deindent', True)
+ rstrip = kwargs.get('rstrip', True)
+ for part in parts:
+ if not part:
+ partlines = []
+ if isinstance(part, Source):
+ partlines = part.lines
+ elif isinstance(part, (tuple, list)):
+ partlines = [x.rstrip("\n") for x in part]
+ elif isinstance(part, py.builtin._basestring):
+ partlines = part.split('\n')
+ if rstrip:
+ while partlines:
+ if partlines[-1].strip():
+ break
+ partlines.pop()
+ else:
+ partlines = getsource(part, deindent=de).lines
+ if de:
+ partlines = deindent(partlines)
+ lines.extend(partlines)
+
+ def __eq__(self, other):
+ try:
+ return self.lines == other.lines
+ except AttributeError:
+ if isinstance(other, str):
+ return str(self) == other
+ return False
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ return self.lines[key]
+ else:
+ if key.step not in (None, 1):
+ raise IndexError("cannot slice a Source with a step")
+ return self.__getslice__(key.start, key.stop)
+
+ def __len__(self):
+ return len(self.lines)
+
+ def __getslice__(self, start, end):
+ newsource = Source()
+ newsource.lines = self.lines[start:end]
+ return newsource
+
+ def strip(self):
+ """ return new source object with trailing
+ and leading blank lines removed.
+ """
+ start, end = 0, len(self)
+ while start < end and not self.lines[start].strip():
+ start += 1
+ while end > start and not self.lines[end-1].strip():
+ end -= 1
+ source = Source()
+ source.lines[:] = self.lines[start:end]
+ return source
+
+ def putaround(self, before='', after='', indent=' ' * 4):
+ """ return a copy of the source object with
+ 'before' and 'after' wrapped around it.
+ """
+ before = Source(before)
+ after = Source(after)
+ newsource = Source()
+ lines = [ (indent + line) for line in self.lines]
+ newsource.lines = before.lines + lines + after.lines
+ return newsource
+
+ def indent(self, indent=' ' * 4):
+ """ return a copy of the source object with
+ all lines indented by the given indent-string.
+ """
+ newsource = Source()
+ newsource.lines = [(indent+line) for line in self.lines]
+ return newsource
+
+ def getstatement(self, lineno, assertion=False):
+ """ return Source statement which contains the
+ given linenumber (counted from 0).
+ """
+ start, end = self.getstatementrange(lineno, assertion)
+ return self[start:end]
+
+ def getstatementrange(self, lineno, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+ statement region containing the given lineno.
+ """
+ if not (0 <= lineno < len(self)):
+ raise IndexError("lineno out of range")
+ ast, start, end = getstatementrange_ast(lineno, self)
+ return start, end
+
+ def deindent(self, offset=None):
+ """ return a new source object deindented by offset.
+ If offset is None then guess an indentation offset from
+ the first non-blank line. Subsequent lines which have a
+ lower indentation offset will be copied verbatim as
+ they are assumed to be part of multilines.
+ """
+ # XXX maybe use the tokenizer to properly handle multiline
+ # strings etc.pp?
+ newsource = Source()
+ newsource.lines[:] = deindent(self.lines, offset)
+ return newsource
+
+ def isparseable(self, deindent=True):
+ """ return True if source is parseable, heuristically
+ deindenting it by default.
+ """
+ try:
+ import parser
+ except ImportError:
+ syntax_checker = lambda x: compile(x, 'asd', 'exec')
+ else:
+ syntax_checker = parser.suite
+
+ if deindent:
+ source = str(self.deindent())
+ else:
+ source = str(self)
+ try:
+ #compile(source+'\n', "x", "exec")
+ syntax_checker(source+'\n')
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ return False
+ else:
+ return True
+
+ def __str__(self):
+ return "\n".join(self.lines)
+
+ def compile(self, filename=None, mode='exec',
+ flag=generators.compiler_flag,
+ dont_inherit=0, _genframe=None):
+ """ return compiled code object. if filename is None
+ invent an artificial filename which displays
+ the source/line position of the caller frame.
+ """
+ if not filename or py.path.local(filename).check(file=0):
+ if _genframe is None:
+ _genframe = sys._getframe(1) # the caller
+ fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+ base = "<%d-codegen " % self._compilecounter
+ self.__class__._compilecounter += 1
+ if not filename:
+ filename = base + '%s:%d>' % (fn, lineno)
+ else:
+ filename = base + '%r %s:%d>' % (filename, fn, lineno)
+ source = "\n".join(self.lines) + '\n'
+ try:
+ co = cpy_compile(source, filename, mode, flag)
+ except SyntaxError:
+ ex = sys.exc_info()[1]
+ # re-represent syntax errors from parsing python strings
+ msglines = self.lines[:ex.lineno]
+ if ex.offset:
+ msglines.append(" "*ex.offset + '^')
+ msglines.append("(code was compiled probably from here: %s)" % filename)
+ newex = SyntaxError('\n'.join(msglines))
+ newex.offset = ex.offset
+ newex.lineno = ex.lineno
+ newex.text = ex.text
+ raise newex
+ else:
+ if flag & _AST_FLAG:
+ return co
+ lines = [(x + "\n") for x in self.lines]
+ py.std.linecache.cache[filename] = (1, None, lines, filename)
+ return co
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+ generators.compiler_flag, dont_inherit=0):
+ """ compile the given source to a raw code object,
+ and maintain an internal cache which allows later
+ retrieval of the source code for the code object
+ and any recursively created code objects.
+ """
+ if _ast is not None and isinstance(source, _ast.AST):
+ # XXX should Source support having AST?
+ return cpy_compile(source, filename, mode, flags, dont_inherit)
+ _genframe = sys._getframe(1) # the caller
+ s = Source(source)
+ co = s.compile(filename, mode, flags, _genframe=_genframe)
+ return co
+
+
+def getfslineno(obj):
+ """ Return source location (path, lineno) for the given object.
+ If the source cannot be determined return ("", -1)
+ """
+ try:
+ code = py.code.Code(obj)
+ except TypeError:
+ try:
+ fn = (py.std.inspect.getsourcefile(obj) or
+ py.std.inspect.getfile(obj))
+ except TypeError:
+ return "", -1
+
+ fspath = fn and py.path.local(fn) or None
+ lineno = -1
+ if fspath:
+ try:
+ _, lineno = findsource(obj)
+ except IOError:
+ pass
+ else:
+ fspath = code.path
+ lineno = code.firstlineno
+ assert isinstance(lineno, int)
+ return fspath, lineno
+
+#
+# helper functions
+#
+
+def findsource(obj):
+ try:
+ sourcelines, lineno = py.std.inspect.findsource(obj)
+ except py.builtin._sysex:
+ raise
+ except:
+ return None, -1
+ source = Source()
+ source.lines = [line.rstrip() for line in sourcelines]
+ return source, lineno
+
+def getsource(obj, **kwargs):
+ obj = py.code.getrawcode(obj)
+ try:
+ strsrc = inspect.getsource(obj)
+ except IndentationError:
+ strsrc = "\"Buggy python version consider upgrading, cannot get source\""
+ assert isinstance(strsrc, str)
+ return Source(strsrc, **kwargs)
+
+def deindent(lines, offset=None):
+ if offset is None:
+ for line in lines:
+ line = line.expandtabs()
+ s = line.lstrip()
+ if s:
+ offset = len(line)-len(s)
+ break
+ else:
+ offset = 0
+ if offset == 0:
+ return list(lines)
+ newlines = []
+ def readline_generator(lines):
+ for line in lines:
+ yield line + '\n'
+ while True:
+ yield ''
+
+ it = readline_generator(lines)
+
+ try:
+ for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
+ if sline > len(lines):
+ break # End of input reached
+ if sline > len(newlines):
+ line = lines[sline - 1].expandtabs()
+ if line.lstrip() and line[:offset].isspace():
+ line = line[offset:] # Deindent
+ newlines.append(line)
+
+ for i in range(sline, eline):
+ # Don't deindent continuing lines of
+ # multiline tokens (i.e. multiline strings)
+ newlines.append(lines[i])
+ except (IndentationError, tokenize.TokenError):
+ pass
+ # Add any lines we didn't see. E.g. if an exception was raised.
+ newlines.extend(lines[len(newlines):])
+ return newlines
+
+
+def get_statement_startend2(lineno, node):
+ import ast
+ # flatten all statements and except handlers into one lineno-list
+ # AST's line numbers start indexing at 1
+ l = []
+ for x in ast.walk(node):
+ if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
+ l.append(x.lineno - 1)
+ for name in "finalbody", "orelse":
+ val = getattr(x, name, None)
+ if val:
+ # treat the finally/orelse part as its own statement
+ l.append(val[0].lineno - 1 - 1)
+ l.sort()
+ insert_index = bisect_right(l, lineno)
+ start = l[insert_index - 1]
+ if insert_index >= len(l):
+ end = None
+ else:
+ end = l[insert_index]
+ return start, end
+
+
+def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+ if astnode is None:
+ content = str(source)
+ if sys.version_info < (2,7):
+ content += "\n"
+ try:
+ astnode = compile(content, "source", "exec", 1024) # 1024 for AST
+ except ValueError:
+ start, end = getstatementrange_old(lineno, source, assertion)
+ return None, start, end
+ start, end = get_statement_startend2(lineno, astnode)
+ # we need to correct the end:
+ # - ast-parsing strips comments
+ # - there might be empty lines
+ # - we might have lesser indented code blocks at the end
+ if end is None:
+ end = len(source.lines)
+
+ if end > start + 1:
+ # make sure we don't span differently indented code blocks
+ # by using the BlockFinder helper that inspect.getsource() itself uses
+ block_finder = inspect.BlockFinder()
+ # if we start with an indented line, put blockfinder to "started" mode
+ block_finder.started = source.lines[start][0].isspace()
+ it = ((x + "\n") for x in source.lines[start:end])
+ try:
+ for tok in tokenize.generate_tokens(lambda: next(it)):
+ block_finder.tokeneater(*tok)
+ except (inspect.EndOfBlock, IndentationError):
+ end = block_finder.last + start
+ except Exception:
+ pass
+
+ # the end might still point to a comment or empty line, correct it
+ while end:
+ line = source.lines[end - 1].lstrip()
+ if line.startswith("#") or not line:
+ end -= 1
+ else:
+ break
+ return astnode, start, end
+
+
+def getstatementrange_old(lineno, source, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+ statement region containing the given lineno.
+ raise an IndexError if no such statementrange can be found.
+ """
+ # XXX this logic is only used on python2.4 and below
+ # 1. find the start of the statement
+ from codeop import compile_command
+ for start in range(lineno, -1, -1):
+ if assertion:
+ line = source.lines[start]
+ # the following lines are not fully tested, change with care
+ if 'super' in line and 'self' in line and '__init__' in line:
+ raise IndexError("likely a subclass")
+ if "assert" not in line and "raise" not in line:
+ continue
+ trylines = source.lines[start:lineno+1]
+ # quick hack to prepare parsing an indented line with
+ # compile_command() (which errors on "return" outside defs)
+ trylines.insert(0, 'def xxx():')
+ trysource = '\n '.join(trylines)
+ # ^ space here
+ try:
+ compile_command(trysource)
+ except (SyntaxError, OverflowError, ValueError):
+ continue
+
+ # 2. find the end of the statement
+ for end in range(lineno+1, len(source)+1):
+ trysource = source[start:end]
+ if trysource.isparseable():
+ return start, end
+ raise SyntaxError("no valid source range around line %d " % (lineno,))
+
+
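A short usage sketch for the Source helper defined above; the snippet being inspected is made up for illustration:

    import py

    src = py.code.Source("""
        def f(x):
            y = x + 1
            return y
    """)
    print(str(src))             # lines are deindented by default
    stmt = src.getstatement(2)  # statement containing line index 2 ("y = x + 1")
    print(str(stmt))
    print(src.isparseable())    # True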
diff --git a/py/_error.py b/py/_error.py
index 8ca339b..228de58 100644
--- a/py/_error.py
+++ b/py/_error.py
@@ -1,89 +1,89 @@
-"""
-create errno-specific classes for IO or os calls.
-
-"""
-import sys, os, errno
-
-class Error(EnvironmentError):
- def __repr__(self):
- return "%s.%s %r: %s " %(self.__class__.__module__,
- self.__class__.__name__,
- self.__class__.__doc__,
- " ".join(map(str, self.args)),
- #repr(self.args)
- )
-
- def __str__(self):
- s = "[%s]: %s" %(self.__class__.__doc__,
- " ".join(map(str, self.args)),
- )
- return s
-
-_winerrnomap = {
- 2: errno.ENOENT,
- 3: errno.ENOENT,
- 17: errno.EEXIST,
- 18: errno.EXDEV,
- 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable
- 22: errno.ENOTDIR,
- 20: errno.ENOTDIR,
- 267: errno.ENOTDIR,
- 5: errno.EACCES, # anything better?
-}
-
-class ErrorMaker(object):
- """ lazily provides Exception classes for each possible POSIX errno
- (as defined per the 'errno' module). All such instances
- subclass EnvironmentError.
- """
- Error = Error
- _errno2class = {}
-
- def __getattr__(self, name):
- if name[0] == "_":
- raise AttributeError(name)
- eno = getattr(errno, name)
- cls = self._geterrnoclass(eno)
- setattr(self, name, cls)
- return cls
-
- def _geterrnoclass(self, eno):
- try:
- return self._errno2class[eno]
- except KeyError:
- clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
- errorcls = type(Error)(clsname, (Error,),
- {'__module__':'py.error',
- '__doc__': os.strerror(eno)})
- self._errno2class[eno] = errorcls
- return errorcls
-
- def checked_call(self, func, *args, **kwargs):
- """ call a function and raise an errno-exception if applicable. """
- __tracebackhide__ = True
- try:
- return func(*args, **kwargs)
- except self.Error:
- raise
- except (OSError, EnvironmentError):
- cls, value, tb = sys.exc_info()
- if not hasattr(value, 'errno'):
- raise
- __tracebackhide__ = False
- errno = value.errno
- try:
- if not isinstance(value, WindowsError):
- raise NameError
- except NameError:
- # we are not on Windows, or we got a proper OSError
- cls = self._geterrnoclass(errno)
- else:
- try:
- cls = self._geterrnoclass(_winerrnomap[errno])
- except KeyError:
- raise value
- raise cls("%s%r" % (func.__name__, args))
- __tracebackhide__ = True
-
-
-error = ErrorMaker()
+"""
+create errno-specific classes for IO or os calls.
+
+"""
+import sys, os, errno
+
+class Error(EnvironmentError):
+ def __repr__(self):
+ return "%s.%s %r: %s " %(self.__class__.__module__,
+ self.__class__.__name__,
+ self.__class__.__doc__,
+ " ".join(map(str, self.args)),
+ #repr(self.args)
+ )
+
+ def __str__(self):
+ s = "[%s]: %s" %(self.__class__.__doc__,
+ " ".join(map(str, self.args)),
+ )
+ return s
+
+_winerrnomap = {
+ 2: errno.ENOENT,
+ 3: errno.ENOENT,
+ 17: errno.EEXIST,
+ 18: errno.EXDEV,
+ 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
+ 22: errno.ENOTDIR,
+ 20: errno.ENOTDIR,
+ 267: errno.ENOTDIR,
+ 5: errno.EACCES, # anything better?
+}
+
+class ErrorMaker(object):
+ """ lazily provides Exception classes for each possible POSIX errno
+ (as defined by the 'errno' module). All such exception
+ classes subclass EnvironmentError.
+ """
+ Error = Error
+ _errno2class = {}
+
+ def __getattr__(self, name):
+ if name[0] == "_":
+ raise AttributeError(name)
+ eno = getattr(errno, name)
+ cls = self._geterrnoclass(eno)
+ setattr(self, name, cls)
+ return cls
+
+ def _geterrnoclass(self, eno):
+ try:
+ return self._errno2class[eno]
+ except KeyError:
+ clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
+ errorcls = type(Error)(clsname, (Error,),
+ {'__module__':'py.error',
+ '__doc__': os.strerror(eno)})
+ self._errno2class[eno] = errorcls
+ return errorcls
+
+ def checked_call(self, func, *args, **kwargs):
+ """ call a function and raise an errno-exception if applicable. """
+ __tracebackhide__ = True
+ try:
+ return func(*args, **kwargs)
+ except self.Error:
+ raise
+ except (OSError, EnvironmentError):
+ cls, value, tb = sys.exc_info()
+ if not hasattr(value, 'errno'):
+ raise
+ __tracebackhide__ = False
+ errno = value.errno
+ try:
+ if not isinstance(value, WindowsError):
+ raise NameError
+ except NameError:
+ # we are not on Windows, or we got a proper OSError
+ cls = self._geterrnoclass(errno)
+ else:
+ try:
+ cls = self._geterrnoclass(_winerrnomap[errno])
+ except KeyError:
+ raise value
+ raise cls("%s%r" % (func.__name__, args))
+ __tracebackhide__ = True
+
+
+error = ErrorMaker()
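The ErrorMaker defined above is exposed as py.error; a minimal sketch of the intended use (the path is illustrative):

    import os
    import py

    try:
        # raises py.error.ENOENT (a py.error.Error/EnvironmentError subclass)
        # instead of a bare OSError, based on the errno of the failure
        py.error.checked_call(os.stat, "/no/such/path")
    except py.error.ENOENT as err:
        print("missing:", err)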
diff --git a/py/_iniconfig.py b/py/_iniconfig.py
index 92b50bd..8e46404 100644
--- a/py/_iniconfig.py
+++ b/py/_iniconfig.py
@@ -1,162 +1,162 @@
-""" brain-dead simple parser for ini-style files.
-(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed
-"""
-__version__ = "0.2.dev2"
-
-__all__ = ['IniConfig', 'ParseError']
-
-COMMENTCHARS = "#;"
-
-class ParseError(Exception):
- def __init__(self, path, lineno, msg):
- Exception.__init__(self, path, lineno, msg)
- self.path = path
- self.lineno = lineno
- self.msg = msg
-
- def __str__(self):
- return "%s:%s: %s" %(self.path, self.lineno+1, self.msg)
-
-class SectionWrapper(object):
- def __init__(self, config, name):
- self.config = config
- self.name = name
-
- def lineof(self, name):
- return self.config.lineof(self.name, name)
-
- def get(self, key, default=None, convert=str):
- return self.config.get(self.name, key, convert=convert, default=default)
-
- def __getitem__(self, key):
- return self.config.sections[self.name][key]
-
- def __iter__(self):
- section = self.config.sections.get(self.name, [])
- def lineof(key):
- return self.config.lineof(self.name, key)
- for name in sorted(section, key=lineof):
- yield name
-
- def items(self):
- for name in self:
- yield name, self[name]
-
-
-class IniConfig(object):
- def __init__(self, path, data=None):
- self.path = str(path) # convenience
- if data is None:
- f = open(self.path)
- try:
- tokens = self._parse(iter(f))
- finally:
- f.close()
- else:
- tokens = self._parse(data.splitlines(True))
-
- self._sources = {}
- self.sections = {}
-
- for lineno, section, name, value in tokens:
- if section is None:
- self._raise(lineno, 'no section header defined')
- self._sources[section, name] = lineno
- if name is None:
- if section in self.sections:
- self._raise(lineno, 'duplicate section %r'%(section, ))
- self.sections[section] = {}
- else:
- if name in self.sections[section]:
- self._raise(lineno, 'duplicate name %r'%(name, ))
- self.sections[section][name] = value
-
- def _raise(self, lineno, msg):
- raise ParseError(self.path, lineno, msg)
-
- def _parse(self, line_iter):
- result = []
- section = None
- for lineno, line in enumerate(line_iter):
- name, data = self._parseline(line, lineno)
- # new value
- if name is not None and data is not None:
- result.append((lineno, section, name, data))
- # new section
- elif name is not None and data is None:
- if not name:
- self._raise(lineno, 'empty section name')
- section = name
- result.append((lineno, section, None, None))
- # continuation
- elif name is None and data is not None:
- if not result:
- self._raise(lineno, 'unexpected value continuation')
- last = result.pop()
- last_name, last_data = last[-2:]
- if last_name is None:
- self._raise(lineno, 'unexpected value continuation')
-
- if last_data:
- data = '%s\n%s' % (last_data, data)
- result.append(last[:-1] + (data,))
- return result
-
- def _parseline(self, line, lineno):
- # blank lines
- if iscommentline(line):
- line = ""
- else:
- line = line.rstrip()
- if not line:
- return None, None
- # section
- if line[0] == '[':
- realline = line
- for c in COMMENTCHARS:
- line = line.split(c)[0].rstrip()
- if line[-1] == "]":
- return line[1:-1], None
- return None, realline.strip()
- # value
- elif not line[0].isspace():
- try:
- name, value = line.split('=', 1)
- if ":" in name:
- raise ValueError()
- except ValueError:
- try:
- name, value = line.split(":", 1)
- except ValueError:
- self._raise(lineno, 'unexpected line: %r' % line)
- return name.strip(), value.strip()
- # continuation
- else:
- return None, line.strip()
-
- def lineof(self, section, name=None):
- lineno = self._sources.get((section, name))
- if lineno is not None:
- return lineno + 1
-
- def get(self, section, name, default=None, convert=str):
- try:
- return convert(self.sections[section][name])
- except KeyError:
- return default
-
- def __getitem__(self, name):
- if name not in self.sections:
- raise KeyError(name)
- return SectionWrapper(self, name)
-
- def __iter__(self):
- for name in sorted(self.sections, key=self.lineof):
- yield SectionWrapper(self, name)
-
- def __contains__(self, arg):
- return arg in self.sections
-
-def iscommentline(line):
- c = line.lstrip()[:1]
- return c in COMMENTCHARS
+""" brain-dead simple parser for ini-style files.
+(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed
+"""
+__version__ = "0.2.dev2"
+
+__all__ = ['IniConfig', 'ParseError']
+
+COMMENTCHARS = "#;"
+
+class ParseError(Exception):
+ def __init__(self, path, lineno, msg):
+ Exception.__init__(self, path, lineno, msg)
+ self.path = path
+ self.lineno = lineno
+ self.msg = msg
+
+ def __str__(self):
+ return "%s:%s: %s" %(self.path, self.lineno+1, self.msg)
+
+class SectionWrapper(object):
+ def __init__(self, config, name):
+ self.config = config
+ self.name = name
+
+ def lineof(self, name):
+ return self.config.lineof(self.name, name)
+
+ def get(self, key, default=None, convert=str):
+ return self.config.get(self.name, key, convert=convert, default=default)
+
+ def __getitem__(self, key):
+ return self.config.sections[self.name][key]
+
+ def __iter__(self):
+ section = self.config.sections.get(self.name, [])
+ def lineof(key):
+ return self.config.lineof(self.name, key)
+ for name in sorted(section, key=lineof):
+ yield name
+
+ def items(self):
+ for name in self:
+ yield name, self[name]
+
+
+class IniConfig(object):
+ def __init__(self, path, data=None):
+ self.path = str(path) # convenience
+ if data is None:
+ f = open(self.path)
+ try:
+ tokens = self._parse(iter(f))
+ finally:
+ f.close()
+ else:
+ tokens = self._parse(data.splitlines(True))
+
+ self._sources = {}
+ self.sections = {}
+
+ for lineno, section, name, value in tokens:
+ if section is None:
+ self._raise(lineno, 'no section header defined')
+ self._sources[section, name] = lineno
+ if name is None:
+ if section in self.sections:
+ self._raise(lineno, 'duplicate section %r'%(section, ))
+ self.sections[section] = {}
+ else:
+ if name in self.sections[section]:
+ self._raise(lineno, 'duplicate name %r'%(name, ))
+ self.sections[section][name] = value
+
+ def _raise(self, lineno, msg):
+ raise ParseError(self.path, lineno, msg)
+
+ def _parse(self, line_iter):
+ result = []
+ section = None
+ for lineno, line in enumerate(line_iter):
+ name, data = self._parseline(line, lineno)
+ # new value
+ if name is not None and data is not None:
+ result.append((lineno, section, name, data))
+ # new section
+ elif name is not None and data is None:
+ if not name:
+ self._raise(lineno, 'empty section name')
+ section = name
+ result.append((lineno, section, None, None))
+ # continuation
+ elif name is None and data is not None:
+ if not result:
+ self._raise(lineno, 'unexpected value continuation')
+ last = result.pop()
+ last_name, last_data = last[-2:]
+ if last_name is None:
+ self._raise(lineno, 'unexpected value continuation')
+
+ if last_data:
+ data = '%s\n%s' % (last_data, data)
+ result.append(last[:-1] + (data,))
+ return result
+
+ def _parseline(self, line, lineno):
+ # blank lines
+ if iscommentline(line):
+ line = ""
+ else:
+ line = line.rstrip()
+ if not line:
+ return None, None
+ # section
+ if line[0] == '[':
+ realline = line
+ for c in COMMENTCHARS:
+ line = line.split(c)[0].rstrip()
+ if line[-1] == "]":
+ return line[1:-1], None
+ return None, realline.strip()
+ # value
+ elif not line[0].isspace():
+ try:
+ name, value = line.split('=', 1)
+ if ":" in name:
+ raise ValueError()
+ except ValueError:
+ try:
+ name, value = line.split(":", 1)
+ except ValueError:
+ self._raise(lineno, 'unexpected line: %r' % line)
+ return name.strip(), value.strip()
+ # continuation
+ else:
+ return None, line.strip()
+
+ def lineof(self, section, name=None):
+ lineno = self._sources.get((section, name))
+ if lineno is not None:
+ return lineno + 1
+
+ def get(self, section, name, default=None, convert=str):
+ try:
+ return convert(self.sections[section][name])
+ except KeyError:
+ return default
+
+ def __getitem__(self, name):
+ if name not in self.sections:
+ raise KeyError(name)
+ return SectionWrapper(self, name)
+
+ def __iter__(self):
+ for name in sorted(self.sections, key=self.lineof):
+ yield SectionWrapper(self, name)
+
+ def __contains__(self, arg):
+ return arg in self.sections
+
+def iscommentline(line):
+ c = line.lstrip()[:1]
+ return c in COMMENTCHARS
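A brief sketch of the parser above in use, assuming the py.iniconfig.IniConfig export of the public API; the ini content is made up:

    import py

    ini = "[metadata]\nname = example\nversion = 1.0\n"
    cfg = py.iniconfig.IniConfig("example.ini", data=ini)  # path is only a label when data is given
    meta = cfg["metadata"]                  # a SectionWrapper
    print(meta.get("name"))                 # 'example'
    print(cfg.get("metadata", "version"))   # '1.0'
    print(cfg.lineof("metadata", "name"))   # 1-based line number of the entry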
diff --git a/py/_io/__init__.py b/py/_io/__init__.py
index 835f01f..f1a6d63 100644
--- a/py/_io/__init__.py
+++ b/py/_io/__init__.py
@@ -1 +1 @@
-""" input/output helping """
+""" input/output helping """
diff --git a/py/_io/capture.py b/py/_io/capture.py
index bc157ed..3ce2259 100644
--- a/py/_io/capture.py
+++ b/py/_io/capture.py
@@ -1,371 +1,371 @@
-import os
-import sys
-import py
-import tempfile
-
-try:
- from io import StringIO
-except ImportError:
- from StringIO import StringIO
-
-if sys.version_info < (3,0):
- class TextIO(StringIO):
- def write(self, data):
- if not isinstance(data, unicode):
- data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
- StringIO.write(self, data)
-else:
- TextIO = StringIO
-
-try:
- from io import BytesIO
-except ImportError:
- class BytesIO(StringIO):
- def write(self, data):
- if isinstance(data, unicode):
- raise TypeError("not a byte value: %r" %(data,))
- StringIO.write(self, data)
-
-patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
-
-class FDCapture:
- """ Capture IO to/from a given os-level filedescriptor. """
-
- def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False):
- """ save targetfd descriptor, and open a new
- temporary file there. If no tmpfile is
- specified a tempfile.Tempfile() will be opened
- in text mode.
- """
- self.targetfd = targetfd
- if tmpfile is None and targetfd != 0:
- f = tempfile.TemporaryFile('wb+')
- tmpfile = dupfile(f, encoding="UTF-8")
- f.close()
- self.tmpfile = tmpfile
- self._savefd = os.dup(self.targetfd)
- if patchsys:
- self._oldsys = getattr(sys, patchsysdict[targetfd])
- if now:
- self.start()
-
- def start(self):
- try:
- os.fstat(self._savefd)
- except OSError:
- raise ValueError("saved filedescriptor not valid, "
- "did you call start() twice?")
- if self.targetfd == 0 and not self.tmpfile:
- fd = os.open(devnullpath, os.O_RDONLY)
- os.dup2(fd, 0)
- os.close(fd)
- if hasattr(self, '_oldsys'):
- setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
- else:
- os.dup2(self.tmpfile.fileno(), self.targetfd)
- if hasattr(self, '_oldsys'):
- setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
-
- def done(self):
- """ unpatch and clean up, returns the self.tmpfile (file object)
- """
- os.dup2(self._savefd, self.targetfd)
- os.close(self._savefd)
- if self.targetfd != 0:
- self.tmpfile.seek(0)
- if hasattr(self, '_oldsys'):
- setattr(sys, patchsysdict[self.targetfd], self._oldsys)
- return self.tmpfile
-
- def writeorg(self, data):
- """ write a string to the original file descriptor
- """
- tempfp = tempfile.TemporaryFile()
- try:
- os.dup2(self._savefd, tempfp.fileno())
- tempfp.write(data)
- finally:
- tempfp.close()
-
-
-def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
- """ return a new open file object that's a duplicate of f
-
- mode is duplicated if not given, 'buffering' controls
- buffer size (defaulting to no buffering) and 'raising'
- defines whether an exception is raised when an incompatible
- file object is passed in (if raising is False, the file
- object itself will be returned)
- """
- try:
- fd = f.fileno()
- mode = mode or f.mode
- except AttributeError:
- if raising:
- raise
- return f
- newfd = os.dup(fd)
- if sys.version_info >= (3,0):
- if encoding is not None:
- mode = mode.replace("b", "")
- buffering = True
- return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
- else:
- f = os.fdopen(newfd, mode, buffering)
- if encoding is not None:
- return EncodedFile(f, encoding)
- return f
-
-class EncodedFile(object):
- def __init__(self, _stream, encoding):
- self._stream = _stream
- self.encoding = encoding
-
- def write(self, obj):
- if isinstance(obj, unicode):
- obj = obj.encode(self.encoding)
- elif isinstance(obj, str):
- pass
- else:
- obj = str(obj)
- self._stream.write(obj)
-
- def writelines(self, linelist):
- data = ''.join(linelist)
- self.write(data)
-
- def __getattr__(self, name):
- return getattr(self._stream, name)
-
-class Capture(object):
- def call(cls, func, *args, **kwargs):
- """ return a (res, out, err) tuple where
- out and err represent the output/error output
- during function execution.
- call the given function with args/kwargs
- and capture output/error during its execution.
- """
- so = cls()
- try:
- res = func(*args, **kwargs)
- finally:
- out, err = so.reset()
- return res, out, err
- call = classmethod(call)
-
- def reset(self):
- """ reset sys.stdout/stderr and return captured output as strings. """
- if hasattr(self, '_reset'):
- raise ValueError("was already reset")
- self._reset = True
- outfile, errfile = self.done(save=False)
- out, err = "", ""
- if outfile and not outfile.closed:
- out = outfile.read()
- outfile.close()
- if errfile and errfile != outfile and not errfile.closed:
- err = errfile.read()
- errfile.close()
- return out, err
-
- def suspend(self):
- """ return current snapshot captures, memorize tempfiles. """
- outerr = self.readouterr()
- outfile, errfile = self.done()
- return outerr
-
-
-class StdCaptureFD(Capture):
- """ This class allows to capture writes to FD1 and FD2
- and may connect a NULL file to FD0 (and prevent
- reads from sys.stdin). If any of the 0,1,2 file descriptors
- is invalid it will not be captured.
- """
- def __init__(self, out=True, err=True, mixed=False,
- in_=True, patchsys=True, now=True):
- self._options = {
- "out": out,
- "err": err,
- "mixed": mixed,
- "in_": in_,
- "patchsys": patchsys,
- "now": now,
- }
- self._save()
- if now:
- self.startall()
-
- def _save(self):
- in_ = self._options['in_']
- out = self._options['out']
- err = self._options['err']
- mixed = self._options['mixed']
- patchsys = self._options['patchsys']
- if in_:
- try:
- self.in_ = FDCapture(0, tmpfile=None, now=False,
- patchsys=patchsys)
- except OSError:
- pass
- if out:
- tmpfile = None
- if hasattr(out, 'write'):
- tmpfile = out
- try:
- self.out = FDCapture(1, tmpfile=tmpfile,
- now=False, patchsys=patchsys)
- self._options['out'] = self.out.tmpfile
- except OSError:
- pass
- if err:
- if out and mixed:
- tmpfile = self.out.tmpfile
- elif hasattr(err, 'write'):
- tmpfile = err
- else:
- tmpfile = None
- try:
- self.err = FDCapture(2, tmpfile=tmpfile,
- now=False, patchsys=patchsys)
- self._options['err'] = self.err.tmpfile
- except OSError:
- pass
-
- def startall(self):
- if hasattr(self, 'in_'):
- self.in_.start()
- if hasattr(self, 'out'):
- self.out.start()
- if hasattr(self, 'err'):
- self.err.start()
-
- def resume(self):
- """ resume capturing with original temp files. """
- self.startall()
-
- def done(self, save=True):
- """ return (outfile, errfile) and stop capturing. """
- outfile = errfile = None
- if hasattr(self, 'out') and not self.out.tmpfile.closed:
- outfile = self.out.done()
- if hasattr(self, 'err') and not self.err.tmpfile.closed:
- errfile = self.err.done()
- if hasattr(self, 'in_'):
- tmpfile = self.in_.done()
- if save:
- self._save()
- return outfile, errfile
-
- def readouterr(self):
- """ return snapshot value of stdout/stderr capturings. """
- if hasattr(self, "out"):
- out = self._readsnapshot(self.out.tmpfile)
- else:
- out = ""
- if hasattr(self, "err"):
- err = self._readsnapshot(self.err.tmpfile)
- else:
- err = ""
- return [out, err]
-
- def _readsnapshot(self, f):
- f.seek(0)
- res = f.read()
- enc = getattr(f, "encoding", None)
- if enc:
- res = py.builtin._totext(res, enc, "replace")
- f.truncate(0)
- f.seek(0)
- return res
-
-
-class StdCapture(Capture):
- """ This class allows to capture writes to sys.stdout|stderr "in-memory"
- and will raise errors on tries to read from sys.stdin. It only
- modifies sys.stdout|stderr|stdin attributes and does not
- touch underlying File Descriptors (use StdCaptureFD for that).
- """
- def __init__(self, out=True, err=True, in_=True, mixed=False, now=True):
- self._oldout = sys.stdout
- self._olderr = sys.stderr
- self._oldin = sys.stdin
- if out and not hasattr(out, 'file'):
- out = TextIO()
- self.out = out
- if err:
- if mixed:
- err = out
- elif not hasattr(err, 'write'):
- err = TextIO()
- self.err = err
- self.in_ = in_
- if now:
- self.startall()
-
- def startall(self):
- if self.out:
- sys.stdout = self.out
- if self.err:
- sys.stderr = self.err
- if self.in_:
- sys.stdin = self.in_ = DontReadFromInput()
-
- def done(self, save=True):
- """ return (outfile, errfile) and stop capturing. """
- outfile = errfile = None
- if self.out and not self.out.closed:
- sys.stdout = self._oldout
- outfile = self.out
- outfile.seek(0)
- if self.err and not self.err.closed:
- sys.stderr = self._olderr
- errfile = self.err
- errfile.seek(0)
- if self.in_:
- sys.stdin = self._oldin
- return outfile, errfile
-
- def resume(self):
- """ resume capturing with original temp files. """
- self.startall()
-
- def readouterr(self):
- """ return snapshot value of stdout/stderr capturings. """
- out = err = ""
- if self.out:
- out = self.out.getvalue()
- self.out.truncate(0)
- self.out.seek(0)
- if self.err:
- err = self.err.getvalue()
- self.err.truncate(0)
- self.err.seek(0)
- return out, err
-
-class DontReadFromInput:
- """Temporary stub class. Ideally when stdin is accessed, the
- capturing should be turned off, with possibly all data captured
- so far sent to the screen. This should be configurable, though,
- because in automated test runs it is better to crash than
- hang indefinitely.
- """
- def read(self, *args):
- raise IOError("reading from stdin while output is captured")
- readline = read
- readlines = read
- __iter__ = read
-
- def fileno(self):
- raise ValueError("redirected Stdin is pseudofile, has no fileno()")
- def isatty(self):
- return False
- def close(self):
- pass
-
-try:
- devnullpath = os.devnull
-except AttributeError:
- if os.name == 'nt':
- devnullpath = 'NUL'
- else:
- devnullpath = '/dev/null'
+import os
+import sys
+import py
+import tempfile
+
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+if sys.version_info < (3,0):
+ class TextIO(StringIO):
+ def write(self, data):
+ if not isinstance(data, unicode):
+ data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
+ StringIO.write(self, data)
+else:
+ TextIO = StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ class BytesIO(StringIO):
+ def write(self, data):
+ if isinstance(data, unicode):
+ raise TypeError("not a byte value: %r" %(data,))
+ StringIO.write(self, data)
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False):
+ """ save targetfd descriptor, and open a new
+ temporary file there. If no tmpfile is
+ specified, a tempfile.TemporaryFile() will be opened
+ in text mode.
+ """
+ self.targetfd = targetfd
+ if tmpfile is None and targetfd != 0:
+ f = tempfile.TemporaryFile('wb+')
+ tmpfile = dupfile(f, encoding="UTF-8")
+ f.close()
+ self.tmpfile = tmpfile
+ self._savefd = os.dup(self.targetfd)
+ if patchsys:
+ self._oldsys = getattr(sys, patchsysdict[targetfd])
+ if now:
+ self.start()
+
+ def start(self):
+ try:
+ os.fstat(self._savefd)
+ except OSError:
+ raise ValueError("saved filedescriptor not valid, "
+ "did you call start() twice?")
+ if self.targetfd == 0 and not self.tmpfile:
+ fd = os.open(devnullpath, os.O_RDONLY)
+ os.dup2(fd, 0)
+ os.close(fd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+ else:
+ os.dup2(self.tmpfile.fileno(), self.targetfd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+ def done(self):
+ """ unpatch and clean up, returns the self.tmpfile (file object)
+ """
+ os.dup2(self._savefd, self.targetfd)
+ os.close(self._savefd)
+ if self.targetfd != 0:
+ self.tmpfile.seek(0)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+ return self.tmpfile
+
+ def writeorg(self, data):
+ """ write a string to the original file descriptor
+ """
+ tempfp = tempfile.TemporaryFile()
+ try:
+ os.dup2(self._savefd, tempfp.fileno())
+ tempfp.write(data)
+ finally:
+ tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+ """ return a new open file object that's a duplicate of f
+
+ mode is duplicated if not given, 'buffering' controls
+ buffer size (defaulting to no buffering) and 'raising'
+ defines whether an exception is raised when an incompatible
+ file object is passed in (if raising is False, the file
+ object itself will be returned)
+ """
+ try:
+ fd = f.fileno()
+ mode = mode or f.mode
+ except AttributeError:
+ if raising:
+ raise
+ return f
+ newfd = os.dup(fd)
+ if sys.version_info >= (3,0):
+ if encoding is not None:
+ mode = mode.replace("b", "")
+ buffering = True
+ return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+ else:
+ f = os.fdopen(newfd, mode, buffering)
+ if encoding is not None:
+ return EncodedFile(f, encoding)
+ return f
+
+class EncodedFile(object):
+ def __init__(self, _stream, encoding):
+ self._stream = _stream
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding)
+ elif isinstance(obj, str):
+ pass
+ else:
+ obj = str(obj)
+ self._stream.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+class Capture(object):
+ def call(cls, func, *args, **kwargs):
+ """ call the given function with args/kwargs and return a
+ (res, out, err) tuple, where out and err contain the output
+ and error output captured during the function's execution.
+ """
+ so = cls()
+ try:
+ res = func(*args, **kwargs)
+ finally:
+ out, err = so.reset()
+ return res, out, err
+ call = classmethod(call)
+
+ def reset(self):
+ """ reset sys.stdout/stderr and return captured output as strings. """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already reset")
+ self._reset = True
+ outfile, errfile = self.done(save=False)
+ out, err = "", ""
+ if outfile and not outfile.closed:
+ out = outfile.read()
+ outfile.close()
+ if errfile and errfile != outfile and not errfile.closed:
+ err = errfile.read()
+ errfile.close()
+ return out, err
+
+ def suspend(self):
+ """ return current snapshot captures, keeping the tempfiles for a later resume(). """
+ outerr = self.readouterr()
+ outfile, errfile = self.done()
+ return outerr
+
+
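Capture.call() is the one-shot form of this API; a short sketch using the
StdCapture subclass defined further below:

    import py

    def compute():
        print("working...")
        return 42

    res, out, err = py.io.StdCapture.call(compute)
    assert res == 42
    assert out == "working...\n" and err == ""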
+class StdCaptureFD(Capture):
+ """ This class allows capturing writes to FD1 and FD2
+ and may connect a NULL file to FD0 (and prevent
+ reads from sys.stdin). If any of the 0,1,2 file descriptors
+ is invalid it will not be captured.
+ """
+ def __init__(self, out=True, err=True, mixed=False,
+ in_=True, patchsys=True, now=True):
+ self._options = {
+ "out": out,
+ "err": err,
+ "mixed": mixed,
+ "in_": in_,
+ "patchsys": patchsys,
+ "now": now,
+ }
+ self._save()
+ if now:
+ self.startall()
+
+ def _save(self):
+ in_ = self._options['in_']
+ out = self._options['out']
+ err = self._options['err']
+ mixed = self._options['mixed']
+ patchsys = self._options['patchsys']
+ if in_:
+ try:
+ self.in_ = FDCapture(0, tmpfile=None, now=False,
+ patchsys=patchsys)
+ except OSError:
+ pass
+ if out:
+ tmpfile = None
+ if hasattr(out, 'write'):
+ tmpfile = out
+ try:
+ self.out = FDCapture(1, tmpfile=tmpfile,
+ now=False, patchsys=patchsys)
+ self._options['out'] = self.out.tmpfile
+ except OSError:
+ pass
+ if err:
+ if out and mixed:
+ tmpfile = self.out.tmpfile
+ elif hasattr(err, 'write'):
+ tmpfile = err
+ else:
+ tmpfile = None
+ try:
+ self.err = FDCapture(2, tmpfile=tmpfile,
+ now=False, patchsys=patchsys)
+ self._options['err'] = self.err.tmpfile
+ except OSError:
+ pass
+
+ def startall(self):
+ if hasattr(self, 'in_'):
+ self.in_.start()
+ if hasattr(self, 'out'):
+ self.out.start()
+ if hasattr(self, 'err'):
+ self.err.start()
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if hasattr(self, 'out') and not self.out.tmpfile.closed:
+ outfile = self.out.done()
+ if hasattr(self, 'err') and not self.err.tmpfile.closed:
+ errfile = self.err.done()
+ if hasattr(self, 'in_'):
+ tmpfile = self.in_.done()
+ if save:
+ self._save()
+ return outfile, errfile
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ if hasattr(self, "out"):
+ out = self._readsnapshot(self.out.tmpfile)
+ else:
+ out = ""
+ if hasattr(self, "err"):
+ err = self._readsnapshot(self.err.tmpfile)
+ else:
+ err = ""
+ return [out, err]
+
+ def _readsnapshot(self, f):
+ f.seek(0)
+ res = f.read()
+ enc = getattr(f, "encoding", None)
+ if enc:
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+
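Because StdCaptureFD redirects the real file descriptors, output produced by
subprocesses or C extensions is captured as well; an illustrative sketch (the
echoed text is made up):

    import os
    import py

    cap = py.io.StdCaptureFD()
    os.system("echo hello-from-a-subprocess")
    out, err = cap.reset()                  # restore the fds, return captured text
    assert "hello-from-a-subprocess" in out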
+class StdCapture(Capture):
+ """ This class allows capturing writes to sys.stdout|stderr "in-memory"
+ and will raise errors on attempts to read from sys.stdin. It only
+ modifies the sys.stdout|stderr|stdin attributes and does not
+ touch the underlying file descriptors (use StdCaptureFD for that).
+ """
+ def __init__(self, out=True, err=True, in_=True, mixed=False, now=True):
+ self._oldout = sys.stdout
+ self._olderr = sys.stderr
+ self._oldin = sys.stdin
+ if out and not hasattr(out, 'file'):
+ out = TextIO()
+ self.out = out
+ if err:
+ if mixed:
+ err = out
+ elif not hasattr(err, 'write'):
+ err = TextIO()
+ self.err = err
+ self.in_ = in_
+ if now:
+ self.startall()
+
+ def startall(self):
+ if self.out:
+ sys.stdout = self.out
+ if self.err:
+ sys.stderr = self.err
+ if self.in_:
+ sys.stdin = self.in_ = DontReadFromInput()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if self.out and not self.out.closed:
+ sys.stdout = self._oldout
+ outfile = self.out
+ outfile.seek(0)
+ if self.err and not self.err.closed:
+ sys.stderr = self._olderr
+ errfile = self.err
+ errfile.seek(0)
+ if self.in_:
+ sys.stdin = self._oldin
+ return outfile, errfile
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = err = ""
+ if self.out:
+ out = self.out.getvalue()
+ self.out.truncate(0)
+ self.out.seek(0)
+ if self.err:
+ err = self.err.getvalue()
+ self.err.truncate(0)
+ self.err.seek(0)
+ return out, err
+
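A sketch of the in-memory variant: readouterr() takes a snapshot while
capturing continues, reset() stops capturing and restores the sys attributes:

    import py

    cap = py.io.StdCapture()
    print("first")
    out, err = cap.readouterr()             # snapshot, capturing keeps going
    print("second")
    out2, err2 = cap.reset()                # stop and restore sys.stdout/stderr/stdin
    assert out == "first\n" and out2 == "second\n"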
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+ def isatty(self):
+ return False
+ def close(self):
+ pass
+
+try:
+ devnullpath = os.devnull
+except AttributeError:
+ if os.name == 'nt':
+ devnullpath = 'NUL'
+ else:
+ devnullpath = '/dev/null'
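With in_=True (the default) sys.stdin is replaced by DontReadFromInput, so an
accidental read fails fast instead of hanging the run; a minimal sketch:

    import sys
    import py

    cap = py.io.StdCapture(in_=True)
    try:
        sys.stdin.readline()                # raises IOError instead of blocking
    except IOError:
        pass
    finally:
        cap.reset()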
diff --git a/py/_io/saferepr.py b/py/_io/saferepr.py
index 8518290..e86f18f 100644
--- a/py/_io/saferepr.py
+++ b/py/_io/saferepr.py
@@ -1,71 +1,71 @@
-import py
-import sys
-
-builtin_repr = repr
-
-reprlib = py.builtin._tryimport('repr', 'reprlib')
-
-class SafeRepr(reprlib.Repr):
- """ subclass of repr.Repr that limits the resulting size of repr()
- and includes information on exceptions raised during the call.
- """
- def repr(self, x):
- return self._callhelper(reprlib.Repr.repr, self, x)
-
- def repr_unicode(self, x, level):
- # Strictly speaking wrong on narrow builds
- def repr(u):
- if "'" not in u:
- return py.builtin._totext("'%s'") % u
- elif '"' not in u:
- return py.builtin._totext('"%s"') % u
- else:
- return py.builtin._totext("'%s'") % u.replace("'", r"\'")
- s = repr(x[:self.maxstring])
- if len(s) > self.maxstring:
- i = max(0, (self.maxstring-3)//2)
- j = max(0, self.maxstring-3-i)
- s = repr(x[:i] + x[len(x)-j:])
- s = s[:i] + '...' + s[len(s)-j:]
- return s
-
- def repr_instance(self, x, level):
- return self._callhelper(builtin_repr, x)
-
- def _callhelper(self, call, x, *args):
- try:
- # Try the vanilla repr and make sure that the result is a string
- s = call(x, *args)
- except py.builtin._sysex:
- raise
- except:
- cls, e, tb = sys.exc_info()
- exc_name = getattr(cls, '__name__', 'unknown')
- try:
- exc_info = str(e)
- except py.builtin._sysex:
- raise
- except:
- exc_info = 'unknown'
- return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
- exc_name, exc_info, x.__class__.__name__, id(x))
- else:
- if len(s) > self.maxsize:
- i = max(0, (self.maxsize-3)//2)
- j = max(0, self.maxsize-3-i)
- s = s[:i] + '...' + s[len(s)-j:]
- return s
-
-def saferepr(obj, maxsize=240):
- """ return a size-limited safe repr-string for the given object.
- Failing __repr__ functions of user instances will be represented
- with a short exception info and 'saferepr' generally takes
- care to never raise exceptions itself. This function is a wrapper
- around the Repr/reprlib functionality of the standard 2.6 lib.
- """
- # review exception handling
- srepr = SafeRepr()
- srepr.maxstring = maxsize
- srepr.maxsize = maxsize
- srepr.maxother = 160
- return srepr.repr(obj)
+import py
+import sys
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+class SafeRepr(reprlib.Repr):
+ """ subclass of repr.Repr that limits the resulting size of repr()
+ and includes information on exceptions raised during the call.
+ """
+ def repr(self, x):
+ return self._callhelper(reprlib.Repr.repr, self, x)
+
+ def repr_unicode(self, x, level):
+ # Strictly speaking wrong on narrow builds
+ def repr(u):
+ if "'" not in u:
+ return py.builtin._totext("'%s'") % u
+ elif '"' not in u:
+ return py.builtin._totext('"%s"') % u
+ else:
+ return py.builtin._totext("'%s'") % u.replace("'", r"\'")
+ s = repr(x[:self.maxstring])
+ if len(s) > self.maxstring:
+ i = max(0, (self.maxstring-3)//2)
+ j = max(0, self.maxstring-3-i)
+ s = repr(x[:i] + x[len(x)-j:])
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+ def repr_instance(self, x, level):
+ return self._callhelper(builtin_repr, x)
+
+ def _callhelper(self, call, x, *args):
+ try:
+ # Try the vanilla repr and make sure that the result is a string
+ s = call(x, *args)
+ except py.builtin._sysex:
+ raise
+ except:
+ cls, e, tb = sys.exc_info()
+ exc_name = getattr(cls, '__name__', 'unknown')
+ try:
+ exc_info = str(e)
+ except py.builtin._sysex:
+ raise
+ except:
+ exc_info = 'unknown'
+ return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
+ exc_name, exc_info, x.__class__.__name__, id(x))
+ else:
+ if len(s) > self.maxsize:
+ i = max(0, (self.maxsize-3)//2)
+ j = max(0, self.maxsize-3-i)
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+def saferepr(obj, maxsize=240):
+ """ return a size-limited safe repr-string for the given object.
+ Failing __repr__ functions of user instances will be represented
+ with a short exception info and 'saferepr' generally takes
+ care to never raise exceptions itself. This function is a wrapper
+ around the Repr/reprlib functionality of the standard 2.6 lib.
+ """
+ # review exception handling
+ srepr = SafeRepr()
+ srepr.maxstring = maxsize
+ srepr.maxsize = maxsize
+ srepr.maxother = 160
+ return srepr.repr(obj)
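A short illustrative sketch of saferepr(); the Broken class is made up:

    import py

    class Broken(object):
        def __repr__(self):
            raise RuntimeError("boom")

    print(py.io.saferepr(Broken()))    # <[RuntimeError("boom") raised in repr()] Broken object at 0x...>
    print(py.io.saferepr("x" * 1000))  # truncated to roughly 240 chars with '...' in the middle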
diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py
index 390e8ca..715547d 100644
--- a/py/_io/terminalwriter.py
+++ b/py/_io/terminalwriter.py
@@ -1,357 +1,357 @@
-"""
-
-Helper functions for writing to terminals and files.
-
-"""
-
-
-import sys, os
-import py
-py3k = sys.version_info[0] >= 3
-from py.builtin import text, bytes
-
-win32_and_ctypes = False
-colorama = None
-if sys.platform == "win32":
- try:
- import colorama
- except ImportError:
- try:
- import ctypes
- win32_and_ctypes = True
- except ImportError:
- pass
-
-
-def _getdimensions():
- import termios,fcntl,struct
- call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8)
- height,width = struct.unpack( "hhhh", call ) [:2]
- return height, width
-
-
-def get_terminal_width():
- height = width = 0
- try:
- height, width = _getdimensions()
- except py.builtin._sysex:
- raise
- except:
- # pass to fallback below
- pass
-
- if width == 0:
- # FALLBACK:
- # * some exception happened
- # * or this is emacs terminal which reports (0,0)
- width = int(os.environ.get('COLUMNS', 80))
-
- # XXX the windows getdimensions may be bogus, let's sanify a bit
- if width < 40:
- width = 80
- return width
-
-terminal_width = get_terminal_width()
-
-# XXX unify with _escaped func below
-def ansi_print(text, esc, file=None, newline=True, flush=False):
- if file is None:
- file = sys.stderr
- text = text.rstrip()
- if esc and not isinstance(esc, tuple):
- esc = (esc,)
- if esc and sys.platform != "win32" and file.isatty():
- text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
- text +
- '\x1b[0m') # ANSI color code "reset"
- if newline:
- text += '\n'
-
- if esc and win32_and_ctypes and file.isatty():
- if 1 in esc:
- bold = True
- esc = tuple([x for x in esc if x != 1])
- else:
- bold = False
- esctable = {() : FOREGROUND_WHITE, # normal
- (31,): FOREGROUND_RED, # red
- (32,): FOREGROUND_GREEN, # green
- (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
- (34,): FOREGROUND_BLUE, # blue
- (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
- (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
- (37,): FOREGROUND_WHITE, # white
- (39,): FOREGROUND_WHITE, # reset
- }
- attr = esctable.get(esc, FOREGROUND_WHITE)
- if bold:
- attr |= FOREGROUND_INTENSITY
- STD_OUTPUT_HANDLE = -11
- STD_ERROR_HANDLE = -12
- if file is sys.stderr:
- handle = GetStdHandle(STD_ERROR_HANDLE)
- else:
- handle = GetStdHandle(STD_OUTPUT_HANDLE)
- oldcolors = GetConsoleInfo(handle).wAttributes
- attr |= (oldcolors & 0x0f0)
- SetConsoleTextAttribute(handle, attr)
- while len(text) > 32768:
- file.write(text[:32768])
- text = text[32768:]
- if text:
- file.write(text)
- SetConsoleTextAttribute(handle, oldcolors)
- else:
- file.write(text)
-
- if flush:
- file.flush()
-
-def should_do_markup(file):
- if os.environ.get('PY_COLORS') == '1':
- return True
- if os.environ.get('PY_COLORS') == '0':
- return False
- return hasattr(file, 'isatty') and file.isatty() \
- and os.environ.get('TERM') != 'dumb' \
- and not (sys.platform.startswith('java') and os._name == 'nt')
-
-class TerminalWriter(object):
- _esctable = dict(black=30, red=31, green=32, yellow=33,
- blue=34, purple=35, cyan=36, white=37,
- Black=40, Red=41, Green=42, Yellow=43,
- Blue=44, Purple=45, Cyan=46, White=47,
- bold=1, light=2, blink=5, invert=7)
-
- # XXX deprecate stringio argument
- def __init__(self, file=None, stringio=False, encoding=None):
- if file is None:
- if stringio:
- self.stringio = file = py.io.TextIO()
- else:
- file = py.std.sys.stdout
- elif py.builtin.callable(file) and not (
- hasattr(file, "write") and hasattr(file, "flush")):
- file = WriteFile(file, encoding=encoding)
- if hasattr(file, "isatty") and file.isatty() and colorama:
- file = colorama.AnsiToWin32(file).stream
- self.encoding = encoding or getattr(file, 'encoding', "utf-8")
- self._file = file
- self.hasmarkup = should_do_markup(file)
- self._lastlen = 0
-
- @property
- def fullwidth(self):
- if hasattr(self, '_terminal_width'):
- return self._terminal_width
- return get_terminal_width()
-
- @fullwidth.setter
- def fullwidth(self, value):
- self._terminal_width = value
-
- def _escaped(self, text, esc):
- if esc and self.hasmarkup:
- text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
- text +'\x1b[0m')
- return text
-
- def markup(self, text, **kw):
- esc = []
- for name in kw:
- if name not in self._esctable:
- raise ValueError("unknown markup: %r" %(name,))
- if kw[name]:
- esc.append(self._esctable[name])
- return self._escaped(text, tuple(esc))
-
- def sep(self, sepchar, title=None, fullwidth=None, **kw):
- if fullwidth is None:
- fullwidth = self.fullwidth
- # the goal is to have the line be as long as possible
- # under the condition that len(line) <= fullwidth
- if sys.platform == "win32":
- # if we print in the last column on windows we are on a
- # new line but there is no way to verify/neutralize this
- # (we may not know the exact line width)
- # so let's be defensive to avoid empty lines in the output
- fullwidth -= 1
- if title is not None:
- # we want 2 + 2*len(fill) + len(title) <= fullwidth
- # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
- # 2*len(sepchar)*N <= fullwidth - len(title) - 2
- # N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
- N = (fullwidth - len(title) - 2) // (2*len(sepchar))
- fill = sepchar * N
- line = "%s %s %s" % (fill, title, fill)
- else:
- # we want len(sepchar)*N <= fullwidth
- # i.e. N <= fullwidth // len(sepchar)
- line = sepchar * (fullwidth // len(sepchar))
- # in some situations there is room for an extra sepchar at the right,
- # in particular if we consider that with a sepchar like "_ " the
- # trailing space is not important at the end of the line
- if len(line) + len(sepchar.rstrip()) <= fullwidth:
- line += sepchar.rstrip()
-
- self.line(line, **kw)
-
- def write(self, msg, **kw):
- if msg:
- if not isinstance(msg, (bytes, text)):
- msg = text(msg)
- if self.hasmarkup and kw:
- markupmsg = self.markup(msg, **kw)
- else:
- markupmsg = msg
- write_out(self._file, markupmsg)
-
- def line(self, s='', **kw):
- self.write(s, **kw)
- self._checkfill(s)
- self.write('\n')
-
- def reline(self, line, **kw):
- if not self.hasmarkup:
- raise ValueError("cannot use rewrite-line without terminal")
- self.write(line, **kw)
- self._checkfill(line)
- self.write('\r')
- self._lastlen = len(line)
-
- def _checkfill(self, line):
- diff2last = self._lastlen - len(line)
- if diff2last > 0:
- self.write(" " * diff2last)
-
-class Win32ConsoleWriter(TerminalWriter):
- def write(self, msg, **kw):
- if msg:
- if not isinstance(msg, (bytes, text)):
- msg = text(msg)
- oldcolors = None
- if self.hasmarkup and kw:
- handle = GetStdHandle(STD_OUTPUT_HANDLE)
- oldcolors = GetConsoleInfo(handle).wAttributes
- default_bg = oldcolors & 0x00F0
- attr = default_bg
- if kw.pop('bold', False):
- attr |= FOREGROUND_INTENSITY
-
- if kw.pop('red', False):
- attr |= FOREGROUND_RED
- elif kw.pop('blue', False):
- attr |= FOREGROUND_BLUE
- elif kw.pop('green', False):
- attr |= FOREGROUND_GREEN
- elif kw.pop('yellow', False):
- attr |= FOREGROUND_GREEN|FOREGROUND_RED
- else:
- attr |= oldcolors & 0x0007
-
- SetConsoleTextAttribute(handle, attr)
- write_out(self._file, msg)
- if oldcolors:
- SetConsoleTextAttribute(handle, oldcolors)
-
-class WriteFile(object):
- def __init__(self, writemethod, encoding=None):
- self.encoding = encoding
- self._writemethod = writemethod
-
- def write(self, data):
- if self.encoding:
- data = data.encode(self.encoding, "replace")
- self._writemethod(data)
-
- def flush(self):
- return
-
-
-if win32_and_ctypes:
- TerminalWriter = Win32ConsoleWriter
- import ctypes
- from ctypes import wintypes
-
- # ctypes access to the Windows console
- STD_OUTPUT_HANDLE = -11
- STD_ERROR_HANDLE = -12
- FOREGROUND_BLACK = 0x0000 # black text
- FOREGROUND_BLUE = 0x0001 # text color contains blue.
- FOREGROUND_GREEN = 0x0002 # text color contains green.
- FOREGROUND_RED = 0x0004 # text color contains red.
- FOREGROUND_WHITE = 0x0007
- FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
- BACKGROUND_BLACK = 0x0000 # background color black
- BACKGROUND_BLUE = 0x0010 # background color contains blue.
- BACKGROUND_GREEN = 0x0020 # background color contains green.
- BACKGROUND_RED = 0x0040 # background color contains red.
- BACKGROUND_WHITE = 0x0070
- BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
-
- SHORT = ctypes.c_short
- class COORD(ctypes.Structure):
- _fields_ = [('X', SHORT),
- ('Y', SHORT)]
- class SMALL_RECT(ctypes.Structure):
- _fields_ = [('Left', SHORT),
- ('Top', SHORT),
- ('Right', SHORT),
- ('Bottom', SHORT)]
- class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
- _fields_ = [('dwSize', COORD),
- ('dwCursorPosition', COORD),
- ('wAttributes', wintypes.WORD),
- ('srWindow', SMALL_RECT),
- ('dwMaximumWindowSize', COORD)]
-
- _GetStdHandle = ctypes.windll.kernel32.GetStdHandle
- _GetStdHandle.argtypes = [wintypes.DWORD]
- _GetStdHandle.restype = wintypes.HANDLE
- def GetStdHandle(kind):
- return _GetStdHandle(kind)
-
- SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
- SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
- SetConsoleTextAttribute.restype = wintypes.BOOL
-
- _GetConsoleScreenBufferInfo = \
- ctypes.windll.kernel32.GetConsoleScreenBufferInfo
- _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
- ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
- _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
- def GetConsoleInfo(handle):
- info = CONSOLE_SCREEN_BUFFER_INFO()
- _GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
- return info
-
- def _getdimensions():
- handle = GetStdHandle(STD_OUTPUT_HANDLE)
- info = GetConsoleInfo(handle)
- # Substract one from the width, otherwise the cursor wraps
- # and the ending \n causes an empty line to display.
- return info.dwSize.Y, info.dwSize.X - 1
-
-def write_out(fil, msg):
- # XXX sometimes "msg" is of type bytes, sometimes text which
- # complicates the situation. Should we try to enforce unicode?
- try:
- # on py27 and above writing out to sys.stdout with an encoding
- # should usually work for unicode messages (if the encoding is
- # capable of it)
- fil.write(msg)
- except UnicodeEncodeError:
- # on py26 it might not work because stdout expects bytes
- if fil.encoding:
- try:
- fil.write(msg.encode(fil.encoding))
- except UnicodeEncodeError:
- # it might still fail if the encoding is not capable
- pass
- else:
- fil.flush()
- return
- # fallback: escape all unicode characters
- msg = msg.encode("unicode-escape").decode("ascii")
- fil.write(msg)
- fil.flush()
+"""
+
+Helper functions for writing to terminals and files.
+
+"""
+
+
+import sys, os
+import py
+py3k = sys.version_info[0] >= 3
+from py.builtin import text, bytes
+
+win32_and_ctypes = False
+colorama = None
+if sys.platform == "win32":
+ try:
+ import colorama
+ except ImportError:
+ try:
+ import ctypes
+ win32_and_ctypes = True
+ except ImportError:
+ pass
+
+
+def _getdimensions():
+ import termios,fcntl,struct
+ call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8)
+ height,width = struct.unpack( "hhhh", call ) [:2]
+ return height, width
+
+
+def get_terminal_width():
+ height = width = 0
+ try:
+ height, width = _getdimensions()
+ except py.builtin._sysex:
+ raise
+ except:
+ # pass to fallback below
+ pass
+
+ if width == 0:
+ # FALLBACK:
+ # * some exception happened
+ # * or this is emacs terminal which reports (0,0)
+ width = int(os.environ.get('COLUMNS', 80))
+
+ # XXX the windows getdimensions may be bogus, let's sanify a bit
+ if width < 40:
+ width = 80
+ return width
+
+terminal_width = get_terminal_width()
+
+# XXX unify with _escaped func below
+def ansi_print(text, esc, file=None, newline=True, flush=False):
+ if file is None:
+ file = sys.stderr
+ text = text.rstrip()
+ if esc and not isinstance(esc, tuple):
+ esc = (esc,)
+ if esc and sys.platform != "win32" and file.isatty():
+ text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
+ text +
+ '\x1b[0m') # ANSI color code "reset"
+ if newline:
+ text += '\n'
+
+ if esc and win32_and_ctypes and file.isatty():
+ if 1 in esc:
+ bold = True
+ esc = tuple([x for x in esc if x != 1])
+ else:
+ bold = False
+ esctable = {() : FOREGROUND_WHITE, # normal
+ (31,): FOREGROUND_RED, # red
+ (32,): FOREGROUND_GREEN, # green
+ (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
+ (34,): FOREGROUND_BLUE, # blue
+ (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
+ (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
+ (37,): FOREGROUND_WHITE, # white
+ (39,): FOREGROUND_WHITE, # reset
+ }
+ attr = esctable.get(esc, FOREGROUND_WHITE)
+ if bold:
+ attr |= FOREGROUND_INTENSITY
+ STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
+ if file is sys.stderr:
+ handle = GetStdHandle(STD_ERROR_HANDLE)
+ else:
+ handle = GetStdHandle(STD_OUTPUT_HANDLE)
+ oldcolors = GetConsoleInfo(handle).wAttributes
+ attr |= (oldcolors & 0x0f0)
+ SetConsoleTextAttribute(handle, attr)
+ while len(text) > 32768:
+ file.write(text[:32768])
+ text = text[32768:]
+ if text:
+ file.write(text)
+ SetConsoleTextAttribute(handle, oldcolors)
+ else:
+ file.write(text)
+
+ if flush:
+ file.flush()
+
+def should_do_markup(file):
+ if os.environ.get('PY_COLORS') == '1':
+ return True
+ if os.environ.get('PY_COLORS') == '0':
+ return False
+ return hasattr(file, 'isatty') and file.isatty() \
+ and os.environ.get('TERM') != 'dumb' \
+ and not (sys.platform.startswith('java') and os._name == 'nt')
+
+class TerminalWriter(object):
+ _esctable = dict(black=30, red=31, green=32, yellow=33,
+ blue=34, purple=35, cyan=36, white=37,
+ Black=40, Red=41, Green=42, Yellow=43,
+ Blue=44, Purple=45, Cyan=46, White=47,
+ bold=1, light=2, blink=5, invert=7)
+
+ # XXX deprecate stringio argument
+ def __init__(self, file=None, stringio=False, encoding=None):
+ if file is None:
+ if stringio:
+ self.stringio = file = py.io.TextIO()
+ else:
+ file = py.std.sys.stdout
+ elif py.builtin.callable(file) and not (
+ hasattr(file, "write") and hasattr(file, "flush")):
+ file = WriteFile(file, encoding=encoding)
+ if hasattr(file, "isatty") and file.isatty() and colorama:
+ file = colorama.AnsiToWin32(file).stream
+ self.encoding = encoding or getattr(file, 'encoding', "utf-8")
+ self._file = file
+ self.hasmarkup = should_do_markup(file)
+ self._lastlen = 0
+
+ @property
+ def fullwidth(self):
+ if hasattr(self, '_terminal_width'):
+ return self._terminal_width
+ return get_terminal_width()
+
+ @fullwidth.setter
+ def fullwidth(self, value):
+ self._terminal_width = value
+
+ def _escaped(self, text, esc):
+ if esc and self.hasmarkup:
+ text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
+ text +'\x1b[0m')
+ return text
+
+ def markup(self, text, **kw):
+ esc = []
+ for name in kw:
+ if name not in self._esctable:
+ raise ValueError("unknown markup: %r" %(name,))
+ if kw[name]:
+ esc.append(self._esctable[name])
+ return self._escaped(text, tuple(esc))
+
+ def sep(self, sepchar, title=None, fullwidth=None, **kw):
+ if fullwidth is None:
+ fullwidth = self.fullwidth
+ # the goal is to have the line be as long as possible
+ # under the condition that len(line) <= fullwidth
+ if sys.platform == "win32":
+ # if we print in the last column on windows we are on a
+ # new line but there is no way to verify/neutralize this
+ # (we may not know the exact line width)
+ # so let's be defensive to avoid empty lines in the output
+ fullwidth -= 1
+ if title is not None:
+ # we want 2 + 2*len(fill) + len(title) <= fullwidth
+ # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
+ # 2*len(sepchar)*N <= fullwidth - len(title) - 2
+ # N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
+ N = (fullwidth - len(title) - 2) // (2*len(sepchar))
+ fill = sepchar * N
+ line = "%s %s %s" % (fill, title, fill)
+ else:
+ # we want len(sepchar)*N <= fullwidth
+ # i.e. N <= fullwidth // len(sepchar)
+ line = sepchar * (fullwidth // len(sepchar))
+ # in some situations there is room for an extra sepchar at the right,
+ # in particular if we consider that with a sepchar like "_ " the
+ # trailing space is not important at the end of the line
+ if len(line) + len(sepchar.rstrip()) <= fullwidth:
+ line += sepchar.rstrip()
+
+ self.line(line, **kw)
+
+ def write(self, msg, **kw):
+ if msg:
+ if not isinstance(msg, (bytes, text)):
+ msg = text(msg)
+ if self.hasmarkup and kw:
+ markupmsg = self.markup(msg, **kw)
+ else:
+ markupmsg = msg
+ write_out(self._file, markupmsg)
+
+ def line(self, s='', **kw):
+ self.write(s, **kw)
+ self._checkfill(s)
+ self.write('\n')
+
+ def reline(self, line, **kw):
+ if not self.hasmarkup:
+ raise ValueError("cannot use rewrite-line without terminal")
+ self.write(line, **kw)
+ self._checkfill(line)
+ self.write('\r')
+ self._lastlen = len(line)
+
+ def _checkfill(self, line):
+ diff2last = self._lastlen - len(line)
+ if diff2last > 0:
+ self.write(" " * diff2last)
+
+class Win32ConsoleWriter(TerminalWriter):
+ def write(self, msg, **kw):
+ if msg:
+ if not isinstance(msg, (bytes, text)):
+ msg = text(msg)
+ oldcolors = None
+ if self.hasmarkup and kw:
+ handle = GetStdHandle(STD_OUTPUT_HANDLE)
+ oldcolors = GetConsoleInfo(handle).wAttributes
+ default_bg = oldcolors & 0x00F0
+ attr = default_bg
+ if kw.pop('bold', False):
+ attr |= FOREGROUND_INTENSITY
+
+ if kw.pop('red', False):
+ attr |= FOREGROUND_RED
+ elif kw.pop('blue', False):
+ attr |= FOREGROUND_BLUE
+ elif kw.pop('green', False):
+ attr |= FOREGROUND_GREEN
+ elif kw.pop('yellow', False):
+ attr |= FOREGROUND_GREEN|FOREGROUND_RED
+ else:
+ attr |= oldcolors & 0x0007
+
+ SetConsoleTextAttribute(handle, attr)
+ write_out(self._file, msg)
+ if oldcolors:
+ SetConsoleTextAttribute(handle, oldcolors)
+
+class WriteFile(object):
+ def __init__(self, writemethod, encoding=None):
+ self.encoding = encoding
+ self._writemethod = writemethod
+
+ def write(self, data):
+ if self.encoding:
+ data = data.encode(self.encoding, "replace")
+ self._writemethod(data)
+
+ def flush(self):
+ return
+
+
+if win32_and_ctypes:
+ TerminalWriter = Win32ConsoleWriter
+ import ctypes
+ from ctypes import wintypes
+
+ # ctypes access to the Windows console
+ STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
+ FOREGROUND_BLACK = 0x0000 # black text
+ FOREGROUND_BLUE = 0x0001 # text color contains blue.
+ FOREGROUND_GREEN = 0x0002 # text color contains green.
+ FOREGROUND_RED = 0x0004 # text color contains red.
+ FOREGROUND_WHITE = 0x0007
+ FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
+ BACKGROUND_BLACK = 0x0000 # background color black
+ BACKGROUND_BLUE = 0x0010 # background color contains blue.
+ BACKGROUND_GREEN = 0x0020 # background color contains green.
+ BACKGROUND_RED = 0x0040 # background color contains red.
+ BACKGROUND_WHITE = 0x0070
+ BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
+
+ SHORT = ctypes.c_short
+ class COORD(ctypes.Structure):
+ _fields_ = [('X', SHORT),
+ ('Y', SHORT)]
+ class SMALL_RECT(ctypes.Structure):
+ _fields_ = [('Left', SHORT),
+ ('Top', SHORT),
+ ('Right', SHORT),
+ ('Bottom', SHORT)]
+ class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
+ _fields_ = [('dwSize', COORD),
+ ('dwCursorPosition', COORD),
+ ('wAttributes', wintypes.WORD),
+ ('srWindow', SMALL_RECT),
+ ('dwMaximumWindowSize', COORD)]
+
+ _GetStdHandle = ctypes.windll.kernel32.GetStdHandle
+ _GetStdHandle.argtypes = [wintypes.DWORD]
+ _GetStdHandle.restype = wintypes.HANDLE
+ def GetStdHandle(kind):
+ return _GetStdHandle(kind)
+
+ SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
+ SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
+ SetConsoleTextAttribute.restype = wintypes.BOOL
+
+ _GetConsoleScreenBufferInfo = \
+ ctypes.windll.kernel32.GetConsoleScreenBufferInfo
+ _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
+ ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
+ _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
+ def GetConsoleInfo(handle):
+ info = CONSOLE_SCREEN_BUFFER_INFO()
+ _GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
+ return info
+
+ def _getdimensions():
+ handle = GetStdHandle(STD_OUTPUT_HANDLE)
+ info = GetConsoleInfo(handle)
+ # Subtract one from the width, otherwise the cursor wraps
+ # and the ending \n causes an empty line to display.
+ return info.dwSize.Y, info.dwSize.X - 1
+
+def write_out(fil, msg):
+ # XXX sometimes "msg" is of type bytes, sometimes text which
+ # complicates the situation. Should we try to enforce unicode?
+ try:
+ # on py27 and above writing out to sys.stdout with an encoding
+ # should usually work for unicode messages (if the encoding is
+ # capable of it)
+ fil.write(msg)
+ except UnicodeEncodeError:
+ # on py26 it might not work because stdout expects bytes
+ if fil.encoding:
+ try:
+ fil.write(msg.encode(fil.encoding))
+ except UnicodeEncodeError:
+ # it might still fail if the encoding is not capable
+ pass
+ else:
+ fil.flush()
+ return
+ # fallback: escape all unicode characters
+ msg = msg.encode("unicode-escape").decode("ascii")
+ fil.write(msg)
+ fil.flush()
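A hedged usage sketch for TerminalWriter, plus the arithmetic behind sep():
with fullwidth 80, sepchar "=" and the 7-character title "results",
N = (80 - 7 - 2) // (2*1) = 35, so the line is 35 + 1 + 7 + 1 + 35 = 79
characters, and the trailing-sepchar check then appends one more "=" to reach 80:

    import py

    tw = py.io.TerminalWriter()
    tw.sep("=", "results", green=True)      # "===...=== results ===...==="
    tw.line(tw.markup("all tests passed", bold=True))
    tw.line("plain text")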
diff --git a/py/_log/__init__.py b/py/_log/__init__.py
index fad62e9..b211e2d 100644
--- a/py/_log/__init__.py
+++ b/py/_log/__init__.py
@@ -1,2 +1,2 @@
-""" logging API ('producers' and 'consumers' connected via keywords) """
-
+""" logging API ('producers' and 'consumers' connected via keywords) """
+
diff --git a/py/_log/log.py b/py/_log/log.py
index ce47e8c..86205fe 100644
--- a/py/_log/log.py
+++ b/py/_log/log.py
@@ -1,186 +1,186 @@
-"""
-basic logging functionality based on a producer/consumer scheme.
-
-XXX implement this API: (maybe put it into slogger.py?)
-
- log = Logger(
- info=py.log.STDOUT,
- debug=py.log.STDOUT,
- command=None)
- log.info("hello", "world")
- log.command("hello", "world")
-
- log = Logger(info=Logger(something=...),
- debug=py.log.STDOUT,
- command=None)
-"""
-import py, sys
-
-class Message(object):
- def __init__(self, keywords, args):
- self.keywords = keywords
- self.args = args
-
- def content(self):
- return " ".join(map(str, self.args))
-
- def prefix(self):
- return "[%s] " % (":".join(self.keywords))
-
- def __str__(self):
- return self.prefix() + self.content()
-
-
-class Producer(object):
- """ (deprecated) Log producer API which sends messages to be logged
- to a 'consumer' object, which then prints them to stdout,
- stderr, files, etc. Used extensively by PyPy-1.1.
- """
-
- Message = Message # to allow later customization
- keywords2consumer = {}
-
- def __init__(self, keywords, keywordmapper=None, **kw):
- if hasattr(keywords, 'split'):
- keywords = tuple(keywords.split())
- self._keywords = keywords
- if keywordmapper is None:
- keywordmapper = default_keywordmapper
- self._keywordmapper = keywordmapper
-
- def __repr__(self):
- return "<py.log.Producer %s>" % ":".join(self._keywords)
-
- def __getattr__(self, name):
- if '_' in name:
- raise AttributeError(name)
- producer = self.__class__(self._keywords + (name,))
- setattr(self, name, producer)
- return producer
-
- def __call__(self, *args):
- """ write a message to the appropriate consumer(s) """
- func = self._keywordmapper.getconsumer(self._keywords)
- if func is not None:
- func(self.Message(self._keywords, args))
-
-class KeywordMapper:
- def __init__(self):
- self.keywords2consumer = {}
-
- def getstate(self):
- return self.keywords2consumer.copy()
- def setstate(self, state):
- self.keywords2consumer.clear()
- self.keywords2consumer.update(state)
-
- def getconsumer(self, keywords):
- """ return a consumer matching the given keywords.
-
- tries to find the most suitable consumer by walking, starting from
- the back, the list of keywords, the first consumer matching a
- keyword is returned (falling back to py.log.default)
- """
- for i in range(len(keywords), 0, -1):
- try:
- return self.keywords2consumer[keywords[:i]]
- except KeyError:
- continue
- return self.keywords2consumer.get('default', default_consumer)
-
- def setconsumer(self, keywords, consumer):
- """ set a consumer for a set of keywords. """
- # normalize to tuples
- if isinstance(keywords, str):
- keywords = tuple(filter(None, keywords.split()))
- elif hasattr(keywords, '_keywords'):
- keywords = keywords._keywords
- elif not isinstance(keywords, tuple):
- raise TypeError("key %r is not a string or tuple" % (keywords,))
- if consumer is not None and not py.builtin.callable(consumer):
- if not hasattr(consumer, 'write'):
- raise TypeError(
- "%r should be None, callable or file-like" % (consumer,))
- consumer = File(consumer)
- self.keywords2consumer[keywords] = consumer
-
-def default_consumer(msg):
- """ the default consumer, prints the message to stdout (using 'print') """
- sys.stderr.write(str(msg)+"\n")
-
-default_keywordmapper = KeywordMapper()
-
-def setconsumer(keywords, consumer):
- default_keywordmapper.setconsumer(keywords, consumer)
-
-def setstate(state):
- default_keywordmapper.setstate(state)
-def getstate():
- return default_keywordmapper.getstate()
-
-#
-# Consumers
-#
-
-class File(object):
- """ log consumer wrapping a file(-like) object """
- def __init__(self, f):
- assert hasattr(f, 'write')
- #assert isinstance(f, file) or not hasattr(f, 'open')
- self._file = f
-
- def __call__(self, msg):
- """ write a message to the log """
- self._file.write(str(msg) + "\n")
- if hasattr(self._file, 'flush'):
- self._file.flush()
-
-class Path(object):
- """ log consumer that opens and writes to a Path """
- def __init__(self, filename, append=False,
- delayed_create=False, buffering=False):
- self._append = append
- self._filename = str(filename)
- self._buffering = buffering
- if not delayed_create:
- self._openfile()
-
- def _openfile(self):
- mode = self._append and 'a' or 'w'
- f = open(self._filename, mode)
- self._file = f
-
- def __call__(self, msg):
- """ write a message to the log """
- if not hasattr(self, "_file"):
- self._openfile()
- self._file.write(str(msg) + "\n")
- if not self._buffering:
- self._file.flush()
-
-def STDOUT(msg):
- """ consumer that writes to sys.stdout """
- sys.stdout.write(str(msg)+"\n")
-
-def STDERR(msg):
- """ consumer that writes to sys.stderr """
- sys.stderr.write(str(msg)+"\n")
-
-class Syslog:
- """ consumer that writes to the syslog daemon """
-
- def __init__(self, priority = None):
- if priority is None:
- priority = self.LOG_INFO
- self.priority = priority
-
- def __call__(self, msg):
- """ write a message to the log """
- py.std.syslog.syslog(self.priority, str(msg))
-
-for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
- _prio = "LOG_" + _prio
- try:
- setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
- except AttributeError:
- pass
+"""
+basic logging functionality based on a producer/consumer scheme.
+
+XXX implement this API: (maybe put it into slogger.py?)
+
+ log = Logger(
+ info=py.log.STDOUT,
+ debug=py.log.STDOUT,
+ command=None)
+ log.info("hello", "world")
+ log.command("hello", "world")
+
+ log = Logger(info=Logger(something=...),
+ debug=py.log.STDOUT,
+ command=None)
+"""
+import py, sys
+
+class Message(object):
+ def __init__(self, keywords, args):
+ self.keywords = keywords
+ self.args = args
+
+ def content(self):
+ return " ".join(map(str, self.args))
+
+ def prefix(self):
+ return "[%s] " % (":".join(self.keywords))
+
+ def __str__(self):
+ return self.prefix() + self.content()
+
+
+class Producer(object):
+ """ (deprecated) Log producer API which sends messages to be logged
+ to a 'consumer' object, which then prints them to stdout,
+ stderr, files, etc. Used extensively by PyPy-1.1.
+ """
+
+ Message = Message # to allow later customization
+ keywords2consumer = {}
+
+ def __init__(self, keywords, keywordmapper=None, **kw):
+ if hasattr(keywords, 'split'):
+ keywords = tuple(keywords.split())
+ self._keywords = keywords
+ if keywordmapper is None:
+ keywordmapper = default_keywordmapper
+ self._keywordmapper = keywordmapper
+
+ def __repr__(self):
+ return "<py.log.Producer %s>" % ":".join(self._keywords)
+
+ def __getattr__(self, name):
+ if '_' in name:
+ raise AttributeError(name)
+ producer = self.__class__(self._keywords + (name,))
+ setattr(self, name, producer)
+ return producer
+
+ def __call__(self, *args):
+ """ write a message to the appropriate consumer(s) """
+ func = self._keywordmapper.getconsumer(self._keywords)
+ if func is not None:
+ func(self.Message(self._keywords, args))
+
+class KeywordMapper:
+ def __init__(self):
+ self.keywords2consumer = {}
+
+ def getstate(self):
+ return self.keywords2consumer.copy()
+ def setstate(self, state):
+ self.keywords2consumer.clear()
+ self.keywords2consumer.update(state)
+
+ def getconsumer(self, keywords):
+ """ return a consumer matching the given keywords.
+
+ tries to find the most suitable consumer by checking registered
+ keyword tuples, longest prefix first; the first matching consumer
+ is returned (falling back to the 'default' consumer)
+ """
+ for i in range(len(keywords), 0, -1):
+ try:
+ return self.keywords2consumer[keywords[:i]]
+ except KeyError:
+ continue
+ return self.keywords2consumer.get('default', default_consumer)
+
+ def setconsumer(self, keywords, consumer):
+ """ set a consumer for a set of keywords. """
+ # normalize to tuples
+ if isinstance(keywords, str):
+ keywords = tuple(filter(None, keywords.split()))
+ elif hasattr(keywords, '_keywords'):
+ keywords = keywords._keywords
+ elif not isinstance(keywords, tuple):
+ raise TypeError("key %r is not a string or tuple" % (keywords,))
+ if consumer is not None and not py.builtin.callable(consumer):
+ if not hasattr(consumer, 'write'):
+ raise TypeError(
+ "%r should be None, callable or file-like" % (consumer,))
+ consumer = File(consumer)
+ self.keywords2consumer[keywords] = consumer
+
+def default_consumer(msg):
+ """ the default consumer, writes the message to sys.stderr """
+ sys.stderr.write(str(msg)+"\n")
+
+default_keywordmapper = KeywordMapper()
+
+def setconsumer(keywords, consumer):
+ default_keywordmapper.setconsumer(keywords, consumer)
+
+def setstate(state):
+ default_keywordmapper.setstate(state)
+def getstate():
+ return default_keywordmapper.getstate()
+
+#
+# Consumers
+#
+
+class File(object):
+ """ log consumer wrapping a file(-like) object """
+ def __init__(self, f):
+ assert hasattr(f, 'write')
+ #assert isinstance(f, file) or not hasattr(f, 'open')
+ self._file = f
+
+ def __call__(self, msg):
+ """ write a message to the log """
+ self._file.write(str(msg) + "\n")
+ if hasattr(self._file, 'flush'):
+ self._file.flush()
+
+class Path(object):
+ """ log consumer that opens and writes to a Path """
+ def __init__(self, filename, append=False,
+ delayed_create=False, buffering=False):
+ self._append = append
+ self._filename = str(filename)
+ self._buffering = buffering
+ if not delayed_create:
+ self._openfile()
+
+ def _openfile(self):
+ mode = self._append and 'a' or 'w'
+ f = open(self._filename, mode)
+ self._file = f
+
+ def __call__(self, msg):
+ """ write a message to the log """
+ if not hasattr(self, "_file"):
+ self._openfile()
+ self._file.write(str(msg) + "\n")
+ if not self._buffering:
+ self._file.flush()
+
+def STDOUT(msg):
+ """ consumer that writes to sys.stdout """
+ sys.stdout.write(str(msg)+"\n")
+
+def STDERR(msg):
+ """ consumer that writes to sys.stderr """
+ sys.stderr.write(str(msg)+"\n")
+
+class Syslog:
+ """ consumer that writes to the syslog daemon """
+
+ def __init__(self, priority = None):
+ if priority is None:
+ priority = self.LOG_INFO
+ self.priority = priority
+
+ def __call__(self, msg):
+ """ write a message to the log """
+ py.std.syslog.syslog(self.priority, str(msg))
+
+for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
+ _prio = "LOG_" + _prio
+ try:
+ setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
+ except AttributeError:
+ pass
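A sketch of the producer/consumer logging API as exposed through py.log (the
keyword names are illustrative):

    import py

    log = py.log.Producer("myapp")
    py.log.setconsumer("myapp", py.log.STDOUT)   # route "myapp ..." messages to stdout
    py.log.setconsumer("myapp debug", None)      # silence the debug sub-channel

    log("starting up")        # prints: [myapp] starting up
    log.debug("not shown")    # matched by the more specific, disabled consumer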
diff --git a/py/_log/warning.py b/py/_log/warning.py
index 722e31e..a137fe8 100644
--- a/py/_log/warning.py
+++ b/py/_log/warning.py
@@ -1,76 +1,76 @@
-import py, sys
-
-class DeprecationWarning(DeprecationWarning):
- def __init__(self, msg, path, lineno):
- self.msg = msg
- self.path = path
- self.lineno = lineno
- def __repr__(self):
- return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
- def __str__(self):
- return self.msg
-
-def _apiwarn(startversion, msg, stacklevel=2, function=None):
- # below is mostly COPIED from python2.4/warnings.py's def warn()
- # Get context information
- if isinstance(stacklevel, str):
- frame = sys._getframe(1)
- level = 1
- found = frame.f_code.co_filename.find(stacklevel) != -1
- while frame:
- co = frame.f_code
- if co.co_filename.find(stacklevel) == -1:
- if found:
- stacklevel = level
- break
- else:
- found = True
- level += 1
- frame = frame.f_back
- else:
- stacklevel = 1
- msg = "%s (since version %s)" %(msg, startversion)
- warn(msg, stacklevel=stacklevel+1, function=function)
-
-def warn(msg, stacklevel=1, function=None):
- if function is not None:
- filename = py.std.inspect.getfile(function)
- lineno = py.code.getrawcode(function).co_firstlineno
- else:
- try:
- caller = sys._getframe(stacklevel)
- except ValueError:
- globals = sys.__dict__
- lineno = 1
- else:
- globals = caller.f_globals
- lineno = caller.f_lineno
- if '__name__' in globals:
- module = globals['__name__']
- else:
- module = "<string>"
- filename = globals.get('__file__')
- if filename:
- fnl = filename.lower()
- if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
- filename = filename[:-1]
- elif fnl.endswith("$py.class"):
- filename = filename.replace('$py.class', '.py')
- else:
- if module == "__main__":
- try:
- filename = sys.argv[0]
- except AttributeError:
- # embedded interpreters don't have sys.argv, see bug #839151
- filename = '__main__'
- if not filename:
- filename = module
- path = py.path.local(filename)
- warning = DeprecationWarning(msg, path, lineno)
- py.std.warnings.warn_explicit(warning, category=Warning,
- filename=str(warning.path),
- lineno=warning.lineno,
- registry=py.std.warnings.__dict__.setdefault(
- "__warningsregistry__", {})
- )
-
+import py, sys
+
+class DeprecationWarning(DeprecationWarning):
+ def __init__(self, msg, path, lineno):
+ self.msg = msg
+ self.path = path
+ self.lineno = lineno
+ def __repr__(self):
+ return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
+ def __str__(self):
+ return self.msg
+
+def _apiwarn(startversion, msg, stacklevel=2, function=None):
+ # below is mostly COPIED from python2.4/warnings.py's def warn()
+ # Get context information
+ if isinstance(stacklevel, str):
+ frame = sys._getframe(1)
+ level = 1
+ found = frame.f_code.co_filename.find(stacklevel) != -1
+ while frame:
+ co = frame.f_code
+ if co.co_filename.find(stacklevel) == -1:
+ if found:
+ stacklevel = level
+ break
+ else:
+ found = True
+ level += 1
+ frame = frame.f_back
+ else:
+ stacklevel = 1
+ msg = "%s (since version %s)" %(msg, startversion)
+ warn(msg, stacklevel=stacklevel+1, function=function)
+
+def warn(msg, stacklevel=1, function=None):
+ if function is not None:
+ filename = py.std.inspect.getfile(function)
+ lineno = py.code.getrawcode(function).co_firstlineno
+ else:
+ try:
+ caller = sys._getframe(stacklevel)
+ except ValueError:
+ globals = sys.__dict__
+ lineno = 1
+ else:
+ globals = caller.f_globals
+ lineno = caller.f_lineno
+ if '__name__' in globals:
+ module = globals['__name__']
+ else:
+ module = "<string>"
+ filename = globals.get('__file__')
+ if filename:
+ fnl = filename.lower()
+ if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
+ filename = filename[:-1]
+ elif fnl.endswith("$py.class"):
+ filename = filename.replace('$py.class', '.py')
+ else:
+ if module == "__main__":
+ try:
+ filename = sys.argv[0]
+ except AttributeError:
+ # embedded interpreters don't have sys.argv, see bug #839151
+ filename = '__main__'
+ if not filename:
+ filename = module
+ path = py.path.local(filename)
+ warning = DeprecationWarning(msg, path, lineno)
+ py.std.warnings.warn_explicit(warning, category=Warning,
+ filename=str(warning.path),
+ lineno=warning.lineno,
+ registry=py.std.warnings.__dict__.setdefault(
+ "__warningsregistry__", {})
+ )
+
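_apiwarn() is the helper the py library itself uses to flag deprecated APIs; a
hypothetical call (version string and message are made up, and py.log._apiwarn
is assumed to be the exported name):

    import py

    def old_helper():
        py.log._apiwarn("1.4.33", "old_helper() is deprecated, use new_helper()")
        return 0

    old_helper()   # emits a DeprecationWarning "... (since version 1.4.33)" at the caller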
diff --git a/py/_path/__init__.py b/py/_path/__init__.py
index 51f3246..7b1ea8a 100644
--- a/py/_path/__init__.py
+++ b/py/_path/__init__.py
@@ -1 +1 @@
-""" unified file system api """
+""" unified file system api """
diff --git a/py/_path/cacheutil.py b/py/_path/cacheutil.py
index 9922504..89ea90b 100644
--- a/py/_path/cacheutil.py
+++ b/py/_path/cacheutil.py
@@ -1,114 +1,114 @@
-"""
-This module contains multithread-safe cache implementations.
-
-All Caches have
-
- getorbuild(key, builder)
- delentry(key)
-
-methods and allow configuration when instantiating the cache class.
-"""
-from time import time as gettime
-
-class BasicCache(object):
- def __init__(self, maxentries=128):
- self.maxentries = maxentries
- self.prunenum = int(maxentries - maxentries/8)
- self._dict = {}
-
- def clear(self):
- self._dict.clear()
-
- def _getentry(self, key):
- return self._dict[key]
-
- def _putentry(self, key, entry):
- self._prunelowestweight()
- self._dict[key] = entry
-
- def delentry(self, key, raising=False):
- try:
- del self._dict[key]
- except KeyError:
- if raising:
- raise
-
- def getorbuild(self, key, builder):
- try:
- entry = self._getentry(key)
- except KeyError:
- entry = self._build(key, builder)
- self._putentry(key, entry)
- return entry.value
-
- def _prunelowestweight(self):
- """ prune out entries with lowest weight. """
- numentries = len(self._dict)
- if numentries >= self.maxentries:
- # evict according to entry's weight
- items = [(entry.weight, key)
- for key, entry in self._dict.items()]
- items.sort()
- index = numentries - self.prunenum
- if index > 0:
- for weight, key in items[:index]:
- # in MT situations the element might be gone
- self.delentry(key, raising=False)
-
-class BuildcostAccessCache(BasicCache):
- """ A BuildTime/Access-counting cache implementation.
- the weight of a value is computed as the product of
-
- num-accesses-of-a-value * time-to-build-the-value
-
- The values with the least such weights are evicted
- if the cache maxentries threshold is superceded.
- For implementation flexibility more than one object
- might be evicted at a time.
- """
- # time function to use for measuring build-times
-
- def _build(self, key, builder):
- start = gettime()
- val = builder()
- end = gettime()
- return WeightedCountingEntry(val, end-start)
-
-
-class WeightedCountingEntry(object):
- def __init__(self, value, oneweight):
- self._value = value
- self.weight = self._oneweight = oneweight
-
- def value(self):
- self.weight += self._oneweight
- return self._value
- value = property(value)
-
-class AgingCache(BasicCache):
- """ This cache prunes out cache entries that are too old.
- """
- def __init__(self, maxentries=128, maxseconds=10.0):
- super(AgingCache, self).__init__(maxentries)
- self.maxseconds = maxseconds
-
- def _getentry(self, key):
- entry = self._dict[key]
- if entry.isexpired():
- self.delentry(key)
- raise KeyError(key)
- return entry
-
- def _build(self, key, builder):
- val = builder()
- entry = AgingEntry(val, gettime() + self.maxseconds)
- return entry
-
-class AgingEntry(object):
- def __init__(self, value, expirationtime):
- self.value = value
- self.weight = expirationtime
-
- def isexpired(self):
- t = gettime()
- return t >= self.weight
+"""
+This module contains multithread-safe cache implementations.
+
+All Caches have
+
+ getorbuild(key, builder)
+ delentry(key)
+
+methods and allow configuration when instantiating the cache class.
+"""
+from time import time as gettime
+
+class BasicCache(object):
+ def __init__(self, maxentries=128):
+ self.maxentries = maxentries
+ self.prunenum = int(maxentries - maxentries/8)
+ self._dict = {}
+
+ def clear(self):
+ self._dict.clear()
+
+ def _getentry(self, key):
+ return self._dict[key]
+
+ def _putentry(self, key, entry):
+ self._prunelowestweight()
+ self._dict[key] = entry
+
+ def delentry(self, key, raising=False):
+ try:
+ del self._dict[key]
+ except KeyError:
+ if raising:
+ raise
+
+ def getorbuild(self, key, builder):
+ try:
+ entry = self._getentry(key)
+ except KeyError:
+ entry = self._build(key, builder)
+ self._putentry(key, entry)
+ return entry.value
+
+ def _prunelowestweight(self):
+ """ prune out entries with lowest weight. """
+ numentries = len(self._dict)
+ if numentries >= self.maxentries:
+ # evict according to entry's weight
+ items = [(entry.weight, key)
+ for key, entry in self._dict.items()]
+ items.sort()
+ index = numentries - self.prunenum
+ if index > 0:
+ for weight, key in items[:index]:
+ # in MT situations the element might be gone
+ self.delentry(key, raising=False)
+
+class BuildcostAccessCache(BasicCache):
+ """ A BuildTime/Access-counting cache implementation.
+ the weight of a value is computed as the product of
+
+ num-accesses-of-a-value * time-to-build-the-value
+
+ The values with the least such weights are evicted
+ if the cache maxentries threshold is exceeded.
+ For implementation flexibility more than one object
+ might be evicted at a time.
+ """
+ # time function to use for measuring build-times
+
+ def _build(self, key, builder):
+ start = gettime()
+ val = builder()
+ end = gettime()
+ return WeightedCountingEntry(val, end-start)
+
+
+class WeightedCountingEntry(object):
+ def __init__(self, value, oneweight):
+ self._value = value
+ self.weight = self._oneweight = oneweight
+
+ def value(self):
+ self.weight += self._oneweight
+ return self._value
+ value = property(value)
+
+class AgingCache(BasicCache):
+ """ This cache prunes out cache entries that are too old.
+ """
+ def __init__(self, maxentries=128, maxseconds=10.0):
+ super(AgingCache, self).__init__(maxentries)
+ self.maxseconds = maxseconds
+
+ def _getentry(self, key):
+ entry = self._dict[key]
+ if entry.isexpired():
+ self.delentry(key)
+ raise KeyError(key)
+ return entry
+
+ def _build(self, key, builder):
+ val = builder()
+ entry = AgingEntry(val, gettime() + self.maxseconds)
+ return entry
+
+class AgingEntry(object):
+ def __init__(self, value, expirationtime):
+ self.value = value
+ self.weight = expirationtime
+
+ def isexpired(self):
+ t = gettime()
+ return t >= self.weight
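A sketch of the two cache flavours (cacheutil is internal, imported directly
here for illustration): in BuildcostAccessCache an entry's weight starts at
its build time and grows by that amount on every access, so cheap, rarely used
entries are evicted first; AgingCache instead stores an absolute expiry time
as the weight:

    from py._path import cacheutil

    cache = cacheutil.BuildcostAccessCache(maxentries=128)
    cache.getorbuild("answer", lambda: 42)       # builder runs only on a miss
    cache.getorbuild("answer", lambda: 42)       # hit: weight grows, builder not called

    aging = cacheutil.AgingCache(maxentries=128, maxseconds=10.0)
    aging.getorbuild("token", lambda: "fresh")   # rebuilt once the entry is >10s old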
diff --git a/py/_path/common.py b/py/_path/common.py
index bf42ed5..28faf4f 100644
--- a/py/_path/common.py
+++ b/py/_path/common.py
@@ -1,439 +1,440 @@
-"""
-"""
-import os, sys, posixpath
-import py
-
-# Moved from local.py.
-iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
-
-try:
- from os import fspath
-except ImportError:
- def fspath(path):
- """
- Return the string representation of the path.
- If str or bytes is passed in, it is returned unchanged.
- This code comes from PEP 519, modified to support earlier versions of
- python.
-
- This is required for python < 3.6.
- """
- if isinstance(path, (py.builtin.text, py.builtin.bytes)):
- return path
-
- # Work from the object's type to match method resolution of other magic
- # methods.
- path_type = type(path)
- try:
- return path_type.__fspath__(path)
- except AttributeError:
- if hasattr(path_type, '__fspath__'):
- raise
- try:
- import pathlib
- except ImportError:
- pass
- else:
- if isinstance(path, pathlib.PurePath):
- return py.builtin.text(path)
-
- raise TypeError("expected str, bytes or os.PathLike object, not "
- + path_type.__name__)
-
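The fspath() shim above backports PEP 519 for Python versions before 3.6; a
small sketch of what it accepts (MyPath and the import path are illustrative):

    from py._path.common import fspath

    class MyPath(object):
        def __fspath__(self):
            return "/tmp/example"

    assert fspath("already-a-string") == "already-a-string"
    assert fspath(MyPath()) == "/tmp/example"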
-class Checkers:
- _depend_on_existence = 'exists', 'link', 'dir', 'file'
-
- def __init__(self, path):
- self.path = path
-
- def dir(self):
- raise NotImplementedError
-
- def file(self):
- raise NotImplementedError
-
- def dotfile(self):
- return self.path.basename.startswith('.')
-
- def ext(self, arg):
- if not arg.startswith('.'):
- arg = '.' + arg
- return self.path.ext == arg
-
- def exists(self):
- raise NotImplementedError
-
- def basename(self, arg):
- return self.path.basename == arg
-
- def basestarts(self, arg):
- return self.path.basename.startswith(arg)
-
- def relto(self, arg):
- return self.path.relto(arg)
-
- def fnmatch(self, arg):
- return self.path.fnmatch(arg)
-
- def endswith(self, arg):
- return str(self.path).endswith(arg)
-
- def _evaluate(self, kw):
- for name, value in kw.items():
- invert = False
- meth = None
- try:
- meth = getattr(self, name)
- except AttributeError:
- if name[:3] == 'not':
- invert = True
- try:
- meth = getattr(self, name[3:])
- except AttributeError:
- pass
- if meth is None:
- raise TypeError(
- "no %r checker available for %r" % (name, self.path))
- try:
- if py.code.getrawcode(meth).co_argcount > 1:
- if (not meth(value)) ^ invert:
- return False
- else:
- if bool(value) ^ bool(meth()) ^ invert:
- return False
- except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
- # EBUSY feels not entirely correct,
- # but its kind of necessary since ENOMEDIUM
- # is not accessible in python
- for name in self._depend_on_existence:
- if name in kw:
- if kw.get(name):
- return False
- name = 'not' + name
- if name in kw:
- if not kw.get(name):
- return False
- return True
-
-class NeverRaised(Exception):
- pass
-
-class PathBase(object):
- """ shared implementation for filesystem path objects."""
- Checkers = Checkers
-
- def __div__(self, other):
- return self.join(fspath(other))
- __truediv__ = __div__ # py3k
-
- def basename(self):
- """ basename part of path. """
- return self._getbyspec('basename')[0]
- basename = property(basename, None, None, basename.__doc__)
-
- def dirname(self):
- """ dirname part of path. """
- return self._getbyspec('dirname')[0]
- dirname = property(dirname, None, None, dirname.__doc__)
-
- def purebasename(self):
- """ pure base name of the path."""
- return self._getbyspec('purebasename')[0]
- purebasename = property(purebasename, None, None, purebasename.__doc__)
-
- def ext(self):
- """ extension of the path (including the '.')."""
- return self._getbyspec('ext')[0]
- ext = property(ext, None, None, ext.__doc__)
-
- def dirpath(self, *args, **kwargs):
- """ return the directory path joined with any given path arguments. """
- return self.new(basename='').join(*args, **kwargs)
-
- def read_binary(self):
- """ read and return a bytestring from reading the path. """
- with self.open('rb') as f:
- return f.read()
-
- def read_text(self, encoding):
- """ read and return a Unicode string from reading the path. """
- with self.open("r", encoding=encoding) as f:
- return f.read()
-
-
- def read(self, mode='r'):
- """ read and return a bytestring from reading the path. """
- with self.open(mode) as f:
- return f.read()
-
- def readlines(self, cr=1):
- """ read and return a list of lines from the path. if cr is False, the
-newline will be removed from the end of each line. """
- if not cr:
- content = self.read('rU')
- return content.split('\n')
- else:
- f = self.open('rU')
- try:
- return f.readlines()
- finally:
- f.close()
-
- def load(self):
- """ (deprecated) return object unpickled from self.read() """
- f = self.open('rb')
- try:
- return py.error.checked_call(py.std.pickle.load, f)
- finally:
- f.close()
-
- def move(self, target):
- """ move this path to target. """
- if target.relto(self):
- raise py.error.EINVAL(target,
- "cannot move path into a subdirectory of itself")
- try:
- self.rename(target)
- except py.error.EXDEV: # invalid cross-device link
- self.copy(target)
- self.remove()
-
- def __repr__(self):
- """ return a string representation of this path. """
- return repr(str(self))
-
- def check(self, **kw):
- """ check a path for existence and properties.
-
- Without arguments, return True if the path exists, otherwise False.
-
- valid checkers::
-
- file=1 # is a file
- file=0 # is not a file (may not even exist)
- dir=1 # is a dir
- link=1 # is a link
- exists=1 # exists
-
- You can specify multiple checker definitions, for example::
-
- path.check(file=1, link=1) # a link pointing to a file
- """
- if not kw:
- kw = {'exists' : 1}
- return self.Checkers(self)._evaluate(kw)
-
- def fnmatch(self, pattern):
- """return true if the basename/fullname matches the glob-'pattern'.
-
- valid pattern characters::
-
- * matches everything
- ? matches any single character
- [seq] matches any character in seq
- [!seq] matches any char not in seq
-
- If the pattern contains a path-separator then the full path
- is used for pattern matching and a '*' is prepended to the
- pattern.
-
- if the pattern doesn't contain a path-separator the pattern
- is only matched against the basename.
- """
- return FNMatcher(pattern)(self)
-
- def relto(self, relpath):
- """ return a string which is the relative part of the path
- to the given 'relpath'.
- """
- if not isinstance(relpath, (str, PathBase)):
- raise TypeError("%r: not a string or path object" %(relpath,))
- strrelpath = str(relpath)
- if strrelpath and strrelpath[-1] != self.sep:
- strrelpath += self.sep
- #assert strrelpath[-1] == self.sep
- #assert strrelpath[-2] != self.sep
- strself = self.strpath
- if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
- if os.path.normcase(strself).startswith(
- os.path.normcase(strrelpath)):
- return strself[len(strrelpath):]
- elif strself.startswith(strrelpath):
- return strself[len(strrelpath):]
- return ""
-
- def ensure_dir(self, *args):
- """ ensure the path joined with args is a directory. """
- return self.ensure(*args, **{"dir": True})
-
- def bestrelpath(self, dest):
- """ return a string which is a relative path from self
- (assumed to be a directory) to dest such that
- self.join(bestrelpath) == dest and if no such
- path can be determined, return dest.
- """
- try:
- if self == dest:
- return os.curdir
- base = self.common(dest)
- if not base: # can be the case on windows
- return str(dest)
- self2base = self.relto(base)
- reldest = dest.relto(base)
- if self2base:
- n = self2base.count(self.sep) + 1
- else:
- n = 0
- l = [os.pardir] * n
- if reldest:
- l.append(reldest)
- target = dest.sep.join(l)
- return target
- except AttributeError:
- return str(dest)
-
- def exists(self):
- return self.check()
-
- def isdir(self):
- return self.check(dir=1)
-
- def isfile(self):
- return self.check(file=1)
-
- def parts(self, reverse=False):
- """ return a root-first list of all ancestor directories
- plus the path itself.
- """
- current = self
- l = [self]
- while 1:
- last = current
- current = current.dirpath()
- if last == current:
- break
- l.append(current)
- if not reverse:
- l.reverse()
- return l
-
- def common(self, other):
- """ return the common part shared with the other path
- or None if there is no common part.
- """
- last = None
- for x, y in zip(self.parts(), other.parts()):
- if x != y:
- return last
- last = x
- return last
-
- def __add__(self, other):
- """ return new path object with 'other' added to the basename"""
- return self.new(basename=self.basename+str(other))
-
- def __cmp__(self, other):
- """ return sort value (-1, 0, +1). """
- try:
- return cmp(self.strpath, other.strpath)
- except AttributeError:
- return cmp(str(self), str(other)) # self.path, other.path)
-
- def __lt__(self, other):
- try:
- return self.strpath < other.strpath
- except AttributeError:
- return str(self) < str(other)
-
- def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
- """ yields all paths below the current one
-
- fil is a filter (glob pattern or callable); paths that do not
- match it will not be yielded. Defaults to None (everything is
- returned)
-
- rec is a filter (glob pattern or callable) that controls whether
- a node is descended, defaulting to None
-
- ignore is an Exception class that is ignored when calling listdir()
- on any of the paths (by default, all exceptions are reported)
-
- bf if True will cause a breadthfirst search instead of the
- default depthfirst. Default: False
-
- sort if True will sort entries within each directory level.
- """
- for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
- yield x
-
- def _sortlist(self, res, sort):
- if sort:
- if hasattr(sort, '__call__'):
- res.sort(sort)
- else:
- res.sort()
-
- def samefile(self, other):
- """ return True if other refers to the same stat object as self. """
- return self.strpath == str(other)
-
- def __fspath__(self):
- return str(self)
-
-class Visitor:
- def __init__(self, fil, rec, ignore, bf, sort):
- if isinstance(fil, py.builtin._basestring):
- fil = FNMatcher(fil)
- if isinstance(rec, py.builtin._basestring):
- self.rec = FNMatcher(rec)
- elif not hasattr(rec, '__call__') and rec:
- self.rec = lambda path: True
- else:
- self.rec = rec
- self.fil = fil
- self.ignore = ignore
- self.breadthfirst = bf
- self.optsort = sort and sorted or (lambda x: x)
-
- def gen(self, path):
- try:
- entries = path.listdir()
- except self.ignore:
- return
- rec = self.rec
- dirs = self.optsort([p for p in entries
- if p.check(dir=1) and (rec is None or rec(p))])
- if not self.breadthfirst:
- for subdir in dirs:
- for p in self.gen(subdir):
- yield p
- for p in self.optsort(entries):
- if self.fil is None or self.fil(p):
- yield p
- if self.breadthfirst:
- for subdir in dirs:
- for p in self.gen(subdir):
- yield p
-
-class FNMatcher:
- def __init__(self, pattern):
- self.pattern = pattern
-
- def __call__(self, path):
- pattern = self.pattern
-
- if (pattern.find(path.sep) == -1 and
- iswin32 and
- pattern.find(posixpath.sep) != -1):
- # Running on Windows, the pattern has no Windows path separators,
- # and the pattern has one or more Posix path separators. Replace
- # the Posix path separators with the Windows path separator.
- pattern = pattern.replace(posixpath.sep, path.sep)
-
- if pattern.find(path.sep) == -1:
- name = path.basename
- else:
- name = str(path) # path.strpath # XXX svn?
- if not os.path.isabs(pattern):
- pattern = '*' + path.sep + pattern
- return py.std.fnmatch.fnmatch(name, pattern)
+"""
+"""
+import os, sys, posixpath
+import fnmatch
+import py
+
+# Moved from local.py.
+iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
+
+try:
+ from os import fspath
+except ImportError:
+ def fspath(path):
+ """
+ Return the string representation of the path.
+ If str or bytes is passed in, it is returned unchanged.
+ This code comes from PEP 519, modified to support earlier versions of
+ python.
+
+ This is required for python < 3.6.
+ """
+ if isinstance(path, (py.builtin.text, py.builtin.bytes)):
+ return path
+
+ # Work from the object's type to match method resolution of other magic
+ # methods.
+ path_type = type(path)
+ try:
+ return path_type.__fspath__(path)
+ except AttributeError:
+ if hasattr(path_type, '__fspath__'):
+ raise
+ try:
+ import pathlib
+ except ImportError:
+ pass
+ else:
+ if isinstance(path, pathlib.PurePath):
+ return py.builtin.text(path)
+
+ raise TypeError("expected str, bytes or os.PathLike object, not "
+ + path_type.__name__)
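A minimal usage sketch of this fallback, assuming only that the py package is importable; the file name below is invented for illustration:

    import py
    from py._path.common import fspath

    fspath("/tmp/example.txt")        # str (or bytes) is returned unchanged
    fspath(py.path.local("/tmp"))     # uses LocalPath.__fspath__ -> '/tmp'
    # fspath(42) raises TypeError("expected str, bytes or os.PathLike object, ...")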
+
+class Checkers:
+ _depend_on_existence = 'exists', 'link', 'dir', 'file'
+
+ def __init__(self, path):
+ self.path = path
+
+ def dir(self):
+ raise NotImplementedError
+
+ def file(self):
+ raise NotImplementedError
+
+ def dotfile(self):
+ return self.path.basename.startswith('.')
+
+ def ext(self, arg):
+ if not arg.startswith('.'):
+ arg = '.' + arg
+ return self.path.ext == arg
+
+ def exists(self):
+ raise NotImplementedError
+
+ def basename(self, arg):
+ return self.path.basename == arg
+
+ def basestarts(self, arg):
+ return self.path.basename.startswith(arg)
+
+ def relto(self, arg):
+ return self.path.relto(arg)
+
+ def fnmatch(self, arg):
+ return self.path.fnmatch(arg)
+
+ def endswith(self, arg):
+ return str(self.path).endswith(arg)
+
+ def _evaluate(self, kw):
+ for name, value in kw.items():
+ invert = False
+ meth = None
+ try:
+ meth = getattr(self, name)
+ except AttributeError:
+ if name[:3] == 'not':
+ invert = True
+ try:
+ meth = getattr(self, name[3:])
+ except AttributeError:
+ pass
+ if meth is None:
+ raise TypeError(
+ "no %r checker available for %r" % (name, self.path))
+ try:
+ if py.code.getrawcode(meth).co_argcount > 1:
+ if (not meth(value)) ^ invert:
+ return False
+ else:
+ if bool(value) ^ bool(meth()) ^ invert:
+ return False
+ except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
+ # EBUSY feels not entirely correct,
+ # but it's kind of necessary since ENOMEDIUM
+ # is not accessible in Python
+ for name in self._depend_on_existence:
+ if name in kw:
+ if kw.get(name):
+ return False
+ name = 'not' + name
+ if name in kw:
+ if not kw.get(name):
+ return False
+ return True
+
+class NeverRaised(Exception):
+ pass
+
+class PathBase(object):
+ """ shared implementation for filesystem path objects."""
+ Checkers = Checkers
+
+ def __div__(self, other):
+ return self.join(fspath(other))
+ __truediv__ = __div__ # py3k
+
+ def basename(self):
+ """ basename part of path. """
+ return self._getbyspec('basename')[0]
+ basename = property(basename, None, None, basename.__doc__)
+
+ def dirname(self):
+ """ dirname part of path. """
+ return self._getbyspec('dirname')[0]
+ dirname = property(dirname, None, None, dirname.__doc__)
+
+ def purebasename(self):
+ """ pure base name of the path."""
+ return self._getbyspec('purebasename')[0]
+ purebasename = property(purebasename, None, None, purebasename.__doc__)
+
+ def ext(self):
+ """ extension of the path (including the '.')."""
+ return self._getbyspec('ext')[0]
+ ext = property(ext, None, None, ext.__doc__)
+
+ def dirpath(self, *args, **kwargs):
+ """ return the directory path joined with any given path arguments. """
+ return self.new(basename='').join(*args, **kwargs)
+
+ def read_binary(self):
+ """ read and return a bytestring from reading the path. """
+ with self.open('rb') as f:
+ return f.read()
+
+ def read_text(self, encoding):
+ """ read and return a Unicode string from reading the path. """
+ with self.open("r", encoding=encoding) as f:
+ return f.read()
+
+
+ def read(self, mode='r'):
+ """ read and return a bytestring from reading the path. """
+ with self.open(mode) as f:
+ return f.read()
+
+ def readlines(self, cr=1):
+ """ read and return a list of lines from the path. if cr is False, the
+newline will be removed from the end of each line. """
+ if not cr:
+ content = self.read('rU')
+ return content.split('\n')
+ else:
+ f = self.open('rU')
+ try:
+ return f.readlines()
+ finally:
+ f.close()
+
+ def load(self):
+ """ (deprecated) return object unpickled from self.read() """
+ f = self.open('rb')
+ try:
+ return py.error.checked_call(py.std.pickle.load, f)
+ finally:
+ f.close()
+
+ def move(self, target):
+ """ move this path to target. """
+ if target.relto(self):
+ raise py.error.EINVAL(target,
+ "cannot move path into a subdirectory of itself")
+ try:
+ self.rename(target)
+ except py.error.EXDEV: # invalid cross-device link
+ self.copy(target)
+ self.remove()
+
+ def __repr__(self):
+ """ return a string representation of this path. """
+ return repr(str(self))
+
+ def check(self, **kw):
+ """ check a path for existence and properties.
+
+ Without arguments, return True if the path exists, otherwise False.
+
+ valid checkers::
+
+ file=1 # is a file
+ file=0 # is not a file (may not even exist)
+ dir=1 # is a dir
+ link=1 # is a link
+ exists=1 # exists
+
+ You can specify multiple checker definitions, for example::
+
+ path.check(file=1, link=1) # a link pointing to a file
+ """
+ if not kw:
+ kw = {'exists' : 1}
+ return self.Checkers(self)._evaluate(kw)
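A hedged sketch of check() in practice; the temporary file name below is invented, not part of the diff:

    import py
    p = py.path.local.mkdtemp().ensure("data.txt")   # create an empty file
    p.check()                      # True  - exists
    p.check(file=1)                # True  - is a file
    p.check(dir=1)                 # False
    p.check(file=1, ext=".txt")    # True  - all checkers must pass
    p.check(notdir=1)              # True  - a 'not' prefix inverts a checker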
+
+ def fnmatch(self, pattern):
+ """return true if the basename/fullname matches the glob-'pattern'.
+
+ valid pattern characters::
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ If the pattern contains a path-separator then the full path
+ is used for pattern matching and a '*' is prepended to the
+ pattern.
+
+ if the pattern doesn't contain a path-separator the pattern
+ is only matched against the basename.
+ """
+ return FNMatcher(pattern)(self)
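Illustrative behaviour on a POSIX system (the path is hypothetical):

    import py
    p = py.path.local("/src/pkg/module.py")
    p.fnmatch("*.py")         # True  - no separator: matched against basename
    p.fnmatch("pkg/*.py")     # True  - separator: '*' prepended, full path matched
    p.fnmatch("other/*.py")   # False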
+
+ def relto(self, relpath):
+ """ return a string which is the relative part of the path
+ to the given 'relpath'.
+ """
+ if not isinstance(relpath, (str, PathBase)):
+ raise TypeError("%r: not a string or path object" %(relpath,))
+ strrelpath = str(relpath)
+ if strrelpath and strrelpath[-1] != self.sep:
+ strrelpath += self.sep
+ #assert strrelpath[-1] == self.sep
+ #assert strrelpath[-2] != self.sep
+ strself = self.strpath
+ if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
+ if os.path.normcase(strself).startswith(
+ os.path.normcase(strrelpath)):
+ return strself[len(strrelpath):]
+ elif strself.startswith(strrelpath):
+ return strself[len(strrelpath):]
+ return ""
+
+ def ensure_dir(self, *args):
+ """ ensure the path joined with args is a directory. """
+ return self.ensure(*args, **{"dir": True})
+
+ def bestrelpath(self, dest):
+ """ return a string which is a relative path from self
+ (assumed to be a directory) to dest such that
+ self.join(bestrelpath) == dest and if no such
+ path can be determined, return dest.
+ """
+ try:
+ if self == dest:
+ return os.curdir
+ base = self.common(dest)
+ if not base: # can be the case on windows
+ return str(dest)
+ self2base = self.relto(base)
+ reldest = dest.relto(base)
+ if self2base:
+ n = self2base.count(self.sep) + 1
+ else:
+ n = 0
+ l = [os.pardir] * n
+ if reldest:
+ l.append(reldest)
+ target = dest.sep.join(l)
+ return target
+ except AttributeError:
+ return str(dest)
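A hedged example (paths are invented):

    import py
    base = py.path.local("/data/projects/alpha")
    dest = py.path.local("/data/projects/beta/docs")
    base.bestrelpath(dest)    # '../beta/docs'
    base.bestrelpath(base)    # '.'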
+
+ def exists(self):
+ return self.check()
+
+ def isdir(self):
+ return self.check(dir=1)
+
+ def isfile(self):
+ return self.check(file=1)
+
+ def parts(self, reverse=False):
+ """ return a root-first list of all ancestor directories
+ plus the path itself.
+ """
+ current = self
+ l = [self]
+ while 1:
+ last = current
+ current = current.dirpath()
+ if last == current:
+ break
+ l.append(current)
+ if not reverse:
+ l.reverse()
+ return l
+
+ def common(self, other):
+ """ return the common part shared with the other path
+ or None if there is no common part.
+ """
+ last = None
+ for x, y in zip(self.parts(), other.parts()):
+ if x != y:
+ return last
+ last = x
+ return last
+
+ def __add__(self, other):
+ """ return new path object with 'other' added to the basename"""
+ return self.new(basename=self.basename+str(other))
+
+ def __cmp__(self, other):
+ """ return sort value (-1, 0, +1). """
+ try:
+ return cmp(self.strpath, other.strpath)
+ except AttributeError:
+ return cmp(str(self), str(other)) # self.path, other.path)
+
+ def __lt__(self, other):
+ try:
+ return self.strpath < other.strpath
+ except AttributeError:
+ return str(self) < str(other)
+
+ def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
+ """ yields all paths below the current one
+
+ fil is a filter (glob pattern or callable); paths that do not
+ match it will not be yielded. Defaults to None (everything is
+ returned)
+
+ rec is a filter (glob pattern or callable) that controls whether
+ a node is descended, defaulting to None
+
+ ignore is an Exception class that is ignored when calling listdir()
+ on any of the paths (by default, all exceptions are reported)
+
+ bf if True will cause a breadthfirst search instead of the
+ default depthfirst. Default: False
+
+ sort if True will sort entries within each directory level.
+ """
+ for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
+ yield x
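A small usage sketch; the directory layout is invented, and the yield order within a directory depends on the filesystem unless sort=True:

    import py
    tmp = py.path.local.mkdtemp()
    tmp.ensure("pkg", "__init__.py")
    tmp.ensure("pkg", "mod.py")
    tmp.ensure("README.txt")

    [p.basename for p in tmp.visit("*.py", sort=True)]
    # -> ['__init__.py', 'mod.py']

    # rec can prune descent, here skipping the 'pkg' directory entirely:
    list(tmp.visit(fil="*.py", rec=lambda d: d.basename != "pkg"))   # -> []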
+
+ def _sortlist(self, res, sort):
+ if sort:
+ if hasattr(sort, '__call__'):
+ res.sort(sort)
+ else:
+ res.sort()
+
+ def samefile(self, other):
+ """ return True if other refers to the same stat object as self. """
+ return self.strpath == str(other)
+
+ def __fspath__(self):
+ return self.strpath
+
+class Visitor:
+ def __init__(self, fil, rec, ignore, bf, sort):
+ if isinstance(fil, py.builtin._basestring):
+ fil = FNMatcher(fil)
+ if isinstance(rec, py.builtin._basestring):
+ self.rec = FNMatcher(rec)
+ elif not hasattr(rec, '__call__') and rec:
+ self.rec = lambda path: True
+ else:
+ self.rec = rec
+ self.fil = fil
+ self.ignore = ignore
+ self.breadthfirst = bf
+ self.optsort = sort and sorted or (lambda x: x)
+
+ def gen(self, path):
+ try:
+ entries = path.listdir()
+ except self.ignore:
+ return
+ rec = self.rec
+ dirs = self.optsort([p for p in entries
+ if p.check(dir=1) and (rec is None or rec(p))])
+ if not self.breadthfirst:
+ for subdir in dirs:
+ for p in self.gen(subdir):
+ yield p
+ for p in self.optsort(entries):
+ if self.fil is None or self.fil(p):
+ yield p
+ if self.breadthfirst:
+ for subdir in dirs:
+ for p in self.gen(subdir):
+ yield p
+
+class FNMatcher:
+ def __init__(self, pattern):
+ self.pattern = pattern
+
+ def __call__(self, path):
+ pattern = self.pattern
+
+ if (pattern.find(path.sep) == -1 and
+ iswin32 and
+ pattern.find(posixpath.sep) != -1):
+ # Running on Windows, the pattern has no Windows path separators,
+ # and the pattern has one or more Posix path separators. Replace
+ # the Posix path separators with the Windows path separator.
+ pattern = pattern.replace(posixpath.sep, path.sep)
+
+ if pattern.find(path.sep) == -1:
+ name = path.basename
+ else:
+ name = str(path) # path.strpath # XXX svn?
+ if not os.path.isabs(pattern):
+ pattern = '*' + path.sep + pattern
+ return fnmatch.fnmatch(name, pattern)
diff --git a/py/_path/local.py b/py/_path/local.py
index 0d4e4c9..b9d5398 100644
--- a/py/_path/local.py
+++ b/py/_path/local.py
@@ -1,928 +1,928 @@
-"""
-local path implementation.
-"""
-from __future__ import with_statement
-
-from contextlib import contextmanager
-import sys, os, re, atexit, io
-import py
-from py._path import common
-from py._path.common import iswin32, fspath
-from stat import S_ISLNK, S_ISDIR, S_ISREG
-
-from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
-
-if sys.version_info > (3,0):
- def map_as_list(func, iter):
- return list(map(func, iter))
-else:
- map_as_list = map
-
-class Stat(object):
- def __getattr__(self, name):
- return getattr(self._osstatresult, "st_" + name)
-
- def __init__(self, path, osstatresult):
- self.path = path
- self._osstatresult = osstatresult
-
- @property
- def owner(self):
- if iswin32:
- raise NotImplementedError("XXX win32")
- import pwd
- entry = py.error.checked_call(pwd.getpwuid, self.uid)
- return entry[0]
-
- @property
- def group(self):
- """ return group name of file. """
- if iswin32:
- raise NotImplementedError("XXX win32")
- import grp
- entry = py.error.checked_call(grp.getgrgid, self.gid)
- return entry[0]
-
- def isdir(self):
- return S_ISDIR(self._osstatresult.st_mode)
-
- def isfile(self):
- return S_ISREG(self._osstatresult.st_mode)
-
- def islink(self):
- st = self.path.lstat()
- return S_ISLNK(self._osstatresult.st_mode)
-
-class PosixPath(common.PathBase):
- def chown(self, user, group, rec=0):
- """ change ownership to the given user and group.
- user and group may be specified by a number or
- by a name. if rec is True change ownership
- recursively.
- """
- uid = getuserid(user)
- gid = getgroupid(group)
- if rec:
- for x in self.visit(rec=lambda x: x.check(link=0)):
- if x.check(link=0):
- py.error.checked_call(os.chown, str(x), uid, gid)
- py.error.checked_call(os.chown, str(self), uid, gid)
-
- def readlink(self):
- """ return value of a symbolic link. """
- return py.error.checked_call(os.readlink, self.strpath)
-
- def mklinkto(self, oldname):
- """ posix style hard link to another name. """
- py.error.checked_call(os.link, str(oldname), str(self))
-
- def mksymlinkto(self, value, absolute=1):
- """ create a symbolic link with the given value (pointing to another name). """
- if absolute:
- py.error.checked_call(os.symlink, str(value), self.strpath)
- else:
- base = self.common(value)
- # with posix local paths '/' is always a common base
- relsource = self.__class__(value).relto(base)
- reldest = self.relto(base)
- n = reldest.count(self.sep)
- target = self.sep.join(('..', )*n + (relsource, ))
- py.error.checked_call(os.symlink, target, self.strpath)
-
-def getuserid(user):
- import pwd
- if not isinstance(user, int):
- user = pwd.getpwnam(user)[2]
- return user
-
-def getgroupid(group):
- import grp
- if not isinstance(group, int):
- group = grp.getgrnam(group)[2]
- return group
-
-FSBase = not iswin32 and PosixPath or common.PathBase
-
-class LocalPath(FSBase):
- """ object oriented interface to os.path and other local filesystem
- related information.
- """
- class ImportMismatchError(ImportError):
- """ raised on pyimport() if there is a mismatch of __file__'s"""
-
- sep = os.sep
- class Checkers(common.Checkers):
- def _stat(self):
- try:
- return self._statcache
- except AttributeError:
- try:
- self._statcache = self.path.stat()
- except py.error.ELOOP:
- self._statcache = self.path.lstat()
- return self._statcache
-
- def dir(self):
- return S_ISDIR(self._stat().mode)
-
- def file(self):
- return S_ISREG(self._stat().mode)
-
- def exists(self):
- return self._stat()
-
- def link(self):
- st = self.path.lstat()
- return S_ISLNK(st.mode)
-
- def __init__(self, path=None, expanduser=False):
- """ Initialize and return a local Path instance.
-
- Path can be relative to the current directory.
- If path is None it defaults to the current working directory.
- If expanduser is True, tilde-expansion is performed.
- Note that Path instances always carry an absolute path.
- Note also that passing in a local path object will simply return
- the exact same path object. Use new() to get a new copy.
- """
- if path is None:
- self.strpath = py.error.checked_call(os.getcwd)
- else:
- try:
- path = fspath(path)
- except TypeError:
- raise ValueError("can only pass None, Path instances "
- "or non-empty strings to LocalPath")
- if expanduser:
- path = os.path.expanduser(path)
- self.strpath = abspath(path)
-
- def __hash__(self):
- return hash(self.strpath)
-
- def __eq__(self, other):
- s1 = fspath(self)
- try:
- s2 = fspath(other)
- except TypeError:
- return False
- if iswin32:
- s1 = s1.lower()
- try:
- s2 = s2.lower()
- except AttributeError:
- return False
- return s1 == s2
-
- def __ne__(self, other):
- return not (self == other)
-
- def __lt__(self, other):
- return fspath(self) < fspath(other)
-
- def __gt__(self, other):
- return fspath(self) > fspath(other)
-
- def samefile(self, other):
- """ return True if 'other' references the same file as 'self'.
- """
- other = fspath(other)
- if not isabs(other):
- other = abspath(other)
- if self == other:
- return True
- if iswin32:
- return False # there is no samefile
- return py.error.checked_call(
- os.path.samefile, self.strpath, other)
-
- def remove(self, rec=1, ignore_errors=False):
- """ remove a file or directory (or a directory tree if rec=1).
- if ignore_errors is True, errors while removing directories will
- be ignored.
- """
- if self.check(dir=1, link=0):
- if rec:
- # force remove of readonly files on windows
- if iswin32:
- self.chmod(448, rec=1) # octal 0700
- py.error.checked_call(py.std.shutil.rmtree, self.strpath,
- ignore_errors=ignore_errors)
- else:
- py.error.checked_call(os.rmdir, self.strpath)
- else:
- if iswin32:
- self.chmod(448) # octal 0700
- py.error.checked_call(os.remove, self.strpath)
-
- def computehash(self, hashtype="md5", chunksize=524288):
- """ return hexdigest of hashvalue for this file. """
- try:
- try:
- import hashlib as mod
- except ImportError:
- if hashtype == "sha1":
- hashtype = "sha"
- mod = __import__(hashtype)
- hash = getattr(mod, hashtype)()
- except (AttributeError, ImportError):
- raise ValueError("Don't know how to compute %r hash" %(hashtype,))
- f = self.open('rb')
- try:
- while 1:
- buf = f.read(chunksize)
- if not buf:
- return hash.hexdigest()
- hash.update(buf)
- finally:
- f.close()
-
- def new(self, **kw):
- """ create a modified version of this path.
- the following keyword arguments modify various path parts::
-
- a:/some/path/to/a/file.ext
- xx drive
- xxxxxxxxxxxxxxxxx dirname
- xxxxxxxx basename
- xxxx purebasename
- xxx ext
- """
- obj = object.__new__(self.__class__)
- if not kw:
- obj.strpath = self.strpath
- return obj
- drive, dirname, basename, purebasename,ext = self._getbyspec(
- "drive,dirname,basename,purebasename,ext")
- if 'basename' in kw:
- if 'purebasename' in kw or 'ext' in kw:
- raise ValueError("invalid specification %r" % kw)
- else:
- pb = kw.setdefault('purebasename', purebasename)
- try:
- ext = kw['ext']
- except KeyError:
- pass
- else:
- if ext and not ext.startswith('.'):
- ext = '.' + ext
- kw['basename'] = pb + ext
-
- if ('dirname' in kw and not kw['dirname']):
- kw['dirname'] = drive
- else:
- kw.setdefault('dirname', dirname)
- kw.setdefault('sep', self.sep)
- obj.strpath = normpath(
- "%(dirname)s%(sep)s%(basename)s" % kw)
- return obj
-
- def _getbyspec(self, spec):
- """ see new for what 'spec' can be. """
- res = []
- parts = self.strpath.split(self.sep)
-
- args = filter(None, spec.split(',') )
- append = res.append
- for name in args:
- if name == 'drive':
- append(parts[0])
- elif name == 'dirname':
- append(self.sep.join(parts[:-1]))
- else:
- basename = parts[-1]
- if name == 'basename':
- append(basename)
- else:
- i = basename.rfind('.')
- if i == -1:
- purebasename, ext = basename, ''
- else:
- purebasename, ext = basename[:i], basename[i:]
- if name == 'purebasename':
- append(purebasename)
- elif name == 'ext':
- append(ext)
- else:
- raise ValueError("invalid part specification %r" % name)
- return res
-
- def dirpath(self, *args, **kwargs):
- """ return the directory path joined with any given path arguments. """
- if not kwargs:
- path = object.__new__(self.__class__)
- path.strpath = dirname(self.strpath)
- if args:
- path = path.join(*args)
- return path
- return super(LocalPath, self).dirpath(*args, **kwargs)
-
- def join(self, *args, **kwargs):
- """ return a new path by appending all 'args' as path
- components. if abs=1 is used restart from root if any
- of the args is an absolute path.
- """
- sep = self.sep
- strargs = [fspath(arg) for arg in args]
- strpath = self.strpath
- if kwargs.get('abs'):
- newargs = []
- for arg in reversed(strargs):
- if isabs(arg):
- strpath = arg
- strargs = newargs
- break
- newargs.insert(0, arg)
- for arg in strargs:
- arg = arg.strip(sep)
- if iswin32:
- # allow unix style paths even on windows.
- arg = arg.strip('/')
- arg = arg.replace('/', sep)
- strpath = strpath + sep + arg
- obj = object.__new__(self.__class__)
- obj.strpath = normpath(strpath)
- return obj
-
- def open(self, mode='r', ensure=False, encoding=None):
- """ return an opened file with the given mode.
-
- If ensure is True, create parent directories if needed.
- """
- if ensure:
- self.dirpath().ensure(dir=1)
- if encoding:
- return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
- return py.error.checked_call(open, self.strpath, mode)
-
- def _fastjoin(self, name):
- child = object.__new__(self.__class__)
- child.strpath = self.strpath + self.sep + name
- return child
-
- def islink(self):
- return islink(self.strpath)
-
- def check(self, **kw):
- if not kw:
- return exists(self.strpath)
- if len(kw) == 1:
- if "dir" in kw:
- return not kw["dir"] ^ isdir(self.strpath)
- if "file" in kw:
- return not kw["file"] ^ isfile(self.strpath)
- return super(LocalPath, self).check(**kw)
-
- _patternchars = set("*?[" + os.path.sep)
- def listdir(self, fil=None, sort=None):
- """ list directory contents, possibly filter by the given fil func
- and possibly sorted.
- """
- if fil is None and sort is None:
- names = py.error.checked_call(os.listdir, self.strpath)
- return map_as_list(self._fastjoin, names)
- if isinstance(fil, py.builtin._basestring):
- if not self._patternchars.intersection(fil):
- child = self._fastjoin(fil)
- if exists(child.strpath):
- return [child]
- return []
- fil = common.FNMatcher(fil)
- names = py.error.checked_call(os.listdir, self.strpath)
- res = []
- for name in names:
- child = self._fastjoin(name)
- if fil is None or fil(child):
- res.append(child)
- self._sortlist(res, sort)
- return res
-
- def size(self):
- """ return size of the underlying file object """
- return self.stat().size
-
- def mtime(self):
- """ return last modification time of the path. """
- return self.stat().mtime
-
- def copy(self, target, mode=False, stat=False):
- """ copy path to target.
-
- If mode is True, will copy permission from path to target.
- If stat is True, copy permission, last modification
- time, last access time, and flags from path to target.
- """
- if self.check(file=1):
- if target.check(dir=1):
- target = target.join(self.basename)
- assert self!=target
- copychunked(self, target)
- if mode:
- copymode(self.strpath, target.strpath)
- if stat:
- copystat(self, target)
- else:
- def rec(p):
- return p.check(link=0)
- for x in self.visit(rec=rec):
- relpath = x.relto(self)
- newx = target.join(relpath)
- newx.dirpath().ensure(dir=1)
- if x.check(link=1):
- newx.mksymlinkto(x.readlink())
- continue
- elif x.check(file=1):
- copychunked(x, newx)
- elif x.check(dir=1):
- newx.ensure(dir=1)
- if mode:
- copymode(x.strpath, newx.strpath)
- if stat:
- copystat(x, newx)
-
- def rename(self, target):
- """ rename this path to target. """
- target = fspath(target)
- return py.error.checked_call(os.rename, self.strpath, target)
-
- def dump(self, obj, bin=1):
- """ pickle object into path location"""
- f = self.open('wb')
- try:
- py.error.checked_call(py.std.pickle.dump, obj, f, bin)
- finally:
- f.close()
-
- def mkdir(self, *args):
- """ create & return the directory joined with args. """
- p = self.join(*args)
- py.error.checked_call(os.mkdir, fspath(p))
- return p
-
- def write_binary(self, data, ensure=False):
- """ write binary data into path. If ensure is True create
- missing parent directories.
- """
- if ensure:
- self.dirpath().ensure(dir=1)
- with self.open('wb') as f:
- f.write(data)
-
- def write_text(self, data, encoding, ensure=False):
- """ write text data into path using the specified encoding.
- If ensure is True create missing parent directories.
- """
- if ensure:
- self.dirpath().ensure(dir=1)
- with self.open('w', encoding=encoding) as f:
- f.write(data)
-
- def write(self, data, mode='w', ensure=False):
- """ write data into path. If ensure is True create
- missing parent directories.
- """
- if ensure:
- self.dirpath().ensure(dir=1)
- if 'b' in mode:
- if not py.builtin._isbytes(data):
- raise ValueError("can only process bytes")
- else:
- if not py.builtin._istext(data):
- if not py.builtin._isbytes(data):
- data = str(data)
- else:
- data = py.builtin._totext(data, sys.getdefaultencoding())
- f = self.open(mode)
- try:
- f.write(data)
- finally:
- f.close()
-
- def _ensuredirs(self):
- parent = self.dirpath()
- if parent == self:
- return self
- if parent.check(dir=0):
- parent._ensuredirs()
- if self.check(dir=0):
- try:
- self.mkdir()
- except py.error.EEXIST:
- # race condition: file/dir created by another thread/process.
- # complain if it is not a dir
- if self.check(dir=0):
- raise
- return self
-
- def ensure(self, *args, **kwargs):
- """ ensure that an args-joined path exists (by default as
- a file). if you specify a keyword argument 'dir=True'
- then the path is forced to be a directory path.
- """
- p = self.join(*args)
- if kwargs.get('dir', 0):
- return p._ensuredirs()
- else:
- p.dirpath()._ensuredirs()
- if not p.check(file=1):
- p.open('w').close()
- return p
-
- def stat(self, raising=True):
- """ Return an os.stat() tuple. """
- if raising == True:
- return Stat(self, py.error.checked_call(os.stat, self.strpath))
- try:
- return Stat(self, os.stat(self.strpath))
- except KeyboardInterrupt:
- raise
- except Exception:
- return None
-
- def lstat(self):
- """ Return an os.lstat() tuple. """
- return Stat(self, py.error.checked_call(os.lstat, self.strpath))
-
- def setmtime(self, mtime=None):
- """ set modification time for the given path. if 'mtime' is None
- (the default) then the file's mtime is set to current time.
-
- Note that the resolution for 'mtime' is platform dependent.
- """
- if mtime is None:
- return py.error.checked_call(os.utime, self.strpath, mtime)
- try:
- return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
- except py.error.EINVAL:
- return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
-
- def chdir(self):
- """ change directory to self and return old current directory """
- try:
- old = self.__class__()
- except py.error.ENOENT:
- old = None
- py.error.checked_call(os.chdir, self.strpath)
- return old
-
-
- @contextmanager
- def as_cwd(self):
- """ return context manager which changes to current dir during the
- managed "with" context. On __enter__ it returns the old dir.
- """
- old = self.chdir()
- try:
- yield old
- finally:
- old.chdir()
-
- def realpath(self):
- """ return a new path which contains no symbolic links."""
- return self.__class__(os.path.realpath(self.strpath))
-
- def atime(self):
- """ return last access time of the path. """
- return self.stat().atime
-
- def __repr__(self):
- return 'local(%r)' % self.strpath
-
- def __str__(self):
- """ return string representation of the Path. """
- return self.strpath
-
- def chmod(self, mode, rec=0):
- """ change permissions to the given mode. If mode is an
- integer it directly encodes the os-specific modes.
- if rec is True perform recursively.
- """
- if not isinstance(mode, int):
- raise TypeError("mode %r must be an integer" % (mode,))
- if rec:
- for x in self.visit(rec=rec):
- py.error.checked_call(os.chmod, str(x), mode)
- py.error.checked_call(os.chmod, self.strpath, mode)
-
- def pypkgpath(self):
- """ return the Python package path by looking for the last
- directory upwards which still contains an __init__.py.
- Return None if a pkgpath can not be determined.
- """
- pkgpath = None
- for parent in self.parts(reverse=True):
- if parent.isdir():
- if not parent.join('__init__.py').exists():
- break
- if not isimportable(parent.basename):
- break
- pkgpath = parent
- return pkgpath
-
- def _ensuresyspath(self, ensuremode, path):
- if ensuremode:
- s = str(path)
- if ensuremode == "append":
- if s not in sys.path:
- sys.path.append(s)
- else:
- if s != sys.path[0]:
- sys.path.insert(0, s)
-
- def pyimport(self, modname=None, ensuresyspath=True):
- """ return path as an imported python module.
-
- If modname is None, look for the containing package
- and construct an according module name.
- The module will be put/looked up in sys.modules.
- if ensuresyspath is True then the root dir for importing
- the file (taking __init__.py files into account) will
- be prepended to sys.path if it isn't there already.
- If ensuresyspath=="append" the root dir will be appended
- if it isn't already contained in sys.path.
- if ensuresyspath is False no modification of syspath happens.
- """
- if not self.check():
- raise py.error.ENOENT(self)
-
- pkgpath = None
- if modname is None:
- pkgpath = self.pypkgpath()
- if pkgpath is not None:
- pkgroot = pkgpath.dirpath()
- names = self.new(ext="").relto(pkgroot).split(self.sep)
- if names[-1] == "__init__":
- names.pop()
- modname = ".".join(names)
- else:
- pkgroot = self.dirpath()
- modname = self.purebasename
-
- self._ensuresyspath(ensuresyspath, pkgroot)
- __import__(modname)
- mod = sys.modules[modname]
- if self.basename == "__init__.py":
- return mod # we don't check anything as we might
- # be in a namespace package ... too icky to check
- modfile = mod.__file__
- if modfile[-4:] in ('.pyc', '.pyo'):
- modfile = modfile[:-1]
- elif modfile.endswith('$py.class'):
- modfile = modfile[:-9] + '.py'
- if modfile.endswith(os.path.sep + "__init__.py"):
- if self.basename != "__init__.py":
- modfile = modfile[:-12]
- try:
- issame = self.samefile(modfile)
- except py.error.ENOENT:
- issame = False
- if not issame:
- raise self.ImportMismatchError(modname, modfile, self)
- return mod
- else:
- try:
- return sys.modules[modname]
- except KeyError:
- # we have a custom modname, do a pseudo-import
- mod = py.std.types.ModuleType(modname)
- mod.__file__ = str(self)
- sys.modules[modname] = mod
- try:
- py.builtin.execfile(str(self), mod.__dict__)
- except:
- del sys.modules[modname]
- raise
- return mod
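A hedged usage sketch; the module name and file content are invented:

    import py
    mod_path = py.path.local.mkdtemp().ensure("hello_example.py")
    mod_path.write("GREETING = 'hi'\n")
    mod = mod_path.pyimport()     # prepends the temp dir to sys.path
    mod.GREETING                  # 'hi'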
-
- def sysexec(self, *argv, **popen_opts):
- """ return stdout text from executing a system child process,
- where the 'self' path points to executable.
- The process is directly invoked and not through a system shell.
- """
- from subprocess import Popen, PIPE
- argv = map_as_list(str, argv)
- popen_opts['stdout'] = popen_opts['stderr'] = PIPE
- proc = Popen([str(self)] + argv, **popen_opts)
- stdout, stderr = proc.communicate()
- ret = proc.wait()
- if py.builtin._isbytes(stdout):
- stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
- if ret != 0:
- if py.builtin._isbytes(stderr):
- stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
- raise py.process.cmdexec.Error(ret, ret, str(self),
- stdout, stderr,)
- return stdout
-
- def sysfind(cls, name, checker=None, paths=None):
- """ return a path object found by looking at the systems
- underlying PATH specification. If the checker is not None
- it will be invoked to filter matching paths. If a binary
- cannot be found, None is returned.
- Note: This probably does not work on plain win32 systems
- but may work on cygwin.
- """
- if isabs(name):
- p = py.path.local(name)
- if p.check(file=1):
- return p
- else:
- if paths is None:
- if iswin32:
- paths = py.std.os.environ['Path'].split(';')
- if '' not in paths and '.' not in paths:
- paths.append('.')
- try:
- systemroot = os.environ['SYSTEMROOT']
- except KeyError:
- pass
- else:
- paths = [re.sub('%SystemRoot%', systemroot, path)
- for path in paths]
- else:
- paths = py.std.os.environ['PATH'].split(':')
- tryadd = []
- if iswin32:
- tryadd += os.environ['PATHEXT'].split(os.pathsep)
- tryadd.append("")
-
- for x in paths:
- for addext in tryadd:
- p = py.path.local(x).join(name, abs=True) + addext
- try:
- if p.check(file=1):
- if checker:
- if not checker(p):
- continue
- return p
- except py.error.EACCES:
- pass
- return None
- sysfind = classmethod(sysfind)
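Illustrative calls (the binary names are just examples):

    import os
    import py
    py.path.local.sysfind("sh")    # e.g. local('/bin/sh') on POSIX, or None
    # a checker can veto candidates, e.g. requiring execute permission:
    py.path.local.sysfind("python", checker=lambda p: os.access(str(p), os.X_OK))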
-
- def _gethomedir(cls):
- try:
- x = os.environ['HOME']
- except KeyError:
- try:
- x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
- except KeyError:
- return None
- return cls(x)
- _gethomedir = classmethod(_gethomedir)
-
- #"""
- #special class constructors for local filesystem paths
- #"""
- def get_temproot(cls):
- """ return the system's temporary directory
- (where tempfiles are usually created in)
- """
- return py.path.local(py.std.tempfile.gettempdir())
- get_temproot = classmethod(get_temproot)
-
- def mkdtemp(cls, rootdir=None):
- """ return a Path object pointing to a fresh new temporary directory
- (which we created ourself).
- """
- import tempfile
- if rootdir is None:
- rootdir = cls.get_temproot()
- return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
- mkdtemp = classmethod(mkdtemp)
-
- def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
- lock_timeout = 172800): # two days
- """ return unique directory with a number greater than the current
- maximum one. The number is assumed to start directly after prefix.
- if keep is true directories with a number less than (maxnum-keep)
- will be removed.
- """
- if rootdir is None:
- rootdir = cls.get_temproot()
-
- def parse_num(path):
- """ parse the number out of a path (if it matches the prefix) """
- bn = path.basename
- if bn.startswith(prefix):
- try:
- return int(bn[len(prefix):])
- except ValueError:
- pass
-
- # compute the maximum number currently in use with the
- # prefix
- lastmax = None
- while True:
- maxnum = -1
- for path in rootdir.listdir():
- num = parse_num(path)
- if num is not None:
- maxnum = max(maxnum, num)
-
- # make the new directory
- try:
- udir = rootdir.mkdir(prefix + str(maxnum+1))
- except py.error.EEXIST:
- # race condition: another thread/process created the dir
- # in the meantime. Try counting again
- if lastmax == maxnum:
- raise
- lastmax = maxnum
- continue
- break
-
- # put a .lock file in the new directory that will be removed at
- # process exit
- if lock_timeout:
- lockfile = udir.join('.lock')
- mypid = os.getpid()
- if hasattr(lockfile, 'mksymlinkto'):
- lockfile.mksymlinkto(str(mypid))
- else:
- lockfile.write(str(mypid))
- def try_remove_lockfile():
- # in a fork() situation, only the last process should
- # remove the .lock, otherwise the other processes run the
- # risk of seeing their temporary dir disappear. For now
- # we remove the .lock in the parent only (i.e. we assume
- # that the children finish before the parent).
- if os.getpid() != mypid:
- return
- try:
- lockfile.remove()
- except py.error.Error:
- pass
- atexit.register(try_remove_lockfile)
-
- # prune old directories
- if keep:
- for path in rootdir.listdir():
- num = parse_num(path)
- if num is not None and num <= (maxnum - keep):
- lf = path.join('.lock')
- try:
- t1 = lf.lstat().mtime
- t2 = lockfile.lstat().mtime
- if not lock_timeout or abs(t2-t1) < lock_timeout:
- continue # skip directories still locked
- except py.error.Error:
- pass # assume that it means that there is no 'lf'
- try:
- path.remove(rec=1)
- except KeyboardInterrupt:
- raise
- except: # this might be py.error.Error, WindowsError ...
- pass
-
- # make link...
- try:
- username = os.environ['USER'] #linux, et al
- except KeyError:
- try:
- username = os.environ['USERNAME'] #windows
- except KeyError:
- username = 'current'
-
- src = str(udir)
- dest = src[:src.rfind('-')] + '-' + username
- try:
- os.unlink(dest)
- except OSError:
- pass
- try:
- os.symlink(src, dest)
- except (OSError, AttributeError, NotImplementedError):
- pass
-
- return udir
- make_numbered_dir = classmethod(make_numbered_dir)
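A hedged sketch (prefix and root directory are arbitrary):

    import py
    root = py.path.local.mkdtemp()
    d1 = py.path.local.make_numbered_dir(prefix="run-", rootdir=root, keep=2)
    d2 = py.path.local.make_numbered_dir(prefix="run-", rootdir=root, keep=2)
    d1.basename, d2.basename    # ('run-0', 'run-1')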
-
-def copymode(src, dest):
- """ copy permission from src to dst. """
- py.std.shutil.copymode(src, dest)
-
-def copystat(src, dest):
- """ copy permission, last modification time, last access time, and flags from src to dst."""
- py.std.shutil.copystat(str(src), str(dest))
-
-def copychunked(src, dest):
- chunksize = 524288 # half a meg of bytes
- fsrc = src.open('rb')
- try:
- fdest = dest.open('wb')
- try:
- while 1:
- buf = fsrc.read(chunksize)
- if not buf:
- break
- fdest.write(buf)
- finally:
- fdest.close()
- finally:
- fsrc.close()
-
-def isimportable(name):
- if name and (name[0].isalpha() or name[0] == '_'):
- name = name.replace("_", '')
- return not name or name.isalnum()
+"""
+local path implementation.
+"""
+from __future__ import with_statement
+
+from contextlib import contextmanager
+import sys, os, re, atexit, io
+import py
+from py._path import common
+from py._path.common import iswin32, fspath
+from stat import S_ISLNK, S_ISDIR, S_ISREG
+
+from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
+
+if sys.version_info > (3,0):
+ def map_as_list(func, iter):
+ return list(map(func, iter))
+else:
+ map_as_list = map
+
+class Stat(object):
+ def __getattr__(self, name):
+ return getattr(self._osstatresult, "st_" + name)
+
+ def __init__(self, path, osstatresult):
+ self.path = path
+ self._osstatresult = osstatresult
+
+ @property
+ def owner(self):
+ if iswin32:
+ raise NotImplementedError("XXX win32")
+ import pwd
+ entry = py.error.checked_call(pwd.getpwuid, self.uid)
+ return entry[0]
+
+ @property
+ def group(self):
+ """ return group name of file. """
+ if iswin32:
+ raise NotImplementedError("XXX win32")
+ import grp
+ entry = py.error.checked_call(grp.getgrgid, self.gid)
+ return entry[0]
+
+ def isdir(self):
+ return S_ISDIR(self._osstatresult.st_mode)
+
+ def isfile(self):
+ return S_ISREG(self._osstatresult.st_mode)
+
+ def islink(self):
+ st = self.path.lstat()
+ return S_ISLNK(self._osstatresult.st_mode)
+
+class PosixPath(common.PathBase):
+ def chown(self, user, group, rec=0):
+ """ change ownership to the given user and group.
+ user and group may be specified by a number or
+ by a name. if rec is True change ownership
+ recursively.
+ """
+ uid = getuserid(user)
+ gid = getgroupid(group)
+ if rec:
+ for x in self.visit(rec=lambda x: x.check(link=0)):
+ if x.check(link=0):
+ py.error.checked_call(os.chown, str(x), uid, gid)
+ py.error.checked_call(os.chown, str(self), uid, gid)
+
+ def readlink(self):
+ """ return value of a symbolic link. """
+ return py.error.checked_call(os.readlink, self.strpath)
+
+ def mklinkto(self, oldname):
+ """ posix style hard link to another name. """
+ py.error.checked_call(os.link, str(oldname), str(self))
+
+ def mksymlinkto(self, value, absolute=1):
+ """ create a symbolic link with the given value (pointing to another name). """
+ if absolute:
+ py.error.checked_call(os.symlink, str(value), self.strpath)
+ else:
+ base = self.common(value)
+ # with posix local paths '/' is always a common base
+ relsource = self.__class__(value).relto(base)
+ reldest = self.relto(base)
+ n = reldest.count(self.sep)
+ target = self.sep.join(('..', )*n + (relsource, ))
+ py.error.checked_call(os.symlink, target, self.strpath)
+
+def getuserid(user):
+ import pwd
+ if not isinstance(user, int):
+ user = pwd.getpwnam(user)[2]
+ return user
+
+def getgroupid(group):
+ import grp
+ if not isinstance(group, int):
+ group = grp.getgrnam(group)[2]
+ return group
+
+FSBase = not iswin32 and PosixPath or common.PathBase
+
+class LocalPath(FSBase):
+ """ object oriented interface to os.path and other local filesystem
+ related information.
+ """
+ class ImportMismatchError(ImportError):
+ """ raised on pyimport() if there is a mismatch of __file__'s"""
+
+ sep = os.sep
+ class Checkers(common.Checkers):
+ def _stat(self):
+ try:
+ return self._statcache
+ except AttributeError:
+ try:
+ self._statcache = self.path.stat()
+ except py.error.ELOOP:
+ self._statcache = self.path.lstat()
+ return self._statcache
+
+ def dir(self):
+ return S_ISDIR(self._stat().mode)
+
+ def file(self):
+ return S_ISREG(self._stat().mode)
+
+ def exists(self):
+ return self._stat()
+
+ def link(self):
+ st = self.path.lstat()
+ return S_ISLNK(st.mode)
+
+ def __init__(self, path=None, expanduser=False):
+ """ Initialize and return a local Path instance.
+
+ Path can be relative to the current directory.
+ If path is None it defaults to the current working directory.
+ If expanduser is True, tilde-expansion is performed.
+ Note that Path instances always carry an absolute path.
+ Note also that passing in a local path object will simply return
+ the exact same path object. Use new() to get a new copy.
+ """
+ if path is None:
+ self.strpath = py.error.checked_call(os.getcwd)
+ else:
+ try:
+ path = fspath(path)
+ except TypeError:
+ raise ValueError("can only pass None, Path instances "
+ "or non-empty strings to LocalPath")
+ if expanduser:
+ path = os.path.expanduser(path)
+ self.strpath = abspath(path)
+
+ def __hash__(self):
+ return hash(self.strpath)
+
+ def __eq__(self, other):
+ s1 = fspath(self)
+ try:
+ s2 = fspath(other)
+ except TypeError:
+ return False
+ if iswin32:
+ s1 = s1.lower()
+ try:
+ s2 = s2.lower()
+ except AttributeError:
+ return False
+ return s1 == s2
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __lt__(self, other):
+ return fspath(self) < fspath(other)
+
+ def __gt__(self, other):
+ return fspath(self) > fspath(other)
+
+ def samefile(self, other):
+ """ return True if 'other' references the same file as 'self'.
+ """
+ other = fspath(other)
+ if not isabs(other):
+ other = abspath(other)
+ if self == other:
+ return True
+ if iswin32:
+ return False # there is no samefile
+ return py.error.checked_call(
+ os.path.samefile, self.strpath, other)
+
+ def remove(self, rec=1, ignore_errors=False):
+ """ remove a file or directory (or a directory tree if rec=1).
+ if ignore_errors is True, errors while removing directories will
+ be ignored.
+ """
+ if self.check(dir=1, link=0):
+ if rec:
+ # force remove of readonly files on windows
+ if iswin32:
+ self.chmod(448, rec=1) # octal 0700
+ py.error.checked_call(py.std.shutil.rmtree, self.strpath,
+ ignore_errors=ignore_errors)
+ else:
+ py.error.checked_call(os.rmdir, self.strpath)
+ else:
+ if iswin32:
+ self.chmod(448) # octal 0700
+ py.error.checked_call(os.remove, self.strpath)
+
+ def computehash(self, hashtype="md5", chunksize=524288):
+ """ return hexdigest of hashvalue for this file. """
+ try:
+ try:
+ import hashlib as mod
+ except ImportError:
+ if hashtype == "sha1":
+ hashtype = "sha"
+ mod = __import__(hashtype)
+ hash = getattr(mod, hashtype)()
+ except (AttributeError, ImportError):
+ raise ValueError("Don't know how to compute %r hash" %(hashtype,))
+ f = self.open('rb')
+ try:
+ while 1:
+ buf = f.read(chunksize)
+ if not buf:
+ return hash.hexdigest()
+ hash.update(buf)
+ finally:
+ f.close()
+
+ def new(self, **kw):
+ """ create a modified version of this path.
+ the following keyword arguments modify various path parts::
+
+ a:/some/path/to/a/file.ext
+ xx drive
+ xxxxxxxxxxxxxxxxx dirname
+ xxxxxxxx basename
+ xxxx purebasename
+ xxx ext
+ """
+ obj = object.__new__(self.__class__)
+ if not kw:
+ obj.strpath = self.strpath
+ return obj
+ drive, dirname, basename, purebasename,ext = self._getbyspec(
+ "drive,dirname,basename,purebasename,ext")
+ if 'basename' in kw:
+ if 'purebasename' in kw or 'ext' in kw:
+ raise ValueError("invalid specification %r" % kw)
+ else:
+ pb = kw.setdefault('purebasename', purebasename)
+ try:
+ ext = kw['ext']
+ except KeyError:
+ pass
+ else:
+ if ext and not ext.startswith('.'):
+ ext = '.' + ext
+ kw['basename'] = pb + ext
+
+ if ('dirname' in kw and not kw['dirname']):
+ kw['dirname'] = drive
+ else:
+ kw.setdefault('dirname', dirname)
+ kw.setdefault('sep', self.sep)
+ obj.strpath = normpath(
+ "%(dirname)s%(sep)s%(basename)s" % kw)
+ return obj
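A hedged sketch of new() with a hypothetical path:

    import py
    p = py.path.local("/data/report.txt")
    p.new(ext=".csv")               # local('/data/report.csv')
    p.new(purebasename="summary")   # local('/data/summary.txt')
    p.new(basename="notes.md")      # local('/data/notes.md')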
+
+ def _getbyspec(self, spec):
+ """ see new for what 'spec' can be. """
+ res = []
+ parts = self.strpath.split(self.sep)
+
+ args = filter(None, spec.split(',') )
+ append = res.append
+ for name in args:
+ if name == 'drive':
+ append(parts[0])
+ elif name == 'dirname':
+ append(self.sep.join(parts[:-1]))
+ else:
+ basename = parts[-1]
+ if name == 'basename':
+ append(basename)
+ else:
+ i = basename.rfind('.')
+ if i == -1:
+ purebasename, ext = basename, ''
+ else:
+ purebasename, ext = basename[:i], basename[i:]
+ if name == 'purebasename':
+ append(purebasename)
+ elif name == 'ext':
+ append(ext)
+ else:
+ raise ValueError("invalid part specification %r" % name)
+ return res
+
+ def dirpath(self, *args, **kwargs):
+ """ return the directory path joined with any given path arguments. """
+ if not kwargs:
+ path = object.__new__(self.__class__)
+ path.strpath = dirname(self.strpath)
+ if args:
+ path = path.join(*args)
+ return path
+ return super(LocalPath, self).dirpath(*args, **kwargs)
+
+ def join(self, *args, **kwargs):
+ """ return a new path by appending all 'args' as path
+ components. if abs=1 is used restart from root if any
+ of the args is an absolute path.
+ """
+ sep = self.sep
+ strargs = [fspath(arg) for arg in args]
+ strpath = self.strpath
+ if kwargs.get('abs'):
+ newargs = []
+ for arg in reversed(strargs):
+ if isabs(arg):
+ strpath = arg
+ strargs = newargs
+ break
+ newargs.insert(0, arg)
+ for arg in strargs:
+ arg = arg.strip(sep)
+ if iswin32:
+ # allow unix style paths even on windows.
+ arg = arg.strip('/')
+ arg = arg.replace('/', sep)
+ strpath = strpath + sep + arg
+ obj = object.__new__(self.__class__)
+ obj.strpath = normpath(strpath)
+ return obj
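A sketch of join(), including the abs=1 restart behaviour (paths invented):

    import py
    base = py.path.local("/srv/app")
    base.join("conf", "app.ini")             # local('/srv/app/conf/app.ini')
    base.join("logs", "/etc/passwd", abs=1)  # local('/etc/passwd')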
+
+ def open(self, mode='r', ensure=False, encoding=None):
+ """ return an opened file with the given mode.
+
+ If ensure is True, create parent directories if needed.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ if encoding:
+ return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
+ return py.error.checked_call(open, self.strpath, mode)
+
+ def _fastjoin(self, name):
+ child = object.__new__(self.__class__)
+ child.strpath = self.strpath + self.sep + name
+ return child
+
+ def islink(self):
+ return islink(self.strpath)
+
+ def check(self, **kw):
+ if not kw:
+ return exists(self.strpath)
+ if len(kw) == 1:
+ if "dir" in kw:
+ return not kw["dir"] ^ isdir(self.strpath)
+ if "file" in kw:
+ return not kw["file"] ^ isfile(self.strpath)
+ return super(LocalPath, self).check(**kw)
+
+ _patternchars = set("*?[" + os.path.sep)
+ def listdir(self, fil=None, sort=None):
+ """ list directory contents, possibly filter by the given fil func
+ and possibly sorted.
+ """
+ if fil is None and sort is None:
+ names = py.error.checked_call(os.listdir, self.strpath)
+ return map_as_list(self._fastjoin, names)
+ if isinstance(fil, py.builtin._basestring):
+ if not self._patternchars.intersection(fil):
+ child = self._fastjoin(fil)
+ if exists(child.strpath):
+ return [child]
+ return []
+ fil = common.FNMatcher(fil)
+ names = py.error.checked_call(os.listdir, self.strpath)
+ res = []
+ for name in names:
+ child = self._fastjoin(name)
+ if fil is None or fil(child):
+ res.append(child)
+ self._sortlist(res, sort)
+ return res
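A hedged example of the filter shortcuts (file names invented):

    import py
    d = py.path.local.mkdtemp()
    d.ensure("a.py")
    d.ensure("b.txt")
    [x.basename for x in d.listdir("*.py")]    # ['a.py']   (glob filter)
    [x.basename for x in d.listdir("b.txt")]   # ['b.txt']  (fast single-name path)
    sorted(x.basename for x in d.listdir())    # ['a.py', 'b.txt']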
+
+ def size(self):
+ """ return size of the underlying file object """
+ return self.stat().size
+
+ def mtime(self):
+ """ return last modification time of the path. """
+ return self.stat().mtime
+
+ def copy(self, target, mode=False, stat=False):
+ """ copy path to target.
+
+ If mode is True, will copy permission from path to target.
+ If stat is True, copy permission, last modification
+ time, last access time, and flags from path to target.
+ """
+ if self.check(file=1):
+ if target.check(dir=1):
+ target = target.join(self.basename)
+ assert self!=target
+ copychunked(self, target)
+ if mode:
+ copymode(self.strpath, target.strpath)
+ if stat:
+ copystat(self, target)
+ else:
+ def rec(p):
+ return p.check(link=0)
+ for x in self.visit(rec=rec):
+ relpath = x.relto(self)
+ newx = target.join(relpath)
+ newx.dirpath().ensure(dir=1)
+ if x.check(link=1):
+ newx.mksymlinkto(x.readlink())
+ continue
+ elif x.check(file=1):
+ copychunked(x, newx)
+ elif x.check(dir=1):
+ newx.ensure(dir=1)
+ if mode:
+ copymode(x.strpath, newx.strpath)
+ if stat:
+ copystat(x, newx)
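A minimal sketch of a recursive directory copy (layout invented):

    import py
    src = py.path.local.mkdtemp()
    src.ensure("conf", "app.ini")
    dst = py.path.local.mkdtemp()
    src.copy(dst)
    dst.join("conf", "app.ini").check(file=1)   # True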
+
+ def rename(self, target):
+ """ rename this path to target. """
+ target = fspath(target)
+ return py.error.checked_call(os.rename, self.strpath, target)
+
+ def dump(self, obj, bin=1):
+ """ pickle object into path location"""
+ f = self.open('wb')
+ try:
+ py.error.checked_call(py.std.pickle.dump, obj, f, bin)
+ finally:
+ f.close()
+
+ def mkdir(self, *args):
+ """ create & return the directory joined with args. """
+ p = self.join(*args)
+ py.error.checked_call(os.mkdir, fspath(p))
+ return p
+
+ def write_binary(self, data, ensure=False):
+ """ write binary data into path. If ensure is True create
+ missing parent directories.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ with self.open('wb') as f:
+ f.write(data)
+
+ def write_text(self, data, encoding, ensure=False):
+ """ write text data into path using the specified encoding.
+ If ensure is True create missing parent directories.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ with self.open('w', encoding=encoding) as f:
+ f.write(data)
+
+ def write(self, data, mode='w', ensure=False):
+ """ write data into path. If ensure is True create
+ missing parent directories.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ if 'b' in mode:
+ if not py.builtin._isbytes(data):
+ raise ValueError("can only process bytes")
+ else:
+ if not py.builtin._istext(data):
+ if not py.builtin._isbytes(data):
+ data = str(data)
+ else:
+ data = py.builtin._totext(data, sys.getdefaultencoding())
+ f = self.open(mode)
+ try:
+ f.write(data)
+ finally:
+ f.close()
+
+ def _ensuredirs(self):
+ parent = self.dirpath()
+ if parent == self:
+ return self
+ if parent.check(dir=0):
+ parent._ensuredirs()
+ if self.check(dir=0):
+ try:
+ self.mkdir()
+ except py.error.EEXIST:
+ # race condition: file/dir created by another thread/process.
+ # complain if it is not a dir
+ if self.check(dir=0):
+ raise
+ return self
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). if you specify a keyword argument 'dir=True'
+ then the path is forced to be a directory path.
+ """
+ p = self.join(*args)
+ if kwargs.get('dir', 0):
+ return p._ensuredirs()
+ else:
+ p.dirpath()._ensuredirs()
+ if not p.check(file=1):
+ p.open('w').close()
+ return p
+
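+    # Illustrative usage sketch (not part of upstream py): ensure() creates
+    # any missing parents and returns the joined path; dir=True forces a
+    # directory instead of an empty file. The base path is hypothetical.
+    #
+    #   >>> base = py.path.local("/tmp/project")
+    #   >>> base.ensure("pkg", "mod.py")            # empty file, parents created
+    #   >>> base.ensure("build", "out", dir=True)   # nested directories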
+ def stat(self, raising=True):
+ """ Return an os.stat() tuple. """
+ if raising == True:
+ return Stat(self, py.error.checked_call(os.stat, self.strpath))
+ try:
+ return Stat(self, os.stat(self.strpath))
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ return None
+
+ def lstat(self):
+ """ Return an os.lstat() tuple. """
+ return Stat(self, py.error.checked_call(os.lstat, self.strpath))
+
+ def setmtime(self, mtime=None):
+ """ set modification time for the given path. if 'mtime' is None
+ (the default) then the file's mtime is set to current time.
+
+ Note that the resolution for 'mtime' is platform dependent.
+ """
+ if mtime is None:
+ return py.error.checked_call(os.utime, self.strpath, mtime)
+ try:
+ return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
+ except py.error.EINVAL:
+ return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
+
+ def chdir(self):
+ """ change directory to self and return old current directory """
+ try:
+ old = self.__class__()
+ except py.error.ENOENT:
+ old = None
+ py.error.checked_call(os.chdir, self.strpath)
+ return old
+
+
+ @contextmanager
+ def as_cwd(self):
+        """ return a context manager which changes the current working
+        directory to this path during the managed "with" context.
+        On __enter__ it returns the old dir.
+        """
+ old = self.chdir()
+ try:
+ yield old
+ finally:
+ old.chdir()
+
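+    # Illustrative usage sketch (not part of upstream py): as_cwd() wraps
+    # chdir() and restores the previous working directory on exit; 'old'
+    # is the directory that was current before entering the block.
+    #
+    #   >>> with py.path.local("/tmp").as_cwd() as old:
+    #   ...     pass          # os.getcwd() is /tmp inside the block
+    #   >>> # the previous working directory is restored here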
+ def realpath(self):
+ """ return a new path which contains no symbolic links."""
+ return self.__class__(os.path.realpath(self.strpath))
+
+ def atime(self):
+ """ return last access time of the path. """
+ return self.stat().atime
+
+ def __repr__(self):
+ return 'local(%r)' % self.strpath
+
+ def __str__(self):
+ """ return string representation of the Path. """
+ return self.strpath
+
+ def chmod(self, mode, rec=0):
+ """ change permissions to the given mode. If mode is an
+ integer it directly encodes the os-specific modes.
+ if rec is True perform recursively.
+ """
+ if not isinstance(mode, int):
+ raise TypeError("mode %r must be an integer" % (mode,))
+ if rec:
+ for x in self.visit(rec=rec):
+ py.error.checked_call(os.chmod, str(x), mode)
+ py.error.checked_call(os.chmod, self.strpath, mode)
+
+ def pypkgpath(self):
+ """ return the Python package path by looking for the last
+ directory upwards which still contains an __init__.py.
+ Return None if a pkgpath can not be determined.
+ """
+ pkgpath = None
+ for parent in self.parts(reverse=True):
+ if parent.isdir():
+ if not parent.join('__init__.py').exists():
+ break
+ if not isimportable(parent.basename):
+ break
+ pkgpath = parent
+ return pkgpath
+
+ def _ensuresyspath(self, ensuremode, path):
+ if ensuremode:
+ s = str(path)
+ if ensuremode == "append":
+ if s not in sys.path:
+ sys.path.append(s)
+ else:
+ if s != sys.path[0]:
+ sys.path.insert(0, s)
+
+ def pyimport(self, modname=None, ensuresyspath=True):
+ """ return path as an imported python module.
+
+ If modname is None, look for the containing package
+ and construct an according module name.
+ The module will be put/looked up in sys.modules.
+ if ensuresyspath is True then the root dir for importing
+ the file (taking __init__.py files into account) will
+ be prepended to sys.path if it isn't there already.
+ If ensuresyspath=="append" the root dir will be appended
+ if it isn't already contained in sys.path.
+ if ensuresyspath is False no modification of syspath happens.
+ """
+ if not self.check():
+ raise py.error.ENOENT(self)
+
+ pkgpath = None
+ if modname is None:
+ pkgpath = self.pypkgpath()
+ if pkgpath is not None:
+ pkgroot = pkgpath.dirpath()
+ names = self.new(ext="").relto(pkgroot).split(self.sep)
+ if names[-1] == "__init__":
+ names.pop()
+ modname = ".".join(names)
+ else:
+ pkgroot = self.dirpath()
+ modname = self.purebasename
+
+ self._ensuresyspath(ensuresyspath, pkgroot)
+ __import__(modname)
+ mod = sys.modules[modname]
+ if self.basename == "__init__.py":
+ return mod # we don't check anything as we might
+                           # be in a namespace package ... too icky to check
+ modfile = mod.__file__
+ if modfile[-4:] in ('.pyc', '.pyo'):
+ modfile = modfile[:-1]
+ elif modfile.endswith('$py.class'):
+ modfile = modfile[:-9] + '.py'
+ if modfile.endswith(os.path.sep + "__init__.py"):
+ if self.basename != "__init__.py":
+ modfile = modfile[:-12]
+ try:
+ issame = self.samefile(modfile)
+ except py.error.ENOENT:
+ issame = False
+ if not issame:
+ raise self.ImportMismatchError(modname, modfile, self)
+ return mod
+ else:
+ try:
+ return sys.modules[modname]
+ except KeyError:
+ # we have a custom modname, do a pseudo-import
+ mod = py.std.types.ModuleType(modname)
+ mod.__file__ = str(self)
+ sys.modules[modname] = mod
+ try:
+ py.builtin.execfile(str(self), mod.__dict__)
+ except:
+ del sys.modules[modname]
+ raise
+ return mod
+
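+    # Illustrative usage sketch (not part of upstream py): pyimport() derives
+    # the dotted module name from pypkgpath() and imports the file. The layout
+    # below is hypothetical and assumes /src/mypkg/__init__.py exists.
+    #
+    #   >>> mod = py.path.local("/src/mypkg/util.py").pyimport()
+    #   >>> mod.__name__      # 'mypkg.util' for this layout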
+ def sysexec(self, *argv, **popen_opts):
+ """ return stdout text from executing a system child process,
+ where the 'self' path points to executable.
+ The process is directly invoked and not through a system shell.
+ """
+ from subprocess import Popen, PIPE
+ argv = map_as_list(str, argv)
+ popen_opts['stdout'] = popen_opts['stderr'] = PIPE
+ proc = Popen([str(self)] + argv, **popen_opts)
+ stdout, stderr = proc.communicate()
+ ret = proc.wait()
+ if py.builtin._isbytes(stdout):
+ stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
+ if ret != 0:
+ if py.builtin._isbytes(stderr):
+ stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
+ raise py.process.cmdexec.Error(ret, ret, str(self),
+ stdout, stderr,)
+ return stdout
+
+ def sysfind(cls, name, checker=None, paths=None):
+ """ return a path object found by looking at the systems
+ underlying PATH specification. If the checker is not None
+ it will be invoked to filter matching paths. If a binary
+        cannot be found, None is returned.
+        Note: this probably does not work on plain win32 systems
+ but may work on cygwin.
+ """
+ if isabs(name):
+ p = py.path.local(name)
+ if p.check(file=1):
+ return p
+ else:
+ if paths is None:
+ if iswin32:
+ paths = py.std.os.environ['Path'].split(';')
+ if '' not in paths and '.' not in paths:
+ paths.append('.')
+ try:
+ systemroot = os.environ['SYSTEMROOT']
+ except KeyError:
+ pass
+ else:
+ paths = [re.sub('%SystemRoot%', systemroot, path)
+ for path in paths]
+ else:
+ paths = py.std.os.environ['PATH'].split(':')
+ tryadd = []
+ if iswin32:
+ tryadd += os.environ['PATHEXT'].split(os.pathsep)
+ tryadd.append("")
+
+ for x in paths:
+ for addext in tryadd:
+ p = py.path.local(x).join(name, abs=True) + addext
+ try:
+ if p.check(file=1):
+ if checker:
+ if not checker(p):
+ continue
+ return p
+ except py.error.EACCES:
+ pass
+ return None
+ sysfind = classmethod(sysfind)
+
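+    # Illustrative usage sketch (not part of upstream py): sysfind() searches
+    # PATH and returns a py.path.local or None; the result can then be run
+    # with sysexec(). 'ls' is only an example binary.
+    #
+    #   >>> ls = py.path.local.sysfind("ls")
+    #   >>> if ls is not None:
+    #   ...     out = ls.sysexec("-a")    # stdout as text, raises if rc != 0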
+ def _gethomedir(cls):
+ try:
+ x = os.environ['HOME']
+ except KeyError:
+ try:
+ x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
+ except KeyError:
+ return None
+ return cls(x)
+ _gethomedir = classmethod(_gethomedir)
+
+ #"""
+ #special class constructors for local filesystem paths
+ #"""
+ def get_temproot(cls):
+ """ return the system's temporary directory
+ (where tempfiles are usually created in)
+ """
+ return py.path.local(py.std.tempfile.gettempdir())
+ get_temproot = classmethod(get_temproot)
+
+ def mkdtemp(cls, rootdir=None):
+ """ return a Path object pointing to a fresh new temporary directory
+        (which we created ourselves).
+ """
+ import tempfile
+ if rootdir is None:
+ rootdir = cls.get_temproot()
+ return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
+ mkdtemp = classmethod(mkdtemp)
+
+ def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
+ lock_timeout = 172800): # two days
+ """ return unique directory with a number greater than the current
+ maximum one. The number is assumed to start directly after prefix.
+        If keep is true, directories with a number less than (maxnum-keep)
+ will be removed.
+ """
+ if rootdir is None:
+ rootdir = cls.get_temproot()
+
+ def parse_num(path):
+ """ parse the number out of a path (if it matches the prefix) """
+ bn = path.basename
+ if bn.startswith(prefix):
+ try:
+ return int(bn[len(prefix):])
+ except ValueError:
+ pass
+
+ # compute the maximum number currently in use with the
+ # prefix
+ lastmax = None
+ while True:
+ maxnum = -1
+ for path in rootdir.listdir():
+ num = parse_num(path)
+ if num is not None:
+ maxnum = max(maxnum, num)
+
+ # make the new directory
+ try:
+ udir = rootdir.mkdir(prefix + str(maxnum+1))
+ except py.error.EEXIST:
+ # race condition: another thread/process created the dir
+ # in the meantime. Try counting again
+ if lastmax == maxnum:
+ raise
+ lastmax = maxnum
+ continue
+ break
+
+ # put a .lock file in the new directory that will be removed at
+ # process exit
+ if lock_timeout:
+ lockfile = udir.join('.lock')
+ mypid = os.getpid()
+ if hasattr(lockfile, 'mksymlinkto'):
+ lockfile.mksymlinkto(str(mypid))
+ else:
+ lockfile.write(str(mypid))
+ def try_remove_lockfile():
+ # in a fork() situation, only the last process should
+ # remove the .lock, otherwise the other processes run the
+ # risk of seeing their temporary dir disappear. For now
+ # we remove the .lock in the parent only (i.e. we assume
+ # that the children finish before the parent).
+ if os.getpid() != mypid:
+ return
+ try:
+ lockfile.remove()
+ except py.error.Error:
+ pass
+ atexit.register(try_remove_lockfile)
+
+ # prune old directories
+ if keep:
+ for path in rootdir.listdir():
+ num = parse_num(path)
+ if num is not None and num <= (maxnum - keep):
+ lf = path.join('.lock')
+ try:
+ t1 = lf.lstat().mtime
+ t2 = lockfile.lstat().mtime
+ if not lock_timeout or abs(t2-t1) < lock_timeout:
+ continue # skip directories still locked
+ except py.error.Error:
+ pass # assume that it means that there is no 'lf'
+ try:
+ path.remove(rec=1)
+ except KeyboardInterrupt:
+ raise
+ except: # this might be py.error.Error, WindowsError ...
+ pass
+
+ # make link...
+ try:
+ username = os.environ['USER'] #linux, et al
+ except KeyError:
+ try:
+ username = os.environ['USERNAME'] #windows
+ except KeyError:
+ username = 'current'
+
+ src = str(udir)
+ dest = src[:src.rfind('-')] + '-' + username
+ try:
+ os.unlink(dest)
+ except OSError:
+ pass
+ try:
+ os.symlink(src, dest)
+ except (OSError, AttributeError, NotImplementedError):
+ pass
+
+ return udir
+ make_numbered_dir = classmethod(make_numbered_dir)
+
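+# Illustrative usage sketch (not part of upstream py): make_numbered_dir()
+# creates "prefix-N" directories, prunes entries older than 'keep' and drops
+# a '.lock' file that is removed at process exit. The prefix is arbitrary.
+#
+#   >>> d = py.path.local.make_numbered_dir(prefix="demo-", keep=3)
+#   >>> d.basename            # e.g. 'demo-0' on the first call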
+def copymode(src, dest):
+ """ copy permission from src to dst. """
+ py.std.shutil.copymode(src, dest)
+
+def copystat(src, dest):
+ """ copy permission, last modification time, last access time, and flags from src to dst."""
+ py.std.shutil.copystat(str(src), str(dest))
+
+def copychunked(src, dest):
+ chunksize = 524288 # half a meg of bytes
+ fsrc = src.open('rb')
+ try:
+ fdest = dest.open('wb')
+ try:
+ while 1:
+ buf = fsrc.read(chunksize)
+ if not buf:
+ break
+ fdest.write(buf)
+ finally:
+ fdest.close()
+ finally:
+ fsrc.close()
+
+def isimportable(name):
+ if name and (name[0].isalpha() or name[0] == '_'):
+ name = name.replace("_", '')
+ return not name or name.isalnum()
diff --git a/py/_path/svnurl.py b/py/_path/svnurl.py
index 78d7131..5f45fa2 100644
--- a/py/_path/svnurl.py
+++ b/py/_path/svnurl.py
@@ -1,380 +1,380 @@
-"""
-module defining a subversion path object based on the external
-command 'svn'. This module aims to work with svn 1.3 and higher
-but might also interact well with earlier versions.
-"""
-
-import os, sys, time, re
-import py
-from py import path, process
-from py._path import common
-from py._path import svnwc as svncommon
-from py._path.cacheutil import BuildcostAccessCache, AgingCache
-
-DEBUG=False
-
-class SvnCommandPath(svncommon.SvnPathBase):
- """ path implementation that offers access to (possibly remote) subversion
- repositories. """
-
- _lsrevcache = BuildcostAccessCache(maxentries=128)
- _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0)
-
- def __new__(cls, path, rev=None, auth=None):
- self = object.__new__(cls)
- if isinstance(path, cls):
- rev = path.rev
- auth = path.auth
- path = path.strpath
- svncommon.checkbadchars(path)
- path = path.rstrip('/')
- self.strpath = path
- self.rev = rev
- self.auth = auth
- return self
-
- def __repr__(self):
- if self.rev == -1:
- return 'svnurl(%r)' % self.strpath
- else:
- return 'svnurl(%r, %r)' % (self.strpath, self.rev)
-
- def _svnwithrev(self, cmd, *args):
- """ execute an svn command, append our own url and revision """
- if self.rev is None:
- return self._svnwrite(cmd, *args)
- else:
- args = ['-r', self.rev] + list(args)
- return self._svnwrite(cmd, *args)
-
- def _svnwrite(self, cmd, *args):
- """ execute an svn command, append our own url """
- l = ['svn %s' % cmd]
- args = ['"%s"' % self._escape(item) for item in args]
- l.extend(args)
- l.append('"%s"' % self._encodedurl())
- # fixing the locale because we can't otherwise parse
- string = " ".join(l)
- if DEBUG:
- print("execing %s" % string)
- out = self._svncmdexecauth(string)
- return out
-
- def _svncmdexecauth(self, cmd):
- """ execute an svn command 'as is' """
- cmd = svncommon.fixlocale() + cmd
- if self.auth is not None:
- cmd += ' ' + self.auth.makecmdoptions()
- return self._cmdexec(cmd)
-
- def _cmdexec(self, cmd):
- try:
- out = process.cmdexec(cmd)
- except py.process.cmdexec.Error:
- e = sys.exc_info()[1]
- if (e.err.find('File Exists') != -1 or
- e.err.find('File already exists') != -1):
- raise py.error.EEXIST(self)
- raise
- return out
-
- def _svnpopenauth(self, cmd):
-        """ execute an svn command, return a pipe for reading its output """
- cmd = svncommon.fixlocale() + cmd
- if self.auth is not None:
- cmd += ' ' + self.auth.makecmdoptions()
- return self._popen(cmd)
-
- def _popen(self, cmd):
- return os.popen(cmd)
-
- def _encodedurl(self):
- return self._escape(self.strpath)
-
- def _norev_delentry(self, path):
- auth = self.auth and self.auth.makecmdoptions() or None
- self._lsnorevcache.delentry((str(path), auth))
-
- def open(self, mode='r'):
- """ return an opened file with the given mode. """
- if mode not in ("r", "rU",):
- raise ValueError("mode %r not supported" % (mode,))
- assert self.check(file=1) # svn cat returns an empty file otherwise
- if self.rev is None:
- return self._svnpopenauth('svn cat "%s"' % (
- self._escape(self.strpath), ))
- else:
- return self._svnpopenauth('svn cat -r %s "%s"' % (
- self.rev, self._escape(self.strpath)))
-
- def dirpath(self, *args, **kwargs):
- """ return the directory path of the current path joined
- with any given path arguments.
- """
- l = self.strpath.split(self.sep)
- if len(l) < 4:
- raise py.error.EINVAL(self, "base is not valid")
- elif len(l) == 4:
- return self.join(*args, **kwargs)
- else:
- return self.new(basename='').join(*args, **kwargs)
-
- # modifying methods (cache must be invalidated)
- def mkdir(self, *args, **kwargs):
- """ create & return the directory joined with args.
- pass a 'msg' keyword argument to set the commit message.
- """
- commit_msg = kwargs.get('msg', "mkdir by py lib invocation")
- createpath = self.join(*args)
- createpath._svnwrite('mkdir', '-m', commit_msg)
- self._norev_delentry(createpath.dirpath())
- return createpath
-
- def copy(self, target, msg='copied by py lib invocation'):
- """ copy path to target with checkin message msg."""
- if getattr(target, 'rev', None) is not None:
- raise py.error.EINVAL(target, "revisions are immutable")
- self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg,
- self._escape(self), self._escape(target)))
- self._norev_delentry(target.dirpath())
-
- def rename(self, target, msg="renamed by py lib invocation"):
- """ rename this path to target with checkin message msg. """
- if getattr(self, 'rev', None) is not None:
- raise py.error.EINVAL(self, "revisions are immutable")
- self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %(
- msg, self._escape(self), self._escape(target)))
- self._norev_delentry(self.dirpath())
- self._norev_delentry(self)
-
- def remove(self, rec=1, msg='removed by py lib invocation'):
- """ remove a file or directory (or a directory tree if rec=1) with
-checkin message msg."""
- if self.rev is not None:
- raise py.error.EINVAL(self, "revisions are immutable")
- self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self)))
- self._norev_delentry(self.dirpath())
-
- def export(self, topath):
- """ export to a local path
-
- topath should not exist prior to calling this, returns a
- py.path.local instance
- """
- topath = py.path.local(topath)
- args = ['"%s"' % (self._escape(self),),
- '"%s"' % (self._escape(topath),)]
- if self.rev is not None:
- args = ['-r', str(self.rev)] + args
- self._svncmdexecauth('svn export %s' % (' '.join(args),))
- return topath
-
- def ensure(self, *args, **kwargs):
- """ ensure that an args-joined path exists (by default as
- a file). If you specify a keyword argument 'dir=True'
- then the path is forced to be a directory path.
- """
- if getattr(self, 'rev', None) is not None:
- raise py.error.EINVAL(self, "revisions are immutable")
- target = self.join(*args)
- dir = kwargs.get('dir', 0)
- for x in target.parts(reverse=True):
- if x.check():
- break
- else:
-            raise py.error.ENOENT(target, "has no valid base!")
- if x == target:
- if not x.check(dir=dir):
- raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x)
- return x
- tocreate = target.relto(x)
- basename = tocreate.split(self.sep, 1)[0]
- tempdir = py.path.local.mkdtemp()
- try:
- tempdir.ensure(tocreate, dir=dir)
- cmd = 'svn import -m "%s" "%s" "%s"' % (
- "ensure %s" % self._escape(tocreate),
- self._escape(tempdir.join(basename)),
- x.join(basename)._encodedurl())
- self._svncmdexecauth(cmd)
- self._norev_delentry(x)
- finally:
- tempdir.remove()
- return target
-
- # end of modifying methods
- def _propget(self, name):
- res = self._svnwithrev('propget', name)
- return res[:-1] # strip trailing newline
-
- def _proplist(self):
- res = self._svnwithrev('proplist')
- lines = res.split('\n')
- lines = [x.strip() for x in lines[1:]]
- return svncommon.PropListDict(self, lines)
-
- def info(self):
- """ return an Info structure with svn-provided information. """
- parent = self.dirpath()
- nameinfo_seq = parent._listdir_nameinfo()
- bn = self.basename
- for name, info in nameinfo_seq:
- if name == bn:
- return info
- raise py.error.ENOENT(self)
-
-
- def _listdir_nameinfo(self):
- """ return sequence of name-info directory entries of self """
- def builder():
- try:
- res = self._svnwithrev('ls', '-v')
- except process.cmdexec.Error:
- e = sys.exc_info()[1]
- if e.err.find('non-existent in that revision') != -1:
- raise py.error.ENOENT(self, e.err)
- elif e.err.find("E200009:") != -1:
- raise py.error.ENOENT(self, e.err)
- elif e.err.find('File not found') != -1:
- raise py.error.ENOENT(self, e.err)
- elif e.err.find('not part of a repository')!=-1:
- raise py.error.ENOENT(self, e.err)
- elif e.err.find('Unable to open')!=-1:
- raise py.error.ENOENT(self, e.err)
- elif e.err.lower().find('method not allowed')!=-1:
- raise py.error.EACCES(self, e.err)
- raise py.error.Error(e.err)
- lines = res.split('\n')
- nameinfo_seq = []
- for lsline in lines:
- if lsline:
- info = InfoSvnCommand(lsline)
- if info._name != '.': # svn 1.5 produces '.' dirs,
- nameinfo_seq.append((info._name, info))
- nameinfo_seq.sort()
- return nameinfo_seq
- auth = self.auth and self.auth.makecmdoptions() or None
- if self.rev is not None:
- return self._lsrevcache.getorbuild((self.strpath, self.rev, auth),
- builder)
- else:
- return self._lsnorevcache.getorbuild((self.strpath, auth),
- builder)
-
- def listdir(self, fil=None, sort=None):
-        """ list directory contents, possibly filtered by the given 'fil'
-        func and possibly sorted.
- """
- if isinstance(fil, str):
- fil = common.FNMatcher(fil)
- nameinfo_seq = self._listdir_nameinfo()
- if len(nameinfo_seq) == 1:
- name, info = nameinfo_seq[0]
- if name == self.basename and info.kind == 'file':
- #if not self.check(dir=1):
- raise py.error.ENOTDIR(self)
- paths = [self.join(name) for (name, info) in nameinfo_seq]
- if fil:
- paths = [x for x in paths if fil(x)]
- self._sortlist(paths, sort)
- return paths
-
-
- def log(self, rev_start=None, rev_end=1, verbose=False):
- """ return a list of LogEntry instances for this path.
-rev_start is the starting revision (defaulting to the first one).
-rev_end is the last revision (defaulting to HEAD).
-if verbose is True, then the LogEntry instances also know which files changed.
-"""
- assert self.check() #make it simpler for the pipe
- rev_start = rev_start is None and "HEAD" or rev_start
- rev_end = rev_end is None and "HEAD" or rev_end
-
- if rev_start == "HEAD" and rev_end == 1:
- rev_opt = ""
- else:
- rev_opt = "-r %s:%s" % (rev_start, rev_end)
- verbose_opt = verbose and "-v" or ""
- xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' %
- (rev_opt, verbose_opt, self.strpath))
- from xml.dom import minidom
- tree = minidom.parse(xmlpipe)
- result = []
- for logentry in filter(None, tree.firstChild.childNodes):
- if logentry.nodeType == logentry.ELEMENT_NODE:
- result.append(svncommon.LogEntry(logentry))
- return result
-
-#01234567890123456789012345678901234567890123467
-# 2256 hpk 165 Nov 24 17:55 __init__.py
-# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!!
-# 1312 johnny 1627 May 05 14:32 test_decorators.py
-#
-class InfoSvnCommand:
- # the '0?' part in the middle is an indication of whether the resource is
- # locked, see 'svn help ls'
- lspattern = re.compile(
- r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? '
- '*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$')
- def __init__(self, line):
- # this is a typical line from 'svn ls http://...'
- #_ 1127 jum 0 Jul 13 15:28 branch/
- match = self.lspattern.match(line)
- data = match.groupdict()
- self._name = data['file']
- if self._name[-1] == '/':
- self._name = self._name[:-1]
- self.kind = 'dir'
- else:
- self.kind = 'file'
- #self.has_props = l.pop(0) == 'P'
- self.created_rev = int(data['rev'])
- self.last_author = data['author']
- self.size = data['size'] and int(data['size']) or 0
- self.mtime = parse_time_with_missing_year(data['date'])
- self.time = self.mtime * 1000000
-
- def __eq__(self, other):
- return self.__dict__ == other.__dict__
-
-
-#____________________________________________________
-#
-# helper functions
-#____________________________________________________
-def parse_time_with_missing_year(timestr):
-    """ analyze the time part from a single line of "svn ls -v";
-    the svn output doesn't show the year, which makes the 'timestr'
-    ambiguous.
- """
- import calendar
- t_now = time.gmtime()
-
- tparts = timestr.split()
- month = time.strptime(tparts.pop(0), '%b')[1]
- day = time.strptime(tparts.pop(0), '%d')[2]
- last = tparts.pop(0) # year or hour:minute
- try:
- if ":" in last:
- raise ValueError()
- year = time.strptime(last, '%Y')[0]
- hour = minute = 0
- except ValueError:
- hour, minute = time.strptime(last, '%H:%M')[3:5]
- year = t_now[0]
-
- t_result = (year, month, day, hour, minute, 0,0,0,0)
- if t_result > t_now:
- year -= 1
- t_result = (year, month, day, hour, minute, 0,0,0,0)
- return calendar.timegm(t_result)
-
-class PathEntry:
- def __init__(self, ppart):
- self.strpath = ppart.firstChild.nodeValue.encode('UTF-8')
- self.action = ppart.getAttribute('action').encode('UTF-8')
- if self.action == 'A':
- self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8')
- if self.copyfrom_path:
- self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))
-
+"""
+module defining a subversion path object based on the external
+command 'svn'. This module aims to work with svn 1.3 and higher
+but might also interact well with earlier versions.
+"""
+
+import os, sys, time, re
+import py
+from py import path, process
+from py._path import common
+from py._path import svnwc as svncommon
+from py._path.cacheutil import BuildcostAccessCache, AgingCache
+
+DEBUG=False
+
+class SvnCommandPath(svncommon.SvnPathBase):
+ """ path implementation that offers access to (possibly remote) subversion
+ repositories. """
+
+ _lsrevcache = BuildcostAccessCache(maxentries=128)
+ _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0)
+
+ def __new__(cls, path, rev=None, auth=None):
+ self = object.__new__(cls)
+ if isinstance(path, cls):
+ rev = path.rev
+ auth = path.auth
+ path = path.strpath
+ svncommon.checkbadchars(path)
+ path = path.rstrip('/')
+ self.strpath = path
+ self.rev = rev
+ self.auth = auth
+ return self
+
+ def __repr__(self):
+ if self.rev == -1:
+ return 'svnurl(%r)' % self.strpath
+ else:
+ return 'svnurl(%r, %r)' % (self.strpath, self.rev)
+
+ def _svnwithrev(self, cmd, *args):
+ """ execute an svn command, append our own url and revision """
+ if self.rev is None:
+ return self._svnwrite(cmd, *args)
+ else:
+ args = ['-r', self.rev] + list(args)
+ return self._svnwrite(cmd, *args)
+
+ def _svnwrite(self, cmd, *args):
+ """ execute an svn command, append our own url """
+ l = ['svn %s' % cmd]
+ args = ['"%s"' % self._escape(item) for item in args]
+ l.extend(args)
+ l.append('"%s"' % self._encodedurl())
+ # fixing the locale because we can't otherwise parse
+ string = " ".join(l)
+ if DEBUG:
+ print("execing %s" % string)
+ out = self._svncmdexecauth(string)
+ return out
+
+ def _svncmdexecauth(self, cmd):
+ """ execute an svn command 'as is' """
+ cmd = svncommon.fixlocale() + cmd
+ if self.auth is not None:
+ cmd += ' ' + self.auth.makecmdoptions()
+ return self._cmdexec(cmd)
+
+ def _cmdexec(self, cmd):
+ try:
+ out = process.cmdexec(cmd)
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if (e.err.find('File Exists') != -1 or
+ e.err.find('File already exists') != -1):
+ raise py.error.EEXIST(self)
+ raise
+ return out
+
+ def _svnpopenauth(self, cmd):
+        """ execute an svn command, return a pipe for reading its output """
+ cmd = svncommon.fixlocale() + cmd
+ if self.auth is not None:
+ cmd += ' ' + self.auth.makecmdoptions()
+ return self._popen(cmd)
+
+ def _popen(self, cmd):
+ return os.popen(cmd)
+
+ def _encodedurl(self):
+ return self._escape(self.strpath)
+
+ def _norev_delentry(self, path):
+ auth = self.auth and self.auth.makecmdoptions() or None
+ self._lsnorevcache.delentry((str(path), auth))
+
+ def open(self, mode='r'):
+ """ return an opened file with the given mode. """
+ if mode not in ("r", "rU",):
+ raise ValueError("mode %r not supported" % (mode,))
+ assert self.check(file=1) # svn cat returns an empty file otherwise
+ if self.rev is None:
+ return self._svnpopenauth('svn cat "%s"' % (
+ self._escape(self.strpath), ))
+ else:
+ return self._svnpopenauth('svn cat -r %s "%s"' % (
+ self.rev, self._escape(self.strpath)))
+
+ def dirpath(self, *args, **kwargs):
+ """ return the directory path of the current path joined
+ with any given path arguments.
+ """
+ l = self.strpath.split(self.sep)
+ if len(l) < 4:
+ raise py.error.EINVAL(self, "base is not valid")
+ elif len(l) == 4:
+ return self.join(*args, **kwargs)
+ else:
+ return self.new(basename='').join(*args, **kwargs)
+
+ # modifying methods (cache must be invalidated)
+ def mkdir(self, *args, **kwargs):
+ """ create & return the directory joined with args.
+ pass a 'msg' keyword argument to set the commit message.
+ """
+ commit_msg = kwargs.get('msg', "mkdir by py lib invocation")
+ createpath = self.join(*args)
+ createpath._svnwrite('mkdir', '-m', commit_msg)
+ self._norev_delentry(createpath.dirpath())
+ return createpath
+
+ def copy(self, target, msg='copied by py lib invocation'):
+ """ copy path to target with checkin message msg."""
+ if getattr(target, 'rev', None) is not None:
+ raise py.error.EINVAL(target, "revisions are immutable")
+ self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg,
+ self._escape(self), self._escape(target)))
+ self._norev_delentry(target.dirpath())
+
+ def rename(self, target, msg="renamed by py lib invocation"):
+ """ rename this path to target with checkin message msg. """
+ if getattr(self, 'rev', None) is not None:
+ raise py.error.EINVAL(self, "revisions are immutable")
+ self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %(
+ msg, self._escape(self), self._escape(target)))
+ self._norev_delentry(self.dirpath())
+ self._norev_delentry(self)
+
+ def remove(self, rec=1, msg='removed by py lib invocation'):
+ """ remove a file or directory (or a directory tree if rec=1) with
+checkin message msg."""
+ if self.rev is not None:
+ raise py.error.EINVAL(self, "revisions are immutable")
+ self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self)))
+ self._norev_delentry(self.dirpath())
+
+ def export(self, topath):
+ """ export to a local path
+
+ topath should not exist prior to calling this, returns a
+ py.path.local instance
+ """
+ topath = py.path.local(topath)
+ args = ['"%s"' % (self._escape(self),),
+ '"%s"' % (self._escape(topath),)]
+ if self.rev is not None:
+ args = ['-r', str(self.rev)] + args
+ self._svncmdexecauth('svn export %s' % (' '.join(args),))
+ return topath
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). If you specify a keyword argument 'dir=True'
+ then the path is forced to be a directory path.
+ """
+ if getattr(self, 'rev', None) is not None:
+ raise py.error.EINVAL(self, "revisions are immutable")
+ target = self.join(*args)
+ dir = kwargs.get('dir', 0)
+ for x in target.parts(reverse=True):
+ if x.check():
+ break
+ else:
+            raise py.error.ENOENT(target, "has no valid base!")
+ if x == target:
+ if not x.check(dir=dir):
+ raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x)
+ return x
+ tocreate = target.relto(x)
+ basename = tocreate.split(self.sep, 1)[0]
+ tempdir = py.path.local.mkdtemp()
+ try:
+ tempdir.ensure(tocreate, dir=dir)
+ cmd = 'svn import -m "%s" "%s" "%s"' % (
+ "ensure %s" % self._escape(tocreate),
+ self._escape(tempdir.join(basename)),
+ x.join(basename)._encodedurl())
+ self._svncmdexecauth(cmd)
+ self._norev_delentry(x)
+ finally:
+ tempdir.remove()
+ return target
+
+ # end of modifying methods
+ def _propget(self, name):
+ res = self._svnwithrev('propget', name)
+ return res[:-1] # strip trailing newline
+
+ def _proplist(self):
+ res = self._svnwithrev('proplist')
+ lines = res.split('\n')
+ lines = [x.strip() for x in lines[1:]]
+ return svncommon.PropListDict(self, lines)
+
+ def info(self):
+ """ return an Info structure with svn-provided information. """
+ parent = self.dirpath()
+ nameinfo_seq = parent._listdir_nameinfo()
+ bn = self.basename
+ for name, info in nameinfo_seq:
+ if name == bn:
+ return info
+ raise py.error.ENOENT(self)
+
+
+ def _listdir_nameinfo(self):
+ """ return sequence of name-info directory entries of self """
+ def builder():
+ try:
+ res = self._svnwithrev('ls', '-v')
+ except process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('non-existent in that revision') != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find("E200009:") != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find('File not found') != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find('not part of a repository')!=-1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find('Unable to open')!=-1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.lower().find('method not allowed')!=-1:
+ raise py.error.EACCES(self, e.err)
+ raise py.error.Error(e.err)
+ lines = res.split('\n')
+ nameinfo_seq = []
+ for lsline in lines:
+ if lsline:
+ info = InfoSvnCommand(lsline)
+ if info._name != '.': # svn 1.5 produces '.' dirs,
+ nameinfo_seq.append((info._name, info))
+ nameinfo_seq.sort()
+ return nameinfo_seq
+ auth = self.auth and self.auth.makecmdoptions() or None
+ if self.rev is not None:
+ return self._lsrevcache.getorbuild((self.strpath, self.rev, auth),
+ builder)
+ else:
+ return self._lsnorevcache.getorbuild((self.strpath, auth),
+ builder)
+
+ def listdir(self, fil=None, sort=None):
+        """ list directory contents, possibly filtered by the given 'fil'
+        func and possibly sorted.
+ """
+ if isinstance(fil, str):
+ fil = common.FNMatcher(fil)
+ nameinfo_seq = self._listdir_nameinfo()
+ if len(nameinfo_seq) == 1:
+ name, info = nameinfo_seq[0]
+ if name == self.basename and info.kind == 'file':
+ #if not self.check(dir=1):
+ raise py.error.ENOTDIR(self)
+ paths = [self.join(name) for (name, info) in nameinfo_seq]
+ if fil:
+ paths = [x for x in paths if fil(x)]
+ self._sortlist(paths, sort)
+ return paths
+
+
+ def log(self, rev_start=None, rev_end=1, verbose=False):
+ """ return a list of LogEntry instances for this path.
+rev_start is the starting revision (defaulting to the first one).
+rev_end is the last revision (defaulting to HEAD).
+if verbose is True, then the LogEntry instances also know which files changed.
+"""
+ assert self.check() #make it simpler for the pipe
+ rev_start = rev_start is None and "HEAD" or rev_start
+ rev_end = rev_end is None and "HEAD" or rev_end
+
+ if rev_start == "HEAD" and rev_end == 1:
+ rev_opt = ""
+ else:
+ rev_opt = "-r %s:%s" % (rev_start, rev_end)
+ verbose_opt = verbose and "-v" or ""
+ xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' %
+ (rev_opt, verbose_opt, self.strpath))
+ from xml.dom import minidom
+ tree = minidom.parse(xmlpipe)
+ result = []
+ for logentry in filter(None, tree.firstChild.childNodes):
+ if logentry.nodeType == logentry.ELEMENT_NODE:
+ result.append(svncommon.LogEntry(logentry))
+ return result
+
+#01234567890123456789012345678901234567890123467
+# 2256 hpk 165 Nov 24 17:55 __init__.py
+# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!!
+# 1312 johnny 1627 May 05 14:32 test_decorators.py
+#
+class InfoSvnCommand:
+ # the '0?' part in the middle is an indication of whether the resource is
+ # locked, see 'svn help ls'
+ lspattern = re.compile(
+ r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? '
+ r'*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$')
+ def __init__(self, line):
+ # this is a typical line from 'svn ls http://...'
+ #_ 1127 jum 0 Jul 13 15:28 branch/
+ match = self.lspattern.match(line)
+ data = match.groupdict()
+ self._name = data['file']
+ if self._name[-1] == '/':
+ self._name = self._name[:-1]
+ self.kind = 'dir'
+ else:
+ self.kind = 'file'
+ #self.has_props = l.pop(0) == 'P'
+ self.created_rev = int(data['rev'])
+ self.last_author = data['author']
+ self.size = data['size'] and int(data['size']) or 0
+ self.mtime = parse_time_with_missing_year(data['date'])
+ self.time = self.mtime * 1000000
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+
+#____________________________________________________
+#
+# helper functions
+#____________________________________________________
+def parse_time_with_missing_year(timestr):
+    """ analyze the time part from a single line of "svn ls -v";
+    the svn output doesn't show the year, which makes the 'timestr'
+    ambiguous.
+ """
+ import calendar
+ t_now = time.gmtime()
+
+ tparts = timestr.split()
+ month = time.strptime(tparts.pop(0), '%b')[1]
+ day = time.strptime(tparts.pop(0), '%d')[2]
+ last = tparts.pop(0) # year or hour:minute
+ try:
+ if ":" in last:
+ raise ValueError()
+ year = time.strptime(last, '%Y')[0]
+ hour = minute = 0
+ except ValueError:
+ hour, minute = time.strptime(last, '%H:%M')[3:5]
+ year = t_now[0]
+
+ t_result = (year, month, day, hour, minute, 0,0,0,0)
+ if t_result > t_now:
+ year -= 1
+ t_result = (year, month, day, hour, minute, 0,0,0,0)
+ return calendar.timegm(t_result)
+
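+# Illustrative sketch (not part of upstream py): 'svn ls -v' omits the year
+# for recent entries, so parse_time_with_missing_year() assumes the current
+# year (or the previous one if the result would lie in the future) and
+# returns epoch seconds.
+#
+#   >>> parse_time_with_missing_year("Nov 24 17:55")    # recent entry
+#   >>> parse_time_with_missing_year("May 05 2016")     # older entry, year given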
+class PathEntry:
+ def __init__(self, ppart):
+ self.strpath = ppart.firstChild.nodeValue.encode('UTF-8')
+ self.action = ppart.getAttribute('action').encode('UTF-8')
+ if self.action == 'A':
+ self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8')
+ if self.copyfrom_path:
+ self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))
+
diff --git a/py/_path/svnwc.py b/py/_path/svnwc.py
index 00d3b4b..07233b0 100644
--- a/py/_path/svnwc.py
+++ b/py/_path/svnwc.py
@@ -1,1240 +1,1240 @@
-"""
-svn-Command based Implementation of a Subversion WorkingCopy Path.
-
- SvnWCCommandPath is the main class.
-
-"""
-
-import os, sys, time, re, calendar
-import py
-import subprocess
-from py._path import common
-
-#-----------------------------------------------------------
-# Caching latest repository revision and repo-paths
-# (getting them is slow with the current implementations)
-#
-# XXX make mt-safe
-#-----------------------------------------------------------
-
-class cache:
- proplist = {}
- info = {}
- entries = {}
- prop = {}
-
-class RepoEntry:
- def __init__(self, url, rev, timestamp):
- self.url = url
- self.rev = rev
- self.timestamp = timestamp
-
- def __str__(self):
- return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp)
-
-class RepoCache:
- """ The Repocache manages discovered repository paths
- and their revisions. If inside a timeout the cache
- will even return the revision of the root.
- """
- timeout = 20 # seconds after which we forget that we know the last revision
-
- def __init__(self):
- self.repos = []
-
- def clear(self):
- self.repos = []
-
- def put(self, url, rev, timestamp=None):
- if rev is None:
- return
- if timestamp is None:
- timestamp = time.time()
-
- for entry in self.repos:
- if url == entry.url:
- entry.timestamp = timestamp
- entry.rev = rev
- #print "set repo", entry
- break
- else:
- entry = RepoEntry(url, rev, timestamp)
- self.repos.append(entry)
- #print "appended repo", entry
-
- def get(self, url):
- now = time.time()
- for entry in self.repos:
- if url.startswith(entry.url):
- if now < entry.timestamp + self.timeout:
- #print "returning immediate Etrny", entry
- return entry.url, entry.rev
- return entry.url, -1
- return url, -1
-
-repositories = RepoCache()
-
-
-# svn support code
-
-ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
-if sys.platform == "win32":
- ALLOWED_CHARS += ":"
-ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
-
-def _getsvnversion(ver=[]):
- try:
- return ver[0]
- except IndexError:
- v = py.process.cmdexec("svn -q --version")
- v.strip()
- v = '.'.join(v.split('.')[:2])
- ver.append(v)
- return v
-
-def _escape_helper(text):
- text = str(text)
- if py.std.sys.platform != 'win32':
- text = str(text).replace('$', '\\$')
- return text
-
-def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
- for c in str(text):
- if c.isalnum():
- continue
- if c in allowed_chars:
- continue
- return True
- return False
-
-def checkbadchars(url):
- # (hpk) not quite sure about the exact purpose, guido w.?
- proto, uri = url.split("://", 1)
- if proto != "file":
- host, uripath = uri.split('/', 1)
- # only check for bad chars in the non-protocol parts
- if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \
- or _check_for_bad_chars(uripath, ALLOWED_CHARS)):
- raise ValueError("bad char in %r" % (url, ))
-
-
-#_______________________________________________________________
-
-class SvnPathBase(common.PathBase):
- """ Base implementation for SvnPath implementations. """
- sep = '/'
-
- def _geturl(self):
- return self.strpath
- url = property(_geturl, None, None, "url of this svn-path.")
-
- def __str__(self):
- """ return a string representation (including rev-number) """
- return self.strpath
-
- def __hash__(self):
- return hash(self.strpath)
-
- def new(self, **kw):
- """ create a modified version of this path. A 'rev' argument
- indicates a new revision.
- the following keyword arguments modify various path parts::
-
- http://host.com/repo/path/file.ext
- |-----------------------| dirname
- |------| basename
- |--| purebasename
- |--| ext
- """
- obj = object.__new__(self.__class__)
- obj.rev = kw.get('rev', self.rev)
- obj.auth = kw.get('auth', self.auth)
- dirname, basename, purebasename, ext = self._getbyspec(
- "dirname,basename,purebasename,ext")
- if 'basename' in kw:
- if 'purebasename' in kw or 'ext' in kw:
- raise ValueError("invalid specification %r" % kw)
- else:
- pb = kw.setdefault('purebasename', purebasename)
- ext = kw.setdefault('ext', ext)
- if ext and not ext.startswith('.'):
- ext = '.' + ext
- kw['basename'] = pb + ext
-
- kw.setdefault('dirname', dirname)
- kw.setdefault('sep', self.sep)
- if kw['basename']:
- obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
- else:
- obj.strpath = "%(dirname)s" % kw
- return obj
-
- def _getbyspec(self, spec):
- """ get specified parts of the path. 'arg' is a string
- with comma separated path parts. The parts are returned
- in exactly the order of the specification.
-
- you may specify the following parts:
-
- http://host.com/repo/path/file.ext
- |-----------------------| dirname
- |------| basename
- |--| purebasename
- |--| ext
- """
- res = []
- parts = self.strpath.split(self.sep)
- for name in spec.split(','):
- name = name.strip()
- if name == 'dirname':
- res.append(self.sep.join(parts[:-1]))
- elif name == 'basename':
- res.append(parts[-1])
- else:
- basename = parts[-1]
- i = basename.rfind('.')
- if i == -1:
- purebasename, ext = basename, ''
- else:
- purebasename, ext = basename[:i], basename[i:]
- if name == 'purebasename':
- res.append(purebasename)
- elif name == 'ext':
- res.append(ext)
- else:
- raise NameError("Don't know part %r" % name)
- return res
-
- def __eq__(self, other):
- """ return true if path and rev attributes each match """
- return (str(self) == str(other) and
- (self.rev == other.rev or self.rev == other.rev))
-
- def __ne__(self, other):
- return not self == other
-
- def join(self, *args):
- """ return a new Path (with the same revision) which is composed
- of the self Path followed by 'args' path components.
- """
- if not args:
- return self
-
- args = tuple([arg.strip(self.sep) for arg in args])
- parts = (self.strpath, ) + args
- newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
- return newpath
-
- def propget(self, name):
- """ return the content of the given property. """
- value = self._propget(name)
- return value
-
- def proplist(self):
- """ list all property names. """
- content = self._proplist()
- return content
-
- def size(self):
- """ Return the size of the file content of the Path. """
- return self.info().size
-
- def mtime(self):
- """ Return the last modification time of the file. """
- return self.info().mtime
-
- # shared help methods
-
- def _escape(self, cmd):
- return _escape_helper(cmd)
-
-
- #def _childmaxrev(self):
- # """ return maximum revision number of childs (or self.rev if no childs) """
- # rev = self.rev
- # for name, info in self._listdir_nameinfo():
- # rev = max(rev, info.created_rev)
- # return rev
-
- #def _getlatestrevision(self):
- # """ return latest repo-revision for this path. """
- # url = self.strpath
- # path = self.__class__(url, None)
- #
- # # we need a long walk to find the root-repo and revision
- # while 1:
- # try:
- # rev = max(rev, path._childmaxrev())
- # previous = path
- # path = path.dirpath()
- # except (IOError, process.cmdexec.Error):
- # break
- # if rev is None:
- # raise IOError, "could not determine newest repo revision for %s" % self
- # return rev
-
- class Checkers(common.Checkers):
- def dir(self):
- try:
- return self.path.info().kind == 'dir'
- except py.error.Error:
- return self._listdirworks()
-
- def _listdirworks(self):
- try:
- self.path.listdir()
- except py.error.ENOENT:
- return False
- else:
- return True
-
- def file(self):
- try:
- return self.path.info().kind == 'file'
- except py.error.ENOENT:
- return False
-
- def exists(self):
- try:
- return self.path.info()
- except py.error.ENOENT:
- return self._listdirworks()
-
-def parse_apr_time(timestr):
- i = timestr.rfind('.')
- if i == -1:
- raise ValueError("could not parse %s" % timestr)
- timestr = timestr[:i]
- parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
- return time.mktime(parsedtime)
-
-class PropListDict(dict):
- """ a Dictionary which fetches values (InfoSvnCommand instances) lazily"""
- def __init__(self, path, keynames):
- dict.__init__(self, [(x, None) for x in keynames])
- self.path = path
-
- def __getitem__(self, key):
- value = dict.__getitem__(self, key)
- if value is None:
- value = self.path.propget(key)
- dict.__setitem__(self, key, value)
- return value
-
-def fixlocale():
- if sys.platform != 'win32':
- return 'LC_ALL=C '
- return ''
-
-# some nasty chunk of code to solve path and url conversion and quoting issues
-ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ')
-if os.sep in ILLEGAL_CHARS:
- ILLEGAL_CHARS.remove(os.sep)
-ISWINDOWS = sys.platform == 'win32'
-_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
-def _check_path(path):
- illegal = ILLEGAL_CHARS[:]
- sp = path.strpath
- if ISWINDOWS:
- illegal.remove(':')
- if not _reg_allow_disk.match(sp):
- raise ValueError('path may not contain a colon (:)')
- for char in sp:
- if char not in string.printable or char in illegal:
- raise ValueError('illegal character %r in path' % (char,))
-
-def path_to_fspath(path, addat=True):
- _check_path(path)
- sp = path.strpath
- if addat and path.rev != -1:
- sp = '%s@%s' % (sp, path.rev)
- elif addat:
- sp = '%s@HEAD' % (sp,)
- return sp
-
-def url_from_path(path):
- fspath = path_to_fspath(path, False)
- quote = py.std.urllib.quote
- if ISWINDOWS:
- match = _reg_allow_disk.match(fspath)
- fspath = fspath.replace('\\', '/')
- if match.group(1):
- fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
- quote(fspath[len(match.group(1)):]))
- else:
- fspath = quote(fspath)
- else:
- fspath = quote(fspath)
- if path.rev != -1:
- fspath = '%s@%s' % (fspath, path.rev)
- else:
- fspath = '%s@HEAD' % (fspath,)
- return 'file://%s' % (fspath,)
-
-class SvnAuth(object):
- """ container for auth information for Subversion """
- def __init__(self, username, password, cache_auth=True, interactive=True):
- self.username = username
- self.password = password
- self.cache_auth = cache_auth
- self.interactive = interactive
-
- def makecmdoptions(self):
- uname = self.username.replace('"', '\\"')
- passwd = self.password.replace('"', '\\"')
- ret = []
- if uname:
- ret.append('--username="%s"' % (uname,))
- if passwd:
- ret.append('--password="%s"' % (passwd,))
- if not self.cache_auth:
- ret.append('--no-auth-cache')
- if not self.interactive:
- ret.append('--non-interactive')
- return ' '.join(ret)
-
- def __str__(self):
- return "<SvnAuth username=%s ...>" %(self.username,)
-
-rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
-
-class SvnWCCommandPath(common.PathBase):
- """ path implementation offering access/modification to svn working copies.
- It has methods similar to the functions in os.path and similar to the
- commands of the svn client.
- """
- sep = os.sep
-
- def __new__(cls, wcpath=None, auth=None):
- self = object.__new__(cls)
- if isinstance(wcpath, cls):
- if wcpath.__class__ == cls:
- return wcpath
- wcpath = wcpath.localpath
- if _check_for_bad_chars(str(wcpath),
- ALLOWED_CHARS):
- raise ValueError("bad char in wcpath %s" % (wcpath, ))
- self.localpath = py.path.local(wcpath)
- self.auth = auth
- return self
-
- strpath = property(lambda x: str(x.localpath), None, None, "string path")
- rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
-
- def __eq__(self, other):
- return self.localpath == getattr(other, 'localpath', None)
-
- def _geturl(self):
- if getattr(self, '_url', None) is None:
- info = self.info()
- self._url = info.url #SvnPath(info.url, info.rev)
- assert isinstance(self._url, py.builtin._basestring)
- return self._url
-
- url = property(_geturl, None, None, "url of this WC item")
-
- def _escape(self, cmd):
- return _escape_helper(cmd)
-
- def dump(self, obj):
- """ pickle object into path location"""
- return self.localpath.dump(obj)
-
- def svnurl(self):
- """ return current SvnPath for this WC-item. """
- info = self.info()
- return py.path.svnurl(info.url)
-
- def __repr__(self):
- return "svnwc(%r)" % (self.strpath) # , self._url)
-
- def __str__(self):
- return str(self.localpath)
-
- def _makeauthoptions(self):
- if self.auth is None:
- return ''
- return self.auth.makecmdoptions()
-
- def _authsvn(self, cmd, args=None):
- args = args and list(args) or []
- args.append(self._makeauthoptions())
- return self._svn(cmd, *args)
-
- def _svn(self, cmd, *args):
- l = ['svn %s' % cmd]
- args = [self._escape(item) for item in args]
- l.extend(args)
- l.append('"%s"' % self._escape(self.strpath))
- # try fixing the locale because we can't otherwise parse
- string = fixlocale() + " ".join(l)
- try:
- try:
- key = 'LC_MESSAGES'
- hold = os.environ.get(key)
- os.environ[key] = 'C'
- out = py.process.cmdexec(string)
- finally:
- if hold:
- os.environ[key] = hold
- else:
- del os.environ[key]
- except py.process.cmdexec.Error:
- e = sys.exc_info()[1]
- strerr = e.err.lower()
- if strerr.find('not found') != -1:
- raise py.error.ENOENT(self)
- elif strerr.find("E200009:") != -1:
- raise py.error.ENOENT(self)
- if (strerr.find('file exists') != -1 or
- strerr.find('file already exists') != -1 or
- strerr.find('w150002:') != -1 or
- strerr.find("can't create directory") != -1):
- raise py.error.EEXIST(strerr) #self)
- raise
- return out
-
- def switch(self, url):
- """ switch to given URL. """
- self._authsvn('switch', [url])
-
- def checkout(self, url=None, rev=None):
- """ checkout from url to local wcpath. """
- args = []
- if url is None:
- url = self.url
- if rev is None or rev == -1:
- if (py.std.sys.platform != 'win32' and
- _getsvnversion() == '1.3'):
- url += "@HEAD"
- else:
- if _getsvnversion() == '1.3':
- url += "@%d" % rev
- else:
- args.append('-r' + str(rev))
- args.append(url)
- self._authsvn('co', args)
-
- def update(self, rev='HEAD', interactive=True):
- """ update working copy item to given revision. (None -> HEAD). """
- opts = ['-r', rev]
- if not interactive:
- opts.append("--non-interactive")
- self._authsvn('up', opts)
-
- def write(self, content, mode='w'):
- """ write content into local filesystem wc. """
- self.localpath.write(content, mode)
-
- def dirpath(self, *args):
- """ return the directory Path of the current Path. """
- return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
-
- def _ensuredirs(self):
- parent = self.dirpath()
- if parent.check(dir=0):
- parent._ensuredirs()
- if self.check(dir=0):
- self.mkdir()
- return self
-
- def ensure(self, *args, **kwargs):
- """ ensure that an args-joined path exists (by default as
-        a file). If you specify a keyword argument 'dir=True'
- then the path is forced to be a directory path.
- """
- p = self.join(*args)
- if p.check():
- if p.check(versioned=False):
- p.add()
- return p
- if kwargs.get('dir', 0):
- return p._ensuredirs()
- parent = p.dirpath()
- parent._ensuredirs()
- p.write("")
- p.add()
- return p
-
- def mkdir(self, *args):
- """ create & return the directory joined with args. """
- if args:
- return self.join(*args).mkdir()
- else:
- self._svn('mkdir')
- return self
-
- def add(self):
- """ add ourself to svn """
- self._svn('add')
-
- def remove(self, rec=1, force=1):
- """ remove a file or a directory tree. 'rec'ursive is
- ignored and considered always true (because of
-        underlying svn semantics).
- """
- assert rec, "svn cannot remove non-recursively"
- if not self.check(versioned=True):
- # not added to svn (anymore?), just remove
- py.path.local(self).remove()
- return
- flags = []
- if force:
- flags.append('--force')
- self._svn('remove', *flags)
-
- def copy(self, target):
- """ copy path to target."""
- py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
-
- def rename(self, target):
- """ rename this path to target. """
- py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
-
- def lock(self):
- """ set a lock (exclusive) on the resource """
- out = self._authsvn('lock').strip()
- if not out:
- # warning or error, raise exception
- raise ValueError("unknown error in svn lock command")
-
- def unlock(self):
- """ unset a previously set lock """
- out = self._authsvn('unlock').strip()
- if out.startswith('svn:'):
- # warning or error, raise exception
- raise Exception(out[4:])
-
- def cleanup(self):
- """ remove any locks from the resource """
- # XXX should be fixed properly!!!
- try:
- self.unlock()
- except:
- pass
-
- def status(self, updates=0, rec=0, externals=0):
- """ return (collective) Status object for this file. """
- # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
- # 2201 2192 jum test
- # XXX
- if externals:
- raise ValueError("XXX cannot perform status() "
- "on external items yet")
- else:
- #1.2 supports: externals = '--ignore-externals'
- externals = ''
- if rec:
- rec= ''
- else:
- rec = '--non-recursive'
-
- # XXX does not work on all subversion versions
- #if not externals:
- # externals = '--ignore-externals'
-
- if updates:
- updates = '-u'
- else:
- updates = ''
-
- try:
- cmd = 'status -v --xml --no-ignore %s %s %s' % (
- updates, rec, externals)
- out = self._authsvn(cmd)
- except py.process.cmdexec.Error:
- cmd = 'status -v --no-ignore %s %s %s' % (
- updates, rec, externals)
- out = self._authsvn(cmd)
- rootstatus = WCStatus(self).fromstring(out, self)
- else:
- rootstatus = XMLWCStatus(self).fromstring(out, self)
- return rootstatus
-
- def diff(self, rev=None):
- """ return a diff of the current path against revision rev (defaulting
- to the last one).
- """
- args = []
- if rev is not None:
- args.append("-r %d" % rev)
- out = self._authsvn('diff', args)
- return out
-
- def blame(self):
- """ return a list of tuples of three elements:
- (revision, commiter, line)
- """
- out = self._svn('blame')
- result = []
- blamelines = out.splitlines()
- reallines = py.path.svnurl(self.url).readlines()
- for i, (blameline, line) in enumerate(
- zip(blamelines, reallines)):
- m = rex_blame.match(blameline)
- if not m:
- raise ValueError("output line %r of svn blame does not match "
- "expected format" % (line, ))
- rev, name, _ = m.groups()
- result.append((int(rev), name, line))
- return result
-
- _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
- def commit(self, msg='', rec=1):
- """ commit with support for non-recursive commits """
- # XXX i guess escaping should be done better here?!?
- cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
- if not rec:
- cmd += ' -N'
- out = self._authsvn(cmd)
- try:
- del cache.info[self]
- except KeyError:
- pass
- if out:
- m = self._rex_commit.match(out)
- return int(m.group(1))
-
- def propset(self, name, value, *args):
- """ set property name to value on this path. """
- d = py.path.local.mkdtemp()
- try:
- p = d.join('value')
- p.write(value)
- self._svn('propset', name, '--file', str(p), *args)
- finally:
- d.remove()
-
- def propget(self, name):
- """ get property name on this path. """
- res = self._svn('propget', name)
- return res[:-1] # strip trailing newline
-
- def propdel(self, name):
- """ delete property name on this path. """
- res = self._svn('propdel', name)
- return res[:-1] # strip trailing newline
-
- def proplist(self, rec=0):
- """ return a mapping of property names to property values.
-If rec is True, then return a dictionary mapping sub-paths to such mappings.
-"""
- if rec:
- res = self._svn('proplist -R')
- return make_recursive_propdict(self, res)
- else:
- res = self._svn('proplist')
- lines = res.split('\n')
- lines = [x.strip() for x in lines[1:]]
- return PropListDict(self, lines)
-
- def revert(self, rec=0):
- """ revert the local changes of this path. if rec is True, do so
-recursively. """
- if rec:
- result = self._svn('revert -R')
- else:
- result = self._svn('revert')
- return result
-
- def new(self, **kw):
- """ create a modified version of this path. A 'rev' argument
- indicates a new revision.
- the following keyword arguments modify various path parts:
-
- http://host.com/repo/path/file.ext
- |-----------------------| dirname
- |------| basename
- |--| purebasename
- |--| ext
- """
- if kw:
- localpath = self.localpath.new(**kw)
- else:
- localpath = self.localpath
- return self.__class__(localpath, auth=self.auth)
-
- def join(self, *args, **kwargs):
- """ return a new Path (with the same revision) which is composed
- of the self Path followed by 'args' path components.
- """
- if not args:
- return self
- localpath = self.localpath.join(*args, **kwargs)
- return self.__class__(localpath, auth=self.auth)
-
- def info(self, usecache=1):
- """ return an Info structure with svn-provided information. """
- info = usecache and cache.info.get(self)
- if not info:
- try:
- output = self._svn('info')
- except py.process.cmdexec.Error:
- e = sys.exc_info()[1]
- if e.err.find('Path is not a working copy directory') != -1:
- raise py.error.ENOENT(self, e.err)
- elif e.err.find("is not under version control") != -1:
- raise py.error.ENOENT(self, e.err)
- raise
- # XXX SVN 1.3 has output on stderr instead of stdout (while it does
- # return 0!), so a bit nasty, but we assume no output is output
- # to stderr...
- if (output.strip() == '' or
- output.lower().find('not a versioned resource') != -1):
- raise py.error.ENOENT(self, output)
- info = InfoSvnWCCommand(output)
-
- # Can't reliably compare on Windows without access to win32api
- if py.std.sys.platform != 'win32':
- if info.path != self.localpath:
- raise py.error.ENOENT(self, "not a versioned resource:" +
- " %s != %s" % (info.path, self.localpath))
- cache.info[self] = info
- return info
-
- def listdir(self, fil=None, sort=None):
- """ return a sequence of Paths.
-
- listdir will return either a tuple or a list of paths
- depending on implementation choices.
- """
- if isinstance(fil, str):
- fil = common.FNMatcher(fil)
- # XXX unify argument naming with LocalPath.listdir
- def notsvn(path):
- return path.basename != '.svn'
-
- paths = []
- for localpath in self.localpath.listdir(notsvn):
- p = self.__class__(localpath, auth=self.auth)
- if notsvn(p) and (not fil or fil(p)):
- paths.append(p)
- self._sortlist(paths, sort)
- return paths
-
- def open(self, mode='r'):
- """ return an opened file with the given mode. """
- return open(self.strpath, mode)
-
- def _getbyspec(self, spec):
- return self.localpath._getbyspec(spec)
-
- class Checkers(py.path.local.Checkers):
- def __init__(self, path):
- self.svnwcpath = path
- self.path = path.localpath
- def versioned(self):
- try:
- s = self.svnwcpath.info()
- except (py.error.ENOENT, py.error.EEXIST):
- return False
- except py.process.cmdexec.Error:
- e = sys.exc_info()[1]
- if e.err.find('is not a working copy')!=-1:
- return False
- if e.err.lower().find('not a versioned resource') != -1:
- return False
- raise
- else:
- return True
-
- def log(self, rev_start=None, rev_end=1, verbose=False):
- """ return a list of LogEntry instances for this path.
-rev_start is the starting revision (defaulting to the first one).
-rev_end is the last revision (defaulting to HEAD).
-if verbose is True, then the LogEntry instances also know which files changed.
-"""
- assert self.check() # make it simpler for the pipe
- rev_start = rev_start is None and "HEAD" or rev_start
- rev_end = rev_end is None and "HEAD" or rev_end
- if rev_start == "HEAD" and rev_end == 1:
- rev_opt = ""
- else:
- rev_opt = "-r %s:%s" % (rev_start, rev_end)
- verbose_opt = verbose and "-v" or ""
- locale_env = fixlocale()
- # some blather on stderr
- auth_opt = self._makeauthoptions()
- #stdin, stdout, stderr = os.popen3(locale_env +
- # 'svn log --xml %s %s %s "%s"' % (
- # rev_opt, verbose_opt, auth_opt,
- # self.strpath))
- cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
- rev_opt, verbose_opt, auth_opt, self.strpath)
-
- popen = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- )
- stdout, stderr = popen.communicate()
- stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
- minidom,ExpatError = importxml()
- try:
- tree = minidom.parseString(stdout)
- except ExpatError:
- raise ValueError('no such revision')
- result = []
- for logentry in filter(None, tree.firstChild.childNodes):
- if logentry.nodeType == logentry.ELEMENT_NODE:
- result.append(LogEntry(logentry))
- return result
-
- def size(self):
- """ Return the size of the file content of the Path. """
- return self.info().size
-
- def mtime(self):
- """ Return the last modification time of the file. """
- return self.info().mtime
-
- def __hash__(self):
- return hash((self.strpath, self.__class__, self.auth))
-
-
-class WCStatus:
- attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
- 'deleted', 'prop_modified', 'unknown', 'update_available',
- 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
- )
-
- def __init__(self, wcpath, rev=None, modrev=None, author=None):
- self.wcpath = wcpath
- self.rev = rev
- self.modrev = modrev
- self.author = author
-
- for name in self.attrnames:
- setattr(self, name, [])
-
- def allpath(self, sort=True, **kw):
- d = {}
- for name in self.attrnames:
- if name not in kw or kw[name]:
- for path in getattr(self, name):
- d[path] = 1
- l = d.keys()
- if sort:
- l.sort()
- return l
-
- # XXX a bit scary to assume there's always 2 spaces between username and
- # path, however with win32 allowing spaces in user names there doesn't
- # seem to be a more solid approach :(
- _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
-
- def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
- """ return a new WCStatus object from data 's'
- """
- rootstatus = WCStatus(rootwcpath, rev, modrev, author)
- update_rev = None
- for line in data.split('\n'):
- if not line.strip():
- continue
- #print "processing %r" % line
- flags, rest = line[:8], line[8:]
- # first column
- c0,c1,c2,c3,c4,c5,x6,c7 = flags
- #if '*' in line:
- # print "flags", repr(flags), "rest", repr(rest)
-
- if c0 in '?XI':
- fn = line.split(None, 1)[1]
- if c0 == '?':
- wcpath = rootwcpath.join(fn, abs=1)
- rootstatus.unknown.append(wcpath)
- elif c0 == 'X':
- wcpath = rootwcpath.__class__(
- rootwcpath.localpath.join(fn, abs=1),
- auth=rootwcpath.auth)
- rootstatus.external.append(wcpath)
- elif c0 == 'I':
- wcpath = rootwcpath.join(fn, abs=1)
- rootstatus.ignored.append(wcpath)
-
- continue
-
- #elif c0 in '~!' or c4 == 'S':
- # raise NotImplementedError("received flag %r" % c0)
-
- m = WCStatus._rex_status.match(rest)
- if not m:
- if c7 == '*':
- fn = rest.strip()
- wcpath = rootwcpath.join(fn, abs=1)
- rootstatus.update_available.append(wcpath)
- continue
- if line.lower().find('against revision:')!=-1:
- update_rev = int(rest.split(':')[1].strip())
- continue
- if line.lower().find('status on external') > -1:
- # XXX not sure what to do here... perhaps we want to
- # store some state instead of just continuing, as right
- # now it makes the top-level external get added twice
- # (once as external, once as 'normal' unchanged item)
- # because of the way SVN presents external items
- continue
- # keep trying
- raise ValueError("could not parse line %r" % line)
- else:
- rev, modrev, author, fn = m.groups()
- wcpath = rootwcpath.join(fn, abs=1)
- #assert wcpath.check()
- if c0 == 'M':
- assert wcpath.check(file=1), "didn't expect a directory with changed content here"
- rootstatus.modified.append(wcpath)
- elif c0 == 'A' or c3 == '+' :
- rootstatus.added.append(wcpath)
- elif c0 == 'D':
- rootstatus.deleted.append(wcpath)
- elif c0 == 'C':
- rootstatus.conflict.append(wcpath)
- elif c0 == '~':
- rootstatus.kindmismatch.append(wcpath)
- elif c0 == '!':
- rootstatus.incomplete.append(wcpath)
- elif c0 == 'R':
- rootstatus.replaced.append(wcpath)
- elif not c0.strip():
- rootstatus.unchanged.append(wcpath)
- else:
- raise NotImplementedError("received flag %r" % c0)
-
- if c1 == 'M':
- rootstatus.prop_modified.append(wcpath)
- # XXX do we cover all client versions here?
- if c2 == 'L' or c5 == 'K':
- rootstatus.locked.append(wcpath)
- if c7 == '*':
- rootstatus.update_available.append(wcpath)
-
- if wcpath == rootwcpath:
- rootstatus.rev = rev
- rootstatus.modrev = modrev
- rootstatus.author = author
- if update_rev:
- rootstatus.update_rev = update_rev
- continue
- return rootstatus
- fromstring = staticmethod(fromstring)
-
-class XMLWCStatus(WCStatus):
- def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
- """ parse 'data' (XML string as outputted by svn st) into a status obj
- """
- # XXX for externals, the path is shown twice: once
- # with external information, and once with full info as if
- # the item was a normal non-external... the current way of
- # dealing with this issue is by ignoring it - this does make
- # externals appear as external items as well as 'normal',
- # unchanged ones in the status object so this is far from ideal
- rootstatus = WCStatus(rootwcpath, rev, modrev, author)
- update_rev = None
- minidom, ExpatError = importxml()
- try:
- doc = minidom.parseString(data)
- except ExpatError:
- e = sys.exc_info()[1]
- raise ValueError(str(e))
- urevels = doc.getElementsByTagName('against')
- if urevels:
- rootstatus.update_rev = urevels[-1].getAttribute('revision')
- for entryel in doc.getElementsByTagName('entry'):
- path = entryel.getAttribute('path')
- statusel = entryel.getElementsByTagName('wc-status')[0]
- itemstatus = statusel.getAttribute('item')
-
- if itemstatus == 'unversioned':
- wcpath = rootwcpath.join(path, abs=1)
- rootstatus.unknown.append(wcpath)
- continue
- elif itemstatus == 'external':
- wcpath = rootwcpath.__class__(
- rootwcpath.localpath.join(path, abs=1),
- auth=rootwcpath.auth)
- rootstatus.external.append(wcpath)
- continue
- elif itemstatus == 'ignored':
- wcpath = rootwcpath.join(path, abs=1)
- rootstatus.ignored.append(wcpath)
- continue
- elif itemstatus == 'incomplete':
- wcpath = rootwcpath.join(path, abs=1)
- rootstatus.incomplete.append(wcpath)
- continue
-
- rev = statusel.getAttribute('revision')
- if itemstatus == 'added' or itemstatus == 'none':
- rev = '0'
- modrev = '?'
- author = '?'
- date = ''
- elif itemstatus == "replaced":
- pass
- else:
- #print entryel.toxml()
- commitel = entryel.getElementsByTagName('commit')[0]
- if commitel:
- modrev = commitel.getAttribute('revision')
- author = ''
- author_els = commitel.getElementsByTagName('author')
- if author_els:
- for c in author_els[0].childNodes:
- author += c.nodeValue
- date = ''
- for c in commitel.getElementsByTagName('date')[0]\
- .childNodes:
- date += c.nodeValue
-
- wcpath = rootwcpath.join(path, abs=1)
-
- assert itemstatus != 'modified' or wcpath.check(file=1), (
- 'didn\'t expect a directory with changed content here')
-
- itemattrname = {
- 'normal': 'unchanged',
- 'unversioned': 'unknown',
- 'conflicted': 'conflict',
- 'none': 'added',
- }.get(itemstatus, itemstatus)
-
- attr = getattr(rootstatus, itemattrname)
- attr.append(wcpath)
-
- propsstatus = statusel.getAttribute('props')
- if propsstatus not in ('none', 'normal'):
- rootstatus.prop_modified.append(wcpath)
-
- if wcpath == rootwcpath:
- rootstatus.rev = rev
- rootstatus.modrev = modrev
- rootstatus.author = author
- rootstatus.date = date
-
- # handle repos-status element (remote info)
- rstatusels = entryel.getElementsByTagName('repos-status')
- if rstatusels:
- rstatusel = rstatusels[0]
- ritemstatus = rstatusel.getAttribute('item')
- if ritemstatus in ('added', 'modified'):
- rootstatus.update_available.append(wcpath)
-
- lockels = entryel.getElementsByTagName('lock')
- if len(lockels):
- rootstatus.locked.append(wcpath)
-
- return rootstatus
- fromstring = staticmethod(fromstring)
-
-class InfoSvnWCCommand:
- def __init__(self, output):
- # Path: test
- # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
- # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
- # Revision: 2151
- # Node Kind: directory
- # Schedule: normal
- # Last Changed Author: hpk
- # Last Changed Rev: 2100
- # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
- # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
-
- d = {}
- for line in output.split('\n'):
- if not line.strip():
- continue
- key, value = line.split(':', 1)
- key = key.lower().replace(' ', '')
- value = value.strip()
- d[key] = value
- try:
- self.url = d['url']
- except KeyError:
- raise ValueError("Not a versioned resource")
- #raise ValueError, "Not a versioned resource %r" % path
- self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
- try:
- self.rev = int(d['revision'])
- except KeyError:
- self.rev = None
-
- self.path = py.path.local(d['path'])
- self.size = self.path.size()
- if 'lastchangedrev' in d:
- self.created_rev = int(d['lastchangedrev'])
- if 'lastchangedauthor' in d:
- self.last_author = d['lastchangedauthor']
- if 'lastchangeddate' in d:
- self.mtime = parse_wcinfotime(d['lastchangeddate'])
- self.time = self.mtime * 1000000
-
- def __eq__(self, other):
- return self.__dict__ == other.__dict__
-
-def parse_wcinfotime(timestr):
- """ Returns seconds since epoch, UTC. """
- # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
- m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
- if not m:
- raise ValueError("timestring %r does not match" % timestr)
- timestr, timezone = m.groups()
- # do not handle timezone specially, return value should be UTC
- parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
- return calendar.timegm(parsedtime)
-
-def make_recursive_propdict(wcroot,
- output,
- rex = re.compile("Properties on '(.*)':")):
- """ Return a dictionary of path->PropListDict mappings. """
- lines = [x for x in output.split('\n') if x]
- pdict = {}
- while lines:
- line = lines.pop(0)
- m = rex.match(line)
- if not m:
- raise ValueError("could not parse propget-line: %r" % line)
- path = m.groups()[0]
- wcpath = wcroot.join(path, abs=1)
- propnames = []
- while lines and lines[0].startswith(' '):
- propname = lines.pop(0).strip()
- propnames.append(propname)
- assert propnames, "must have found properties!"
- pdict[wcpath] = PropListDict(wcpath, propnames)
- return pdict
-
-
-def importxml(cache=[]):
- if cache:
- return cache
- from xml.dom import minidom
- from xml.parsers.expat import ExpatError
- cache.extend([minidom, ExpatError])
- return cache
-
-class LogEntry:
- def __init__(self, logentry):
- self.rev = int(logentry.getAttribute('revision'))
- for lpart in filter(None, logentry.childNodes):
- if lpart.nodeType == lpart.ELEMENT_NODE:
- if lpart.nodeName == 'author':
- self.author = lpart.firstChild.nodeValue
- elif lpart.nodeName == 'msg':
- if lpart.firstChild:
- self.msg = lpart.firstChild.nodeValue
- else:
- self.msg = ''
- elif lpart.nodeName == 'date':
- #2003-07-29T20:05:11.598637Z
- timestr = lpart.firstChild.nodeValue
- self.date = parse_apr_time(timestr)
- elif lpart.nodeName == 'paths':
- self.strpaths = []
- for ppart in filter(None, lpart.childNodes):
- if ppart.nodeType == ppart.ELEMENT_NODE:
- self.strpaths.append(PathEntry(ppart))
- def __repr__(self):
- return '<Logentry rev=%d author=%s date=%s>' % (
- self.rev, self.author, self.date)
-
-
+"""
+svn-Command based Implementation of a Subversion WorkingCopy Path.
+
+ SvnWCCommandPath is the main class.
+
+"""
+
+import os, sys, time, re, calendar, string
+import py
+import subprocess
+from py._path import common
+
+#-----------------------------------------------------------
+# Caching latest repository revision and repo-paths
+# (getting them is slow with the current implementations)
+#
+# XXX make mt-safe
+#-----------------------------------------------------------
+
+class cache:
+ proplist = {}
+ info = {}
+ entries = {}
+ prop = {}
+
+class RepoEntry:
+ def __init__(self, url, rev, timestamp):
+ self.url = url
+ self.rev = rev
+ self.timestamp = timestamp
+
+ def __str__(self):
+ return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp)
+
+class RepoCache:
+ """ The RepoCache manages discovered repository paths
+ and their revisions. If queried within the timeout, the cache
+ will even return the revision of the root.
+ """
+ timeout = 20 # seconds after which we forget that we know the last revision
+
+ def __init__(self):
+ self.repos = []
+
+ def clear(self):
+ self.repos = []
+
+ def put(self, url, rev, timestamp=None):
+ if rev is None:
+ return
+ if timestamp is None:
+ timestamp = time.time()
+
+ for entry in self.repos:
+ if url == entry.url:
+ entry.timestamp = timestamp
+ entry.rev = rev
+ #print "set repo", entry
+ break
+ else:
+ entry = RepoEntry(url, rev, timestamp)
+ self.repos.append(entry)
+ #print "appended repo", entry
+
+ def get(self, url):
+ now = time.time()
+ for entry in self.repos:
+ if url.startswith(entry.url):
+ if now < entry.timestamp + self.timeout:
+ #print "returning immediate entry", entry
+ return entry.url, entry.rev
+ return entry.url, -1
+ return url, -1
+
+repositories = RepoCache()
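A rough usage sketch of the revision cache above (assuming the py package is installed so that py._path.svnwc is importable; the URL and revision numbers are made up):

    import time
    from py._path.svnwc import RepoCache

    repos = RepoCache()
    repos.put("http://host/repo", 42)
    # a fresh entry answers lookups for anything below the known root URL
    print(repos.get("http://host/repo/trunk/file.txt"))   # ('http://host/repo', 42)
    # entries older than RepoCache.timeout (20 seconds) only confirm the root URL
    repos.put("http://host/repo", 43, timestamp=time.time() - 60)
    print(repos.get("http://host/repo/trunk/file.txt"))   # ('http://host/repo', -1)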
+
+
+# svn support code
+
+ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
+if sys.platform == "win32":
+ ALLOWED_CHARS += ":"
+ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
+
+def _getsvnversion(ver=[]):
+ try:
+ return ver[0]
+ except IndexError:
+ v = py.process.cmdexec("svn -q --version")
+ v = v.strip()
+ v = '.'.join(v.split('.')[:2])
+ ver.append(v)
+ return v
+
+def _escape_helper(text):
+ text = str(text)
+ if py.std.sys.platform != 'win32':
+ text = str(text).replace('$', '\\$')
+ return text
+
+def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
+ for c in str(text):
+ if c.isalnum():
+ continue
+ if c in allowed_chars:
+ continue
+ return True
+ return False
+
+def checkbadchars(url):
+ # (hpk) not quite sure about the exact purpose, guido w.?
+ proto, uri = url.split("://", 1)
+ if proto != "file":
+ host, uripath = uri.split('/', 1)
+ # only check for bad chars in the non-protocol parts
+ if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \
+ or _check_for_bad_chars(uripath, ALLOWED_CHARS)):
+ raise ValueError("bad char in %r" % (url, ))
+
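A small sketch of the character checks above (the URLs are invented); note that space, '/', '.' and a few other characters are explicitly allowed:

    from py._path.svnwc import _check_for_bad_chars, checkbadchars

    print(_check_for_bad_chars("trunk/some path"))    # False, space and '/' are allowed
    print(_check_for_bad_chars("trunk/evil;rm"))      # True, ';' is neither alnum nor allowed
    checkbadchars("http://host/repo/trunk")           # passes silently
    try:
        checkbadchars("http://host/repo/bad;part")
    except ValueError as exc:
        print(exc)                                    # bad char in 'http://host/repo/bad;part'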
+
+#_______________________________________________________________
+
+class SvnPathBase(common.PathBase):
+ """ Base implementation for SvnPath implementations. """
+ sep = '/'
+
+ def _geturl(self):
+ return self.strpath
+ url = property(_geturl, None, None, "url of this svn-path.")
+
+ def __str__(self):
+ """ return a string representation (including rev-number) """
+ return self.strpath
+
+ def __hash__(self):
+ return hash(self.strpath)
+
+ def new(self, **kw):
+ """ create a modified version of this path. A 'rev' argument
+ indicates a new revision.
+ the following keyword arguments modify various path parts::
+
+ http://host.com/repo/path/file.ext
+ |-----------------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ obj = object.__new__(self.__class__)
+ obj.rev = kw.get('rev', self.rev)
+ obj.auth = kw.get('auth', self.auth)
+ dirname, basename, purebasename, ext = self._getbyspec(
+ "dirname,basename,purebasename,ext")
+ if 'basename' in kw:
+ if 'purebasename' in kw or 'ext' in kw:
+ raise ValueError("invalid specification %r" % kw)
+ else:
+ pb = kw.setdefault('purebasename', purebasename)
+ ext = kw.setdefault('ext', ext)
+ if ext and not ext.startswith('.'):
+ ext = '.' + ext
+ kw['basename'] = pb + ext
+
+ kw.setdefault('dirname', dirname)
+ kw.setdefault('sep', self.sep)
+ if kw['basename']:
+ obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
+ else:
+ obj.strpath = "%(dirname)s" % kw
+ return obj
+
+ def _getbyspec(self, spec):
+ """ get specified parts of the path. 'arg' is a string
+ with comma separated path parts. The parts are returned
+ in exactly the order of the specification.
+
+ you may specify the following parts:
+
+ http://host.com/repo/path/file.ext
+ |-----------------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ res = []
+ parts = self.strpath.split(self.sep)
+ for name in spec.split(','):
+ name = name.strip()
+ if name == 'dirname':
+ res.append(self.sep.join(parts[:-1]))
+ elif name == 'basename':
+ res.append(parts[-1])
+ else:
+ basename = parts[-1]
+ i = basename.rfind('.')
+ if i == -1:
+ purebasename, ext = basename, ''
+ else:
+ purebasename, ext = basename[:i], basename[i:]
+ if name == 'purebasename':
+ res.append(purebasename)
+ elif name == 'ext':
+ res.append(ext)
+ else:
+ raise NameError("Don't know part %r" % name)
+ return res
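The part names used by new() and _getbyspec() above are easiest to see on a URL path. A small sketch assuming the py package is installed; the URL is the example from the docstring, and these operations are purely string based, so no repository needs to be reachable:

    import py

    p = py.path.svnurl("http://host.com/repo/path/file.ext")
    print(p._getbyspec("dirname,basename,purebasename,ext"))
    # ['http://host.com/repo/path', 'file.ext', 'file', '.ext']
    print(p.new(ext='.txt'))            # http://host.com/repo/path/file.txt
    print(p.new(basename='other.py'))   # http://host.com/repo/path/other.py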
+
+ def __eq__(self, other):
+ """ return true if path and rev attributes each match """
+ return (str(self) == str(other) and
+ self.rev == other.rev)
+
+ def __ne__(self, other):
+ return not self == other
+
+ def join(self, *args):
+ """ return a new Path (with the same revision) which is composed
+ of the self Path followed by 'args' path components.
+ """
+ if not args:
+ return self
+
+ args = tuple([arg.strip(self.sep) for arg in args])
+ parts = (self.strpath, ) + args
+ newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
+ return newpath
+
+ def propget(self, name):
+ """ return the content of the given property. """
+ value = self._propget(name)
+ return value
+
+ def proplist(self):
+ """ list all property names. """
+ content = self._proplist()
+ return content
+
+ def size(self):
+ """ Return the size of the file content of the Path. """
+ return self.info().size
+
+ def mtime(self):
+ """ Return the last modification time of the file. """
+ return self.info().mtime
+
+ # shared help methods
+
+ def _escape(self, cmd):
+ return _escape_helper(cmd)
+
+
+ #def _childmaxrev(self):
+ # """ return maximum revision number of childs (or self.rev if no childs) """
+ # rev = self.rev
+ # for name, info in self._listdir_nameinfo():
+ # rev = max(rev, info.created_rev)
+ # return rev
+
+ #def _getlatestrevision(self):
+ # """ return latest repo-revision for this path. """
+ # url = self.strpath
+ # path = self.__class__(url, None)
+ #
+ # # we need a long walk to find the root-repo and revision
+ # while 1:
+ # try:
+ # rev = max(rev, path._childmaxrev())
+ # previous = path
+ # path = path.dirpath()
+ # except (IOError, process.cmdexec.Error):
+ # break
+ # if rev is None:
+ # raise IOError, "could not determine newest repo revision for %s" % self
+ # return rev
+
+ class Checkers(common.Checkers):
+ def dir(self):
+ try:
+ return self.path.info().kind == 'dir'
+ except py.error.Error:
+ return self._listdirworks()
+
+ def _listdirworks(self):
+ try:
+ self.path.listdir()
+ except py.error.ENOENT:
+ return False
+ else:
+ return True
+
+ def file(self):
+ try:
+ return self.path.info().kind == 'file'
+ except py.error.ENOENT:
+ return False
+
+ def exists(self):
+ try:
+ return self.path.info()
+ except py.error.ENOENT:
+ return self._listdirworks()
+
+def parse_apr_time(timestr):
+ i = timestr.rfind('.')
+ if i == -1:
+ raise ValueError("could not parse %s" % timestr)
+ timestr = timestr[:i]
+ parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
+ return time.mktime(parsedtime)
+
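A quick sketch of the timestamp helper above, using the sample value quoted in the LogEntry code further down; note that time.mktime() interprets the value in the local timezone:

    from py._path.svnwc import parse_apr_time

    secs = parse_apr_time("2003-07-29T20:05:11.598637Z")
    print(secs)   # seconds since the epoch, as a float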
+class PropListDict(dict):
+ """ a Dictionary which fetches values (InfoSvnCommand instances) lazily"""
+ def __init__(self, path, keynames):
+ dict.__init__(self, [(x, None) for x in keynames])
+ self.path = path
+
+ def __getitem__(self, key):
+ value = dict.__getitem__(self, key)
+ if value is None:
+ value = self.path.propget(key)
+ dict.__setitem__(self, key, value)
+ return value
+
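A minimal sketch of the lazy property mapping above. proplist() constructs it with a real svn path; here a tiny stand-in object with a propget() method is used instead, and the property names and values are made up:

    from py._path.svnwc import PropListDict

    class FakePath:
        def propget(self, name):
            print("fetching %s" % name)
            return "value-of-" + name

    props = PropListDict(FakePath(), ["svn:ignore", "svn:eol-style"])
    print(sorted(props))         # keys are known up front, values not yet fetched
    print(props["svn:ignore"])   # first access triggers propget() and caches the value
    print(props["svn:ignore"])   # second access returns the cached value, no fetch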
+def fixlocale():
+ if sys.platform != 'win32':
+ return 'LC_ALL=C '
+ return ''
+
+# some nasty chunk of code to solve path and url conversion and quoting issues
+ILLEGAL_CHARS = '* | \\ / : < > ? \t \n \x0b \x0c \r'.split(' ')
+if os.sep in ILLEGAL_CHARS:
+ ILLEGAL_CHARS.remove(os.sep)
+ISWINDOWS = sys.platform == 'win32'
+_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
+def _check_path(path):
+ illegal = ILLEGAL_CHARS[:]
+ sp = path.strpath
+ if ISWINDOWS:
+ illegal.remove(':')
+ if not _reg_allow_disk.match(sp):
+ raise ValueError('path may not contain a colon (:)')
+ for char in sp:
+ if char not in string.printable or char in illegal:
+ raise ValueError('illegal character %r in path' % (char,))
+
+def path_to_fspath(path, addat=True):
+ _check_path(path)
+ sp = path.strpath
+ if addat and path.rev != -1:
+ sp = '%s@%s' % (sp, path.rev)
+ elif addat:
+ sp = '%s@HEAD' % (sp,)
+ return sp
+
+def url_from_path(path):
+ fspath = path_to_fspath(path, False)
+ quote = py.std.urllib.quote
+ if ISWINDOWS:
+ match = _reg_allow_disk.match(fspath)
+ fspath = fspath.replace('\\', '/')
+ if match.group(1):
+ fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
+ quote(fspath[len(match.group(1)):]))
+ else:
+ fspath = quote(fspath)
+ else:
+ fspath = quote(fspath)
+ if path.rev != -1:
+ fspath = '%s@%s' % (fspath, path.rev)
+ else:
+ fspath = '%s@HEAD' % (fspath,)
+ return 'file://%s' % (fspath,)
+
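The two helpers above implement the path@REV peg-revision convention, with @HEAD used when no revision is pinned. A self-contained restatement of just the suffix rule from path_to_fspath(), using a stand-in object and leaving out the _check_path() character validation:

    class FakeSvnPath:
        strpath = "/tmp/wc/file.txt"
        rev = -1                      # -1 is used to mean "no pinned revision"

    def rev_suffix(path, addat=True):
        if addat and path.rev != -1:
            return "%s@%s" % (path.strpath, path.rev)
        elif addat:
            return "%s@HEAD" % (path.strpath,)
        return path.strpath

    print(rev_suffix(FakeSvnPath()))   # /tmp/wc/file.txt@HEAD
    FakeSvnPath.rev = 10
    print(rev_suffix(FakeSvnPath()))   # /tmp/wc/file.txt@10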
+class SvnAuth(object):
+ """ container for auth information for Subversion """
+ def __init__(self, username, password, cache_auth=True, interactive=True):
+ self.username = username
+ self.password = password
+ self.cache_auth = cache_auth
+ self.interactive = interactive
+
+ def makecmdoptions(self):
+ uname = self.username.replace('"', '\\"')
+ passwd = self.password.replace('"', '\\"')
+ ret = []
+ if uname:
+ ret.append('--username="%s"' % (uname,))
+ if passwd:
+ ret.append('--password="%s"' % (passwd,))
+ if not self.cache_auth:
+ ret.append('--no-auth-cache')
+ if not self.interactive:
+ ret.append('--non-interactive')
+ return ' '.join(ret)
+
+ def __str__(self):
+ return "<SvnAuth username=%s ...>" %(self.username,)
+
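A short sketch of the auth container above; the username and password are made up, and the resulting string is what _authsvn() later appends to the svn command line:

    from py._path.svnwc import SvnAuth

    auth = SvnAuth("alice", "s3cret", cache_auth=False, interactive=False)
    print(auth.makecmdoptions())
    # --username="alice" --password="s3cret" --no-auth-cache --non-interactive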
+rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
+
+class SvnWCCommandPath(common.PathBase):
+ """ path implementation offering access/modification to svn working copies.
+ It has methods similar to the functions in os.path and similar to the
+ commands of the svn client.
+ """
+ sep = os.sep
+
+ def __new__(cls, wcpath=None, auth=None):
+ self = object.__new__(cls)
+ if isinstance(wcpath, cls):
+ if wcpath.__class__ == cls:
+ return wcpath
+ wcpath = wcpath.localpath
+ if _check_for_bad_chars(str(wcpath),
+ ALLOWED_CHARS):
+ raise ValueError("bad char in wcpath %s" % (wcpath, ))
+ self.localpath = py.path.local(wcpath)
+ self.auth = auth
+ return self
+
+ strpath = property(lambda x: str(x.localpath), None, None, "string path")
+ rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
+
+ def __eq__(self, other):
+ return self.localpath == getattr(other, 'localpath', None)
+
+ def _geturl(self):
+ if getattr(self, '_url', None) is None:
+ info = self.info()
+ self._url = info.url #SvnPath(info.url, info.rev)
+ assert isinstance(self._url, py.builtin._basestring)
+ return self._url
+
+ url = property(_geturl, None, None, "url of this WC item")
+
+ def _escape(self, cmd):
+ return _escape_helper(cmd)
+
+ def dump(self, obj):
+ """ pickle object into path location"""
+ return self.localpath.dump(obj)
+
+ def svnurl(self):
+ """ return current SvnPath for this WC-item. """
+ info = self.info()
+ return py.path.svnurl(info.url)
+
+ def __repr__(self):
+ return "svnwc(%r)" % (self.strpath) # , self._url)
+
+ def __str__(self):
+ return str(self.localpath)
+
+ def _makeauthoptions(self):
+ if self.auth is None:
+ return ''
+ return self.auth.makecmdoptions()
+
+ def _authsvn(self, cmd, args=None):
+ args = args and list(args) or []
+ args.append(self._makeauthoptions())
+ return self._svn(cmd, *args)
+
+ def _svn(self, cmd, *args):
+ l = ['svn %s' % cmd]
+ args = [self._escape(item) for item in args]
+ l.extend(args)
+ l.append('"%s"' % self._escape(self.strpath))
+ # try fixing the locale because we can't otherwise parse
+ string = fixlocale() + " ".join(l)
+ try:
+ try:
+ key = 'LC_MESSAGES'
+ hold = os.environ.get(key)
+ os.environ[key] = 'C'
+ out = py.process.cmdexec(string)
+ finally:
+ if hold:
+ os.environ[key] = hold
+ else:
+ del os.environ[key]
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ strerr = e.err.lower()
+ if strerr.find('not found') != -1:
+ raise py.error.ENOENT(self)
+ elif strerr.find("E200009:") != -1:
+ raise py.error.ENOENT(self)
+ if (strerr.find('file exists') != -1 or
+ strerr.find('file already exists') != -1 or
+ strerr.find('w150002:') != -1 or
+ strerr.find("can't create directory") != -1):
+ raise py.error.EEXIST(strerr) #self)
+ raise
+ return out
+
+ def switch(self, url):
+ """ switch to given URL. """
+ self._authsvn('switch', [url])
+
+ def checkout(self, url=None, rev=None):
+ """ checkout from url to local wcpath. """
+ args = []
+ if url is None:
+ url = self.url
+ if rev is None or rev == -1:
+ if (py.std.sys.platform != 'win32' and
+ _getsvnversion() == '1.3'):
+ url += "@HEAD"
+ else:
+ if _getsvnversion() == '1.3':
+ url += "@%d" % rev
+ else:
+ args.append('-r' + str(rev))
+ args.append(url)
+ self._authsvn('co', args)
+
+ def update(self, rev='HEAD', interactive=True):
+ """ update working copy item to given revision. (None -> HEAD). """
+ opts = ['-r', rev]
+ if not interactive:
+ opts.append("--non-interactive")
+ self._authsvn('up', opts)
+
+ def write(self, content, mode='w'):
+ """ write content into local filesystem wc. """
+ self.localpath.write(content, mode)
+
+ def dirpath(self, *args):
+ """ return the directory Path of the current Path. """
+ return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
+
+ def _ensuredirs(self):
+ parent = self.dirpath()
+ if parent.check(dir=0):
+ parent._ensuredirs()
+ if self.check(dir=0):
+ self.mkdir()
+ return self
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). if you specify a keyword argument 'directory=True'
+ then the path is forced to be a directory path.
+ """
+ p = self.join(*args)
+ if p.check():
+ if p.check(versioned=False):
+ p.add()
+ return p
+ if kwargs.get('dir', 0):
+ return p._ensuredirs()
+ parent = p.dirpath()
+ parent._ensuredirs()
+ p.write("")
+ p.add()
+ return p
+
+ def mkdir(self, *args):
+ """ create & return the directory joined with args. """
+ if args:
+ return self.join(*args).mkdir()
+ else:
+ self._svn('mkdir')
+ return self
+
+ def add(self):
+ """ add ourself to svn """
+ self._svn('add')
+
+ def remove(self, rec=1, force=1):
+ """ remove a file or a directory tree. 'rec'ursive is
+ ignored and considered always true (because of
+ underlying svn semantics).
+ """
+ assert rec, "svn cannot remove non-recursively"
+ if not self.check(versioned=True):
+ # not added to svn (anymore?), just remove
+ py.path.local(self).remove()
+ return
+ flags = []
+ if force:
+ flags.append('--force')
+ self._svn('remove', *flags)
+
+ def copy(self, target):
+ """ copy path to target."""
+ py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
+
+ def rename(self, target):
+ """ rename this path to target. """
+ py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
+
+ def lock(self):
+ """ set a lock (exclusive) on the resource """
+ out = self._authsvn('lock').strip()
+ if not out:
+ # warning or error, raise exception
+ raise ValueError("unknown error in svn lock command")
+
+ def unlock(self):
+ """ unset a previously set lock """
+ out = self._authsvn('unlock').strip()
+ if out.startswith('svn:'):
+ # warning or error, raise exception
+ raise Exception(out[4:])
+
+ def cleanup(self):
+ """ remove any locks from the resource """
+ # XXX should be fixed properly!!!
+ try:
+ self.unlock()
+ except:
+ pass
+
+ def status(self, updates=0, rec=0, externals=0):
+ """ return (collective) Status object for this file. """
+ # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
+ # 2201 2192 jum test
+ # XXX
+ if externals:
+ raise ValueError("XXX cannot perform status() "
+ "on external items yet")
+ else:
+ #1.2 supports: externals = '--ignore-externals'
+ externals = ''
+ if rec:
+ rec= ''
+ else:
+ rec = '--non-recursive'
+
+ # XXX does not work on all subversion versions
+ #if not externals:
+ # externals = '--ignore-externals'
+
+ if updates:
+ updates = '-u'
+ else:
+ updates = ''
+
+ try:
+ cmd = 'status -v --xml --no-ignore %s %s %s' % (
+ updates, rec, externals)
+ out = self._authsvn(cmd)
+ except py.process.cmdexec.Error:
+ cmd = 'status -v --no-ignore %s %s %s' % (
+ updates, rec, externals)
+ out = self._authsvn(cmd)
+ rootstatus = WCStatus(self).fromstring(out, self)
+ else:
+ rootstatus = XMLWCStatus(self).fromstring(out, self)
+ return rootstatus
+
+ def diff(self, rev=None):
+ """ return a diff of the current path against revision rev (defaulting
+ to the last one).
+ """
+ args = []
+ if rev is not None:
+ args.append("-r %d" % rev)
+ out = self._authsvn('diff', args)
+ return out
+
+ def blame(self):
+ """ return a list of tuples of three elements:
+ (revision, committer, line)
+ """
+ out = self._svn('blame')
+ result = []
+ blamelines = out.splitlines()
+ reallines = py.path.svnurl(self.url).readlines()
+ for i, (blameline, line) in enumerate(
+ zip(blamelines, reallines)):
+ m = rex_blame.match(blameline)
+ if not m:
+ raise ValueError("output line %r of svn blame does not match "
+ "expected format" % (line, ))
+ rev, name, _ = m.groups()
+ result.append((int(rev), name, line))
+ return result
+
+ _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
+ def commit(self, msg='', rec=1):
+ """ commit with support for non-recursive commits """
+ # XXX i guess escaping should be done better here?!?
+ cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
+ if not rec:
+ cmd += ' -N'
+ out = self._authsvn(cmd)
+ try:
+ del cache.info[self]
+ except KeyError:
+ pass
+ if out:
+ m = self._rex_commit.match(out)
+ return int(m.group(1))
+
+ def propset(self, name, value, *args):
+ """ set property name to value on this path. """
+ d = py.path.local.mkdtemp()
+ try:
+ p = d.join('value')
+ p.write(value)
+ self._svn('propset', name, '--file', str(p), *args)
+ finally:
+ d.remove()
+
+ def propget(self, name):
+ """ get property name on this path. """
+ res = self._svn('propget', name)
+ return res[:-1] # strip trailing newline
+
+ def propdel(self, name):
+ """ delete property name on this path. """
+ res = self._svn('propdel', name)
+ return res[:-1] # strip trailing newline
+
+ def proplist(self, rec=0):
+ """ return a mapping of property names to property values.
+If rec is True, then return a dictionary mapping sub-paths to such mappings.
+"""
+ if rec:
+ res = self._svn('proplist -R')
+ return make_recursive_propdict(self, res)
+ else:
+ res = self._svn('proplist')
+ lines = res.split('\n')
+ lines = [x.strip() for x in lines[1:]]
+ return PropListDict(self, lines)
+
+ def revert(self, rec=0):
+ """ revert the local changes of this path. if rec is True, do so
+recursively. """
+ if rec:
+ result = self._svn('revert -R')
+ else:
+ result = self._svn('revert')
+ return result
+
+ def new(self, **kw):
+ """ create a modified version of this path. A 'rev' argument
+ indicates a new revision.
+ the following keyword arguments modify various path parts:
+
+ http://host.com/repo/path/file.ext
+ |-----------------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ if kw:
+ localpath = self.localpath.new(**kw)
+ else:
+ localpath = self.localpath
+ return self.__class__(localpath, auth=self.auth)
+
+ def join(self, *args, **kwargs):
+ """ return a new Path (with the same revision) which is composed
+ of the self Path followed by 'args' path components.
+ """
+ if not args:
+ return self
+ localpath = self.localpath.join(*args, **kwargs)
+ return self.__class__(localpath, auth=self.auth)
+
+ def info(self, usecache=1):
+ """ return an Info structure with svn-provided information. """
+ info = usecache and cache.info.get(self)
+ if not info:
+ try:
+ output = self._svn('info')
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('Path is not a working copy directory') != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find("is not under version control") != -1:
+ raise py.error.ENOENT(self, e.err)
+ raise
+ # XXX SVN 1.3 has output on stderr instead of stdout (while it does
+ # return 0!), so a bit nasty, but we assume no output is output
+ # to stderr...
+ if (output.strip() == '' or
+ output.lower().find('not a versioned resource') != -1):
+ raise py.error.ENOENT(self, output)
+ info = InfoSvnWCCommand(output)
+
+ # Can't reliably compare on Windows without access to win32api
+ if py.std.sys.platform != 'win32':
+ if info.path != self.localpath:
+ raise py.error.ENOENT(self, "not a versioned resource:" +
+ " %s != %s" % (info.path, self.localpath))
+ cache.info[self] = info
+ return info
+
+ def listdir(self, fil=None, sort=None):
+ """ return a sequence of Paths.
+
+ listdir will return either a tuple or a list of paths
+ depending on implementation choices.
+ """
+ if isinstance(fil, str):
+ fil = common.FNMatcher(fil)
+ # XXX unify argument naming with LocalPath.listdir
+ def notsvn(path):
+ return path.basename != '.svn'
+
+ paths = []
+ for localpath in self.localpath.listdir(notsvn):
+ p = self.__class__(localpath, auth=self.auth)
+ if notsvn(p) and (not fil or fil(p)):
+ paths.append(p)
+ self._sortlist(paths, sort)
+ return paths
+
+ def open(self, mode='r'):
+ """ return an opened file with the given mode. """
+ return open(self.strpath, mode)
+
+ def _getbyspec(self, spec):
+ return self.localpath._getbyspec(spec)
+
+ class Checkers(py.path.local.Checkers):
+ def __init__(self, path):
+ self.svnwcpath = path
+ self.path = path.localpath
+ def versioned(self):
+ try:
+ s = self.svnwcpath.info()
+ except (py.error.ENOENT, py.error.EEXIST):
+ return False
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('is not a working copy')!=-1:
+ return False
+ if e.err.lower().find('not a versioned resource') != -1:
+ return False
+ raise
+ else:
+ return True
+
+ def log(self, rev_start=None, rev_end=1, verbose=False):
+ """ return a list of LogEntry instances for this path.
+rev_start is the starting revision (defaulting to the first one).
+rev_end is the last revision (defaulting to HEAD).
+if verbose is True, then the LogEntry instances also know which files changed.
+"""
+ assert self.check() # make it simpler for the pipe
+ rev_start = rev_start is None and "HEAD" or rev_start
+ rev_end = rev_end is None and "HEAD" or rev_end
+ if rev_start == "HEAD" and rev_end == 1:
+ rev_opt = ""
+ else:
+ rev_opt = "-r %s:%s" % (rev_start, rev_end)
+ verbose_opt = verbose and "-v" or ""
+ locale_env = fixlocale()
+ # some blather on stderr
+ auth_opt = self._makeauthoptions()
+ #stdin, stdout, stderr = os.popen3(locale_env +
+ # 'svn log --xml %s %s %s "%s"' % (
+ # rev_opt, verbose_opt, auth_opt,
+ # self.strpath))
+ cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
+ rev_opt, verbose_opt, auth_opt, self.strpath)
+
+ popen = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ )
+ stdout, stderr = popen.communicate()
+ stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
+ minidom,ExpatError = importxml()
+ try:
+ tree = minidom.parseString(stdout)
+ except ExpatError:
+ raise ValueError('no such revision')
+ result = []
+ for logentry in filter(None, tree.firstChild.childNodes):
+ if logentry.nodeType == logentry.ELEMENT_NODE:
+ result.append(LogEntry(logentry))
+ return result
+
+ def size(self):
+ """ Return the size of the file content of the Path. """
+ return self.info().size
+
+ def mtime(self):
+ """ Return the last modification time of the file. """
+ return self.info().mtime
+
+ def __hash__(self):
+ return hash((self.strpath, self.__class__, self.auth))
+
+
+class WCStatus:
+ attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
+ 'deleted', 'prop_modified', 'unknown', 'update_available',
+ 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
+ )
+
+ def __init__(self, wcpath, rev=None, modrev=None, author=None):
+ self.wcpath = wcpath
+ self.rev = rev
+ self.modrev = modrev
+ self.author = author
+
+ for name in self.attrnames:
+ setattr(self, name, [])
+
+ def allpath(self, sort=True, **kw):
+ d = {}
+ for name in self.attrnames:
+ if name not in kw or kw[name]:
+ for path in getattr(self, name):
+ d[path] = 1
+ l = d.keys()
+ if sort:
+ l.sort()
+ return l
+
+ # XXX a bit scary to assume there's always 2 spaces between username and
+ # path, however with win32 allowing spaces in user names there doesn't
+ # seem to be a more solid approach :(
+ _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
+
+ def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+ """ return a new WCStatus object from data 's'
+ """
+ rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+ update_rev = None
+ for line in data.split('\n'):
+ if not line.strip():
+ continue
+ #print "processing %r" % line
+ flags, rest = line[:8], line[8:]
+ # first column
+ c0,c1,c2,c3,c4,c5,x6,c7 = flags
+ #if '*' in line:
+ # print "flags", repr(flags), "rest", repr(rest)
+
+ if c0 in '?XI':
+ fn = line.split(None, 1)[1]
+ if c0 == '?':
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.unknown.append(wcpath)
+ elif c0 == 'X':
+ wcpath = rootwcpath.__class__(
+ rootwcpath.localpath.join(fn, abs=1),
+ auth=rootwcpath.auth)
+ rootstatus.external.append(wcpath)
+ elif c0 == 'I':
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.ignored.append(wcpath)
+
+ continue
+
+ #elif c0 in '~!' or c4 == 'S':
+ # raise NotImplementedError("received flag %r" % c0)
+
+ m = WCStatus._rex_status.match(rest)
+ if not m:
+ if c7 == '*':
+ fn = rest.strip()
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.update_available.append(wcpath)
+ continue
+ if line.lower().find('against revision:')!=-1:
+ update_rev = int(rest.split(':')[1].strip())
+ continue
+ if line.lower().find('status on external') > -1:
+ # XXX not sure what to do here... perhaps we want to
+ # store some state instead of just continuing, as right
+ # now it makes the top-level external get added twice
+ # (once as external, once as 'normal' unchanged item)
+ # because of the way SVN presents external items
+ continue
+ # keep trying
+ raise ValueError("could not parse line %r" % line)
+ else:
+ rev, modrev, author, fn = m.groups()
+ wcpath = rootwcpath.join(fn, abs=1)
+ #assert wcpath.check()
+ if c0 == 'M':
+ assert wcpath.check(file=1), "didn't expect a directory with changed content here"
+ rootstatus.modified.append(wcpath)
+ elif c0 == 'A' or c3 == '+' :
+ rootstatus.added.append(wcpath)
+ elif c0 == 'D':
+ rootstatus.deleted.append(wcpath)
+ elif c0 == 'C':
+ rootstatus.conflict.append(wcpath)
+ elif c0 == '~':
+ rootstatus.kindmismatch.append(wcpath)
+ elif c0 == '!':
+ rootstatus.incomplete.append(wcpath)
+ elif c0 == 'R':
+ rootstatus.replaced.append(wcpath)
+ elif not c0.strip():
+ rootstatus.unchanged.append(wcpath)
+ else:
+ raise NotImplementedError("received flag %r" % c0)
+
+ if c1 == 'M':
+ rootstatus.prop_modified.append(wcpath)
+ # XXX do we cover all client versions here?
+ if c2 == 'L' or c5 == 'K':
+ rootstatus.locked.append(wcpath)
+ if c7 == '*':
+ rootstatus.update_available.append(wcpath)
+
+ if wcpath == rootwcpath:
+ rootstatus.rev = rev
+ rootstatus.modrev = modrev
+ rootstatus.author = author
+ if update_rev:
+ rootstatus.update_rev = update_rev
+ continue
+ return rootstatus
+ fromstring = staticmethod(fromstring)
+
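To see what the plain-text parser above expects, here is the status regular expression applied to one hand-written line in roughly the "svn status -v" layout (flag columns, working revision, last-changed revision, author, path); the values are invented and real output can differ between svn client versions:

    from py._path.svnwc import WCStatus

    line = "M              42       40 hpk   src/module.py"
    flags, rest = line[:8], line[8:]
    m = WCStatus._rex_status.match(rest)
    print(flags[0])      # 'M': locally modified content
    print(m.groups())    # ('42', '40', 'hpk', 'src/module.py')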
+class XMLWCStatus(WCStatus):
+ def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+ """ parse 'data' (XML string as outputted by svn st) into a status obj
+ """
+ # XXX for externals, the path is shown twice: once
+ # with external information, and once with full info as if
+ # the item was a normal non-external... the current way of
+ # dealing with this issue is by ignoring it - this does make
+ # externals appear as external items as well as 'normal',
+ # unchanged ones in the status object so this is far from ideal
+ rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+ update_rev = None
+ minidom, ExpatError = importxml()
+ try:
+ doc = minidom.parseString(data)
+ except ExpatError:
+ e = sys.exc_info()[1]
+ raise ValueError(str(e))
+ urevels = doc.getElementsByTagName('against')
+ if urevels:
+ rootstatus.update_rev = urevels[-1].getAttribute('revision')
+ for entryel in doc.getElementsByTagName('entry'):
+ path = entryel.getAttribute('path')
+ statusel = entryel.getElementsByTagName('wc-status')[0]
+ itemstatus = statusel.getAttribute('item')
+
+ if itemstatus == 'unversioned':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.unknown.append(wcpath)
+ continue
+ elif itemstatus == 'external':
+ wcpath = rootwcpath.__class__(
+ rootwcpath.localpath.join(path, abs=1),
+ auth=rootwcpath.auth)
+ rootstatus.external.append(wcpath)
+ continue
+ elif itemstatus == 'ignored':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.ignored.append(wcpath)
+ continue
+ elif itemstatus == 'incomplete':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.incomplete.append(wcpath)
+ continue
+
+ rev = statusel.getAttribute('revision')
+ if itemstatus == 'added' or itemstatus == 'none':
+ rev = '0'
+ modrev = '?'
+ author = '?'
+ date = ''
+ elif itemstatus == "replaced":
+ pass
+ else:
+ #print entryel.toxml()
+ commitel = entryel.getElementsByTagName('commit')[0]
+ if commitel:
+ modrev = commitel.getAttribute('revision')
+ author = ''
+ author_els = commitel.getElementsByTagName('author')
+ if author_els:
+ for c in author_els[0].childNodes:
+ author += c.nodeValue
+ date = ''
+ for c in commitel.getElementsByTagName('date')[0]\
+ .childNodes:
+ date += c.nodeValue
+
+ wcpath = rootwcpath.join(path, abs=1)
+
+ assert itemstatus != 'modified' or wcpath.check(file=1), (
+ 'didn\'t expect a directory with changed content here')
+
+ itemattrname = {
+ 'normal': 'unchanged',
+ 'unversioned': 'unknown',
+ 'conflicted': 'conflict',
+ 'none': 'added',
+ }.get(itemstatus, itemstatus)
+
+ attr = getattr(rootstatus, itemattrname)
+ attr.append(wcpath)
+
+ propsstatus = statusel.getAttribute('props')
+ if propsstatus not in ('none', 'normal'):
+ rootstatus.prop_modified.append(wcpath)
+
+ if wcpath == rootwcpath:
+ rootstatus.rev = rev
+ rootstatus.modrev = modrev
+ rootstatus.author = author
+ rootstatus.date = date
+
+ # handle repos-status element (remote info)
+ rstatusels = entryel.getElementsByTagName('repos-status')
+ if rstatusels:
+ rstatusel = rstatusels[0]
+ ritemstatus = rstatusel.getAttribute('item')
+ if ritemstatus in ('added', 'modified'):
+ rootstatus.update_available.append(wcpath)
+
+ lockels = entryel.getElementsByTagName('lock')
+ if len(lockels):
+ rootstatus.locked.append(wcpath)
+
+ return rootstatus
+ fromstring = staticmethod(fromstring)
+
+class InfoSvnWCCommand:
+ def __init__(self, output):
+ # Path: test
+ # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
+ # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+ # Revision: 2151
+ # Node Kind: directory
+ # Schedule: normal
+ # Last Changed Author: hpk
+ # Last Changed Rev: 2100
+ # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+ # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
+
+ d = {}
+ for line in output.split('\n'):
+ if not line.strip():
+ continue
+ key, value = line.split(':', 1)
+ key = key.lower().replace(' ', '')
+ value = value.strip()
+ d[key] = value
+ try:
+ self.url = d['url']
+ except KeyError:
+ raise ValueError("Not a versioned resource")
+ #raise ValueError, "Not a versioned resource %r" % path
+ self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
+ try:
+ self.rev = int(d['revision'])
+ except KeyError:
+ self.rev = None
+
+ self.path = py.path.local(d['path'])
+ self.size = self.path.size()
+ if 'lastchangedrev' in d:
+ self.created_rev = int(d['lastchangedrev'])
+ if 'lastchangedauthor' in d:
+ self.last_author = d['lastchangedauthor']
+ if 'lastchangeddate' in d:
+ self.mtime = parse_wcinfotime(d['lastchangeddate'])
+ self.time = self.mtime * 1000000
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+def parse_wcinfotime(timestr):
+ """ Returns seconds since epoch, UTC. """
+ # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+ m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
+ if not m:
+ raise ValueError("timestring %r does not match" % timestr)
+ timestr, timezone = m.groups()
+ # do not handle timezone specially, return value should be UTC
+ parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
+ return calendar.timegm(parsedtime)
+
+def make_recursive_propdict(wcroot,
+ output,
+ rex = re.compile("Properties on '(.*)':")):
+ """ Return a dictionary of path->PropListDict mappings. """
+ lines = [x for x in output.split('\n') if x]
+ pdict = {}
+ while lines:
+ line = lines.pop(0)
+ m = rex.match(line)
+ if not m:
+ raise ValueError("could not parse propget-line: %r" % line)
+ path = m.groups()[0]
+ wcpath = wcroot.join(path, abs=1)
+ propnames = []
+ while lines and lines[0].startswith(' '):
+ propname = lines.pop(0).strip()
+ propnames.append(propname)
+ assert propnames, "must have found properties!"
+ pdict[wcpath] = PropListDict(wcpath, propnames)
+ return pdict
+
+
+def importxml(cache=[]):
+ if cache:
+ return cache
+ from xml.dom import minidom
+ from xml.parsers.expat import ExpatError
+ cache.extend([minidom, ExpatError])
+ return cache
+
+class LogEntry:
+ def __init__(self, logentry):
+ self.rev = int(logentry.getAttribute('revision'))
+ for lpart in filter(None, logentry.childNodes):
+ if lpart.nodeType == lpart.ELEMENT_NODE:
+ if lpart.nodeName == 'author':
+ self.author = lpart.firstChild.nodeValue
+ elif lpart.nodeName == 'msg':
+ if lpart.firstChild:
+ self.msg = lpart.firstChild.nodeValue
+ else:
+ self.msg = ''
+ elif lpart.nodeName == 'date':
+ #2003-07-29T20:05:11.598637Z
+ timestr = lpart.firstChild.nodeValue
+ self.date = parse_apr_time(timestr)
+ elif lpart.nodeName == 'paths':
+ self.strpaths = []
+ for ppart in filter(None, lpart.childNodes):
+ if ppart.nodeType == ppart.ELEMENT_NODE:
+ self.strpaths.append(PathEntry(ppart))
+ def __repr__(self):
+ return '<Logentry rev=%d author=%s date=%s>' % (
+ self.rev, self.author, self.date)
+
+
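Taken together, the class above is what py.path.svnwc() returns. A rough end-to-end sketch; it needs a working svn client and an existing repository, and the repository URL and file names here are only placeholders:

    import py

    wc = py.path.svnwc('/tmp/example-wc')
    wc.checkout('file:///tmp/example-repo/trunk')   # svn co into the working copy
    newfile = wc.ensure('doc', 'index.txt')         # creates the file and svn-adds it
    newfile.write('hello')
    st = wc.status()                                # WCStatus / XMLWCStatus instance
    print([str(p) for p in st.added])
    rev = wc.commit('add index.txt')                # returns the new revision number
    print(rev)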
diff --git a/py/_process/__init__.py b/py/_process/__init__.py
index 86c714a..3186a2d 100644
--- a/py/_process/__init__.py
+++ b/py/_process/__init__.py
@@ -1 +1 @@
-""" high-level sub-process handling """
+""" high-level sub-process handling """
diff --git a/py/_process/cmdexec.py b/py/_process/cmdexec.py
index f83a249..22a1a82 100644
--- a/py/_process/cmdexec.py
+++ b/py/_process/cmdexec.py
@@ -1,49 +1,49 @@
-import sys
-import subprocess
-import py
-from subprocess import Popen, PIPE
-
-def cmdexec(cmd):
- """ return unicode output of executing 'cmd' in a separate process.
-
- raise cmdexec.Error exception if the command failed.
- the exception will provide an 'err' attribute containing
- the error-output from the command.
- if the subprocess module does not provide a proper encoding/unicode strings
- sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'.
- """
- process = subprocess.Popen(cmd, shell=True,
- universal_newlines=True,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = process.communicate()
- if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
- try:
- default_encoding = sys.getdefaultencoding() # jython may not have it
- except AttributeError:
- default_encoding = sys.stdout.encoding or 'UTF-8'
- out = unicode(out, process.stdout.encoding or default_encoding)
- err = unicode(err, process.stderr.encoding or default_encoding)
- status = process.poll()
- if status:
- raise ExecutionFailed(status, status, cmd, out, err)
- return out
-
-class ExecutionFailed(py.error.Error):
- def __init__(self, status, systemstatus, cmd, out, err):
- Exception.__init__(self)
- self.status = status
- self.systemstatus = systemstatus
- self.cmd = cmd
- self.err = err
- self.out = out
-
- def __str__(self):
- return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)
-
-# export the exception under the name 'py.process.cmdexec.Error'
-cmdexec.Error = ExecutionFailed
-try:
- ExecutionFailed.__module__ = 'py.process.cmdexec'
- ExecutionFailed.__name__ = 'Error'
-except (AttributeError, TypeError):
- pass
+import sys
+import subprocess
+import py
+from subprocess import Popen, PIPE
+
+def cmdexec(cmd):
+ """ return unicode output of executing 'cmd' in a separate process.
+
+ raise cmdexec.Error exception if the command failed.
+ the exception will provide an 'err' attribute containing
+ the error-output from the command.
+ if the subprocess module does not provide a proper encoding/unicode strings
+ sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'.
+ """
+ process = subprocess.Popen(cmd, shell=True,
+ universal_newlines=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = process.communicate()
+ if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
+ try:
+ default_encoding = sys.getdefaultencoding() # jython may not have it
+ except AttributeError:
+ default_encoding = sys.stdout.encoding or 'UTF-8'
+ out = unicode(out, process.stdout.encoding or default_encoding)
+ err = unicode(err, process.stderr.encoding or default_encoding)
+ status = process.poll()
+ if status:
+ raise ExecutionFailed(status, status, cmd, out, err)
+ return out
+
+class ExecutionFailed(py.error.Error):
+ def __init__(self, status, systemstatus, cmd, out, err):
+ Exception.__init__(self)
+ self.status = status
+ self.systemstatus = systemstatus
+ self.cmd = cmd
+ self.err = err
+ self.out = out
+
+ def __str__(self):
+ return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)
+
+# export the exception under the name 'py.process.cmdexec.Error'
+cmdexec.Error = ExecutionFailed
+try:
+ ExecutionFailed.__module__ = 'py.process.cmdexec'
+ ExecutionFailed.__name__ = 'Error'
+except (AttributeError, TypeError):
+ pass
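The helper above is exposed as py.process.cmdexec and is what the svn path classes shell out through. A small sketch; it assumes a POSIX-like shell so that echo and exit are available:

    import py

    out = py.process.cmdexec('echo hello')
    print(repr(out))                        # 'hello\n'
    try:
        py.process.cmdexec('exit 3')        # a non-zero exit status raises cmdexec.Error
    except py.process.cmdexec.Error as exc:
        print(exc.status, exc.cmd)          # 3 exit 3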
diff --git a/py/_process/forkedfunc.py b/py/_process/forkedfunc.py
index 1c28530..d23e653 100644
--- a/py/_process/forkedfunc.py
+++ b/py/_process/forkedfunc.py
@@ -1,120 +1,120 @@
-
-"""
- ForkedFunc provides a way to run a function in a forked process
- and get at its return value, stdout and stderr output as well
- as signals and exit statuses.
-"""
-
-import py
-import os
-import sys
-import marshal
-
-
-def get_unbuffered_io(fd, filename):
- f = open(str(filename), "w")
- if fd != f.fileno():
- os.dup2(f.fileno(), fd)
- class AutoFlush:
- def write(self, data):
- f.write(data)
- f.flush()
- def __getattr__(self, name):
- return getattr(f, name)
- return AutoFlush()
-
-
-class ForkedFunc:
- EXITSTATUS_EXCEPTION = 3
-
-
- def __init__(self, fun, args=None, kwargs=None, nice_level=0,
- child_on_start=None, child_on_exit=None):
- if args is None:
- args = []
- if kwargs is None:
- kwargs = {}
- self.fun = fun
- self.args = args
- self.kwargs = kwargs
- self.tempdir = tempdir = py.path.local.mkdtemp()
- self.RETVAL = tempdir.ensure('retval')
- self.STDOUT = tempdir.ensure('stdout')
- self.STDERR = tempdir.ensure('stderr')
-
- pid = os.fork()
- if pid: # in parent process
- self.pid = pid
- else: # in child process
- self.pid = None
- self._child(nice_level, child_on_start, child_on_exit)
-
- def _child(self, nice_level, child_on_start, child_on_exit):
- # right now we need to call a function, but first we need to
- # map all IO that might happen
- sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT)
- sys.stderr = stderr = get_unbuffered_io(2, self.STDERR)
- retvalf = self.RETVAL.open("wb")
- EXITSTATUS = 0
- try:
- if nice_level:
- os.nice(nice_level)
- try:
- if child_on_start is not None:
- child_on_start()
- retval = self.fun(*self.args, **self.kwargs)
- retvalf.write(marshal.dumps(retval))
- if child_on_exit is not None:
- child_on_exit()
- except:
- excinfo = py.code.ExceptionInfo()
- stderr.write(str(excinfo._getreprcrash()))
- EXITSTATUS = self.EXITSTATUS_EXCEPTION
- finally:
- stdout.close()
- stderr.close()
- retvalf.close()
- os.close(1)
- os.close(2)
- os._exit(EXITSTATUS)
-
- def waitfinish(self, waiter=os.waitpid):
- pid, systemstatus = waiter(self.pid, 0)
- if systemstatus:
- if os.WIFSIGNALED(systemstatus):
- exitstatus = os.WTERMSIG(systemstatus) + 128
- else:
- exitstatus = os.WEXITSTATUS(systemstatus)
- else:
- exitstatus = 0
- signal = systemstatus & 0x7f
- if not exitstatus and not signal:
- retval = self.RETVAL.open('rb')
- try:
- retval_data = retval.read()
- finally:
- retval.close()
- retval = marshal.loads(retval_data)
- else:
- retval = None
- stdout = self.STDOUT.read()
- stderr = self.STDERR.read()
- self._removetemp()
- return Result(exitstatus, signal, retval, stdout, stderr)
-
- def _removetemp(self):
- if self.tempdir.check():
- self.tempdir.remove()
-
- def __del__(self):
- if self.pid is not None: # only clean up in main process
- self._removetemp()
-
-
-class Result(object):
- def __init__(self, exitstatus, signal, retval, stdout, stderr):
- self.exitstatus = exitstatus
- self.signal = signal
- self.retval = retval
- self.out = stdout
- self.err = stderr
+
+"""
+ ForkedFunc provides a way to run a function in a forked process
+ and get at its return value, stdout and stderr output as well
+ as signals and exit statuses.
+"""
+
+import py
+import os
+import sys
+import marshal
+
+
+def get_unbuffered_io(fd, filename):
+ f = open(str(filename), "w")
+ if fd != f.fileno():
+ os.dup2(f.fileno(), fd)
+ class AutoFlush:
+ def write(self, data):
+ f.write(data)
+ f.flush()
+ def __getattr__(self, name):
+ return getattr(f, name)
+ return AutoFlush()
+
+
+class ForkedFunc:
+ EXITSTATUS_EXCEPTION = 3
+
+
+ def __init__(self, fun, args=None, kwargs=None, nice_level=0,
+ child_on_start=None, child_on_exit=None):
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
+ self.fun = fun
+ self.args = args
+ self.kwargs = kwargs
+ self.tempdir = tempdir = py.path.local.mkdtemp()
+ self.RETVAL = tempdir.ensure('retval')
+ self.STDOUT = tempdir.ensure('stdout')
+ self.STDERR = tempdir.ensure('stderr')
+
+ pid = os.fork()
+ if pid: # in parent process
+ self.pid = pid
+ else: # in child process
+ self.pid = None
+ self._child(nice_level, child_on_start, child_on_exit)
+
+ def _child(self, nice_level, child_on_start, child_on_exit):
+ # right now we need to call a function, but first we need to
+ # map all IO that might happen
+ sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT)
+ sys.stderr = stderr = get_unbuffered_io(2, self.STDERR)
+ retvalf = self.RETVAL.open("wb")
+ EXITSTATUS = 0
+ try:
+ if nice_level:
+ os.nice(nice_level)
+ try:
+ if child_on_start is not None:
+ child_on_start()
+ retval = self.fun(*self.args, **self.kwargs)
+ retvalf.write(marshal.dumps(retval))
+ if child_on_exit is not None:
+ child_on_exit()
+ except:
+ excinfo = py.code.ExceptionInfo()
+ stderr.write(str(excinfo._getreprcrash()))
+ EXITSTATUS = self.EXITSTATUS_EXCEPTION
+ finally:
+ stdout.close()
+ stderr.close()
+ retvalf.close()
+ os.close(1)
+ os.close(2)
+ os._exit(EXITSTATUS)
+
+ def waitfinish(self, waiter=os.waitpid):
+ pid, systemstatus = waiter(self.pid, 0)
+ if systemstatus:
+ if os.WIFSIGNALED(systemstatus):
+ exitstatus = os.WTERMSIG(systemstatus) + 128
+ else:
+ exitstatus = os.WEXITSTATUS(systemstatus)
+ else:
+ exitstatus = 0
+ signal = systemstatus & 0x7f
+ if not exitstatus and not signal:
+ retval = self.RETVAL.open('rb')
+ try:
+ retval_data = retval.read()
+ finally:
+ retval.close()
+ retval = marshal.loads(retval_data)
+ else:
+ retval = None
+ stdout = self.STDOUT.read()
+ stderr = self.STDERR.read()
+ self._removetemp()
+ return Result(exitstatus, signal, retval, stdout, stderr)
+
+ def _removetemp(self):
+ if self.tempdir.check():
+ self.tempdir.remove()
+
+ def __del__(self):
+ if self.pid is not None: # only clean up in main process
+ self._removetemp()
+
+
+class Result(object):
+ def __init__(self, exitstatus, signal, retval, stdout, stderr):
+ self.exitstatus = exitstatus
+ self.signal = signal
+ self.retval = retval
+ self.out = stdout
+ self.err = stderr
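
For orientation, a minimal usage sketch of the ForkedFunc/Result pair defined above, assuming the py library's usual entry point py.process.ForkedFunc and a POSIX platform (os.fork is required):

    import py

    def work():
        print("hello from the child")
        return 42

    ff = py.process.ForkedFunc(work)
    result = ff.waitfinish()                        # blocks until the child exits
    assert result.exitstatus == 0 and result.signal == 0
    assert result.retval == 42                      # marshalled back via the retval tempfile
    assert "hello from the child" in result.out     # captured child stdout
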
diff --git a/py/_process/killproc.py b/py/_process/killproc.py
index 18e8310..5b6032c 100644
--- a/py/_process/killproc.py
+++ b/py/_process/killproc.py
@@ -1,23 +1,23 @@
-import py
-import os, sys
-
-if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
- try:
- import ctypes
- except ImportError:
- def dokill(pid):
- py.process.cmdexec("taskkill /F /PID %d" %(pid,))
- else:
- def dokill(pid):
- PROCESS_TERMINATE = 1
- handle = ctypes.windll.kernel32.OpenProcess(
- PROCESS_TERMINATE, False, pid)
- ctypes.windll.kernel32.TerminateProcess(handle, -1)
- ctypes.windll.kernel32.CloseHandle(handle)
-else:
- def dokill(pid):
- os.kill(pid, 15)
-
-def kill(pid):
- """ kill process by id. """
- dokill(pid)
+import py
+import os, sys
+
+if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
+ try:
+ import ctypes
+ except ImportError:
+ def dokill(pid):
+ py.process.cmdexec("taskkill /F /PID %d" %(pid,))
+ else:
+ def dokill(pid):
+ PROCESS_TERMINATE = 1
+ handle = ctypes.windll.kernel32.OpenProcess(
+ PROCESS_TERMINATE, False, pid)
+ ctypes.windll.kernel32.TerminateProcess(handle, -1)
+ ctypes.windll.kernel32.CloseHandle(handle)
+else:
+ def dokill(pid):
+ os.kill(pid, 15)
+
+def kill(pid):
+ """ kill process by id. """
+ dokill(pid)
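
A short illustrative example of the kill() helper above; the spawned command and its timeout are assumptions for the sketch only:

    import subprocess
    import py

    proc = subprocess.Popen(["sleep", "60"])
    py.process.kill(proc.pid)   # SIGTERM on POSIX, TerminateProcess/taskkill on Windows
    proc.wait()                 # reap the terminated child
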
diff --git a/py/_std.py b/py/_std.py
index 97a9853..e016bc8 100644
--- a/py/_std.py
+++ b/py/_std.py
@@ -1,18 +1,18 @@
-import sys
-
-class Std(object):
- """ makes top-level python modules available as an attribute,
- importing them on first access.
- """
-
- def __init__(self):
- self.__dict__ = sys.modules
-
- def __getattr__(self, name):
- try:
- m = __import__(name)
- except ImportError:
- raise AttributeError("py.std: could not import %s" % name)
- return m
-
-std = Std()
+import sys
+
+class Std(object):
+ """ makes top-level python modules available as an attribute,
+ importing them on first access.
+ """
+
+ def __init__(self):
+ self.__dict__ = sys.modules
+
+ def __getattr__(self, name):
+ try:
+ m = __import__(name)
+ except ImportError:
+ raise AttributeError("py.std: could not import %s" % name)
+ return m
+
+std = Std()
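
The Std helper above backs the lazy py.std namespace; a small sketch of the behaviour:

    import py

    assert py.std.math.sqrt(16) == 4.0   # 'math' is imported on first attribute access
    try:
        py.std.no_such_module            # hypothetical, unimportable module name
    except AttributeError:
        pass                             # unimportable names raise AttributeError
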
diff --git a/py/_xmlgen.py b/py/_xmlgen.py
index 1c83545..f4d4116 100644
--- a/py/_xmlgen.py
+++ b/py/_xmlgen.py
@@ -1,255 +1,255 @@
-"""
-module for generating and serializing xml and html structures
-by using simple python objects.
-
-(c) holger krekel, holger at merlinux eu. 2009
-"""
-import sys, re
-
-if sys.version_info >= (3,0):
- def u(s):
- return s
- def unicode(x, errors=None):
- if hasattr(x, '__unicode__'):
- return x.__unicode__()
- return str(x)
-else:
- def u(s):
- return unicode(s)
- unicode = unicode
-
-
-class NamespaceMetaclass(type):
- def __getattr__(self, name):
- if name[:1] == '_':
- raise AttributeError(name)
- if self == Namespace:
- raise ValueError("Namespace class is abstract")
- tagspec = self.__tagspec__
- if tagspec is not None and name not in tagspec:
- raise AttributeError(name)
- classattr = {}
- if self.__stickyname__:
- classattr['xmlname'] = name
- cls = type(name, (self.__tagclass__,), classattr)
- setattr(self, name, cls)
- return cls
-
-class Tag(list):
- class Attr(object):
- def __init__(self, **kwargs):
- self.__dict__.update(kwargs)
-
- def __init__(self, *args, **kwargs):
- super(Tag, self).__init__(args)
- self.attr = self.Attr(**kwargs)
-
- def __unicode__(self):
- return self.unicode(indent=0)
- __str__ = __unicode__
-
- def unicode(self, indent=2):
- l = []
- SimpleUnicodeVisitor(l.append, indent).visit(self)
- return u("").join(l)
-
- def __repr__(self):
- name = self.__class__.__name__
- return "<%r tag object %d>" % (name, id(self))
-
-Namespace = NamespaceMetaclass('Namespace', (object, ), {
- '__tagspec__': None,
- '__tagclass__': Tag,
- '__stickyname__': False,
-})
-
-class HtmlTag(Tag):
- def unicode(self, indent=2):
- l = []
- HtmlVisitor(l.append, indent, shortempty=False).visit(self)
- return u("").join(l)
-
-# exported plain html namespace
-class html(Namespace):
- __tagclass__ = HtmlTag
- __stickyname__ = True
- __tagspec__ = dict([(x,1) for x in (
- 'a,abbr,acronym,address,applet,area,article,aside,audio,b,'
- 'base,basefont,bdi,bdo,big,blink,blockquote,body,br,button,'
- 'canvas,caption,center,cite,code,col,colgroup,command,comment,'
- 'datalist,dd,del,details,dfn,dir,div,dl,dt,em,embed,'
- 'fieldset,figcaption,figure,footer,font,form,frame,frameset,h1,'
- 'h2,h3,h4,h5,h6,head,header,hgroup,hr,html,i,iframe,img,input,'
- 'ins,isindex,kbd,keygen,label,legend,li,link,listing,map,mark,'
- 'marquee,menu,meta,meter,multicol,nav,nobr,noembed,noframes,'
- 'noscript,object,ol,optgroup,option,output,p,param,pre,progress,'
- 'q,rp,rt,ruby,s,samp,script,section,select,small,source,span,'
- 'strike,strong,style,sub,summary,sup,table,tbody,td,textarea,'
- 'tfoot,th,thead,time,title,tr,track,tt,u,ul,xmp,var,video,wbr'
- ).split(',') if x])
-
- class Style(object):
- def __init__(self, **kw):
- for x, y in kw.items():
- x = x.replace('_', '-')
- setattr(self, x, y)
-
-
-class raw(object):
- """just a box that can contain a unicode string that will be
- included directly in the output"""
- def __init__(self, uniobj):
- self.uniobj = uniobj
-
-class SimpleUnicodeVisitor(object):
- """ recursive visitor to write unicode. """
- def __init__(self, write, indent=0, curindent=0, shortempty=True):
- self.write = write
- self.cache = {}
- self.visited = {} # for detection of recursion
- self.indent = indent
- self.curindent = curindent
- self.parents = []
- self.shortempty = shortempty # short empty tags or not
-
- def visit(self, node):
- """ dispatcher on node's class/bases name. """
- cls = node.__class__
- try:
- visitmethod = self.cache[cls]
- except KeyError:
- for subclass in cls.__mro__:
- visitmethod = getattr(self, subclass.__name__, None)
- if visitmethod is not None:
- break
- else:
- visitmethod = self.__object
- self.cache[cls] = visitmethod
- visitmethod(node)
-
- # the default fallback handler is marked private
- # to avoid clashes with the tag name object
- def __object(self, obj):
- #self.write(obj)
- self.write(escape(unicode(obj)))
-
- def raw(self, obj):
- self.write(obj.uniobj)
-
- def list(self, obj):
- assert id(obj) not in self.visited
- self.visited[id(obj)] = 1
- for elem in obj:
- self.visit(elem)
-
- def Tag(self, tag):
- assert id(tag) not in self.visited
- try:
- tag.parent = self.parents[-1]
- except IndexError:
- tag.parent = None
- self.visited[id(tag)] = 1
- tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
- if self.curindent and not self._isinline(tagname):
- self.write("\n" + u(' ') * self.curindent)
- if tag:
- self.curindent += self.indent
- self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
- self.parents.append(tag)
- for x in tag:
- self.visit(x)
- self.parents.pop()
- self.write(u('</%s>') % tagname)
- self.curindent -= self.indent
- else:
- nameattr = tagname+self.attributes(tag)
- if self._issingleton(tagname):
- self.write(u('<%s/>') % (nameattr,))
- else:
- self.write(u('<%s></%s>') % (nameattr, tagname))
-
- def attributes(self, tag):
- # serialize attributes
- attrlist = dir(tag.attr)
- attrlist.sort()
- l = []
- for name in attrlist:
- res = self.repr_attribute(tag.attr, name)
- if res is not None:
- l.append(res)
- l.extend(self.getstyle(tag))
- return u("").join(l)
-
- def repr_attribute(self, attrs, name):
- if name[:2] != '__':
- value = getattr(attrs, name)
- if name.endswith('_'):
- name = name[:-1]
- if isinstance(value, raw):
- insert = value.uniobj
- else:
- insert = escape(unicode(value))
- return ' %s="%s"' % (name, insert)
-
- def getstyle(self, tag):
- """ return attribute list suitable for styling. """
- try:
- styledict = tag.style.__dict__
- except AttributeError:
- return []
- else:
- stylelist = [x+': ' + y for x,y in styledict.items()]
- return [u(' style="%s"') % u('; ').join(stylelist)]
-
- def _issingleton(self, tagname):
- """can (and will) be overridden in subclasses"""
- return self.shortempty
-
- def _isinline(self, tagname):
- """can (and will) be overridden in subclasses"""
- return False
-
-class HtmlVisitor(SimpleUnicodeVisitor):
-
- single = dict([(x, 1) for x in
- ('br,img,area,param,col,hr,meta,link,base,'
- 'input,frame').split(',')])
- inline = dict([(x, 1) for x in
- ('a abbr acronym b basefont bdo big br cite code dfn em font '
- 'i img input kbd label q s samp select small span strike '
- 'strong sub sup textarea tt u var'.split(' '))])
-
- def repr_attribute(self, attrs, name):
- if name == 'class_':
- value = getattr(attrs, name)
- if value is None:
- return
- return super(HtmlVisitor, self).repr_attribute(attrs, name)
-
- def _issingleton(self, tagname):
- return tagname in self.single
-
- def _isinline(self, tagname):
- return tagname in self.inline
-
-
-class _escape:
- def __init__(self):
- self.escape = {
- u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
- u('&') : u('&amp;'), u("'") : u('&apos;'),
- }
- self.charef_rex = re.compile(u("|").join(self.escape.keys()))
-
- def _replacer(self, match):
- return self.escape[match.group(0)]
-
- def __call__(self, ustring):
- """ xml-escape the given unicode string. """
- try:
- ustring = unicode(ustring)
- except UnicodeDecodeError:
- ustring = unicode(ustring, 'utf-8', errors='replace')
- return self.charef_rex.sub(self._replacer, ustring)
-
-escape = _escape()
+"""
+module for generating and serializing xml and html structures
+by using simple python objects.
+
+(c) holger krekel, holger at merlinux eu. 2009
+"""
+import sys, re
+
+if sys.version_info >= (3,0):
+ def u(s):
+ return s
+ def unicode(x, errors=None):
+ if hasattr(x, '__unicode__'):
+ return x.__unicode__()
+ return str(x)
+else:
+ def u(s):
+ return unicode(s)
+ unicode = unicode
+
+
+class NamespaceMetaclass(type):
+ def __getattr__(self, name):
+ if name[:1] == '_':
+ raise AttributeError(name)
+ if self == Namespace:
+ raise ValueError("Namespace class is abstract")
+ tagspec = self.__tagspec__
+ if tagspec is not None and name not in tagspec:
+ raise AttributeError(name)
+ classattr = {}
+ if self.__stickyname__:
+ classattr['xmlname'] = name
+ cls = type(name, (self.__tagclass__,), classattr)
+ setattr(self, name, cls)
+ return cls
+
+class Tag(list):
+ class Attr(object):
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+ def __init__(self, *args, **kwargs):
+ super(Tag, self).__init__(args)
+ self.attr = self.Attr(**kwargs)
+
+ def __unicode__(self):
+ return self.unicode(indent=0)
+ __str__ = __unicode__
+
+ def unicode(self, indent=2):
+ l = []
+ SimpleUnicodeVisitor(l.append, indent).visit(self)
+ return u("").join(l)
+
+ def __repr__(self):
+ name = self.__class__.__name__
+ return "<%r tag object %d>" % (name, id(self))
+
+Namespace = NamespaceMetaclass('Namespace', (object, ), {
+ '__tagspec__': None,
+ '__tagclass__': Tag,
+ '__stickyname__': False,
+})
+
+class HtmlTag(Tag):
+ def unicode(self, indent=2):
+ l = []
+ HtmlVisitor(l.append, indent, shortempty=False).visit(self)
+ return u("").join(l)
+
+# exported plain html namespace
+class html(Namespace):
+ __tagclass__ = HtmlTag
+ __stickyname__ = True
+ __tagspec__ = dict([(x,1) for x in (
+ 'a,abbr,acronym,address,applet,area,article,aside,audio,b,'
+ 'base,basefont,bdi,bdo,big,blink,blockquote,body,br,button,'
+ 'canvas,caption,center,cite,code,col,colgroup,command,comment,'
+ 'datalist,dd,del,details,dfn,dir,div,dl,dt,em,embed,'
+ 'fieldset,figcaption,figure,footer,font,form,frame,frameset,h1,'
+ 'h2,h3,h4,h5,h6,head,header,hgroup,hr,html,i,iframe,img,input,'
+ 'ins,isindex,kbd,keygen,label,legend,li,link,listing,map,mark,'
+ 'marquee,menu,meta,meter,multicol,nav,nobr,noembed,noframes,'
+ 'noscript,object,ol,optgroup,option,output,p,param,pre,progress,'
+ 'q,rp,rt,ruby,s,samp,script,section,select,small,source,span,'
+ 'strike,strong,style,sub,summary,sup,table,tbody,td,textarea,'
+ 'tfoot,th,thead,time,title,tr,track,tt,u,ul,xmp,var,video,wbr'
+ ).split(',') if x])
+
+ class Style(object):
+ def __init__(self, **kw):
+ for x, y in kw.items():
+ x = x.replace('_', '-')
+ setattr(self, x, y)
+
+
+class raw(object):
+ """just a box that can contain a unicode string that will be
+ included directly in the output"""
+ def __init__(self, uniobj):
+ self.uniobj = uniobj
+
+class SimpleUnicodeVisitor(object):
+ """ recursive visitor to write unicode. """
+ def __init__(self, write, indent=0, curindent=0, shortempty=True):
+ self.write = write
+ self.cache = {}
+ self.visited = {} # for detection of recursion
+ self.indent = indent
+ self.curindent = curindent
+ self.parents = []
+ self.shortempty = shortempty # short empty tags or not
+
+ def visit(self, node):
+ """ dispatcher on node's class/bases name. """
+ cls = node.__class__
+ try:
+ visitmethod = self.cache[cls]
+ except KeyError:
+ for subclass in cls.__mro__:
+ visitmethod = getattr(self, subclass.__name__, None)
+ if visitmethod is not None:
+ break
+ else:
+ visitmethod = self.__object
+ self.cache[cls] = visitmethod
+ visitmethod(node)
+
+ # the default fallback handler is marked private
+ # to avoid clashes with the tag name object
+ def __object(self, obj):
+ #self.write(obj)
+ self.write(escape(unicode(obj)))
+
+ def raw(self, obj):
+ self.write(obj.uniobj)
+
+ def list(self, obj):
+ assert id(obj) not in self.visited
+ self.visited[id(obj)] = 1
+ for elem in obj:
+ self.visit(elem)
+
+ def Tag(self, tag):
+ assert id(tag) not in self.visited
+ try:
+ tag.parent = self.parents[-1]
+ except IndexError:
+ tag.parent = None
+ self.visited[id(tag)] = 1
+ tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
+ if self.curindent and not self._isinline(tagname):
+ self.write("\n" + u(' ') * self.curindent)
+ if tag:
+ self.curindent += self.indent
+ self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
+ self.parents.append(tag)
+ for x in tag:
+ self.visit(x)
+ self.parents.pop()
+ self.write(u('</%s>') % tagname)
+ self.curindent -= self.indent
+ else:
+ nameattr = tagname+self.attributes(tag)
+ if self._issingleton(tagname):
+ self.write(u('<%s/>') % (nameattr,))
+ else:
+ self.write(u('<%s></%s>') % (nameattr, tagname))
+
+ def attributes(self, tag):
+ # serialize attributes
+ attrlist = dir(tag.attr)
+ attrlist.sort()
+ l = []
+ for name in attrlist:
+ res = self.repr_attribute(tag.attr, name)
+ if res is not None:
+ l.append(res)
+ l.extend(self.getstyle(tag))
+ return u("").join(l)
+
+ def repr_attribute(self, attrs, name):
+ if name[:2] != '__':
+ value = getattr(attrs, name)
+ if name.endswith('_'):
+ name = name[:-1]
+ if isinstance(value, raw):
+ insert = value.uniobj
+ else:
+ insert = escape(unicode(value))
+ return ' %s="%s"' % (name, insert)
+
+ def getstyle(self, tag):
+ """ return attribute list suitable for styling. """
+ try:
+ styledict = tag.style.__dict__
+ except AttributeError:
+ return []
+ else:
+ stylelist = [x+': ' + y for x,y in styledict.items()]
+ return [u(' style="%s"') % u('; ').join(stylelist)]
+
+ def _issingleton(self, tagname):
+ """can (and will) be overridden in subclasses"""
+ return self.shortempty
+
+ def _isinline(self, tagname):
+ """can (and will) be overridden in subclasses"""
+ return False
+
+class HtmlVisitor(SimpleUnicodeVisitor):
+
+ single = dict([(x, 1) for x in
+ ('br,img,area,param,col,hr,meta,link,base,'
+ 'input,frame').split(',')])
+ inline = dict([(x, 1) for x in
+ ('a abbr acronym b basefont bdo big br cite code dfn em font '
+ 'i img input kbd label q s samp select small span strike '
+ 'strong sub sup textarea tt u var'.split(' '))])
+
+ def repr_attribute(self, attrs, name):
+ if name == 'class_':
+ value = getattr(attrs, name)
+ if value is None:
+ return
+ return super(HtmlVisitor, self).repr_attribute(attrs, name)
+
+ def _issingleton(self, tagname):
+ return tagname in self.single
+
+ def _isinline(self, tagname):
+ return tagname in self.inline
+
+
+class _escape:
+ def __init__(self):
+ self.escape = {
+ u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
+ u('&') : u('&amp;'), u("'") : u('&apos;'),
+ }
+ self.charef_rex = re.compile(u("|").join(self.escape.keys()))
+
+ def _replacer(self, match):
+ return self.escape[match.group(0)]
+
+ def __call__(self, ustring):
+ """ xml-escape the given unicode string. """
+ try:
+ ustring = unicode(ustring)
+ except UnicodeDecodeError:
+ ustring = unicode(ustring, 'utf-8', errors='replace')
+ return self.charef_rex.sub(self._replacer, ustring)
+
+escape = _escape()
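
As a quick illustration of the html namespace defined above (exposed by the py library as py.xml.html): attribute names with a trailing underscore are stripped and values are escaped when the tag tree is serialized:

    import py

    para = py.xml.html.p("hello ", py.xml.html.b("world"), class_="greeting")
    print(para.unicode(indent=0))
    # renders as: <p class="greeting">hello <b>world</b></p>
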
diff --git a/py/test.py b/py/test.py
index aa5beb1..84e8881 100644
--- a/py/test.py
+++ b/py/test.py
@@ -1,10 +1,10 @@
-import sys
-if __name__ == '__main__':
- import pytest
- sys.exit(pytest.main())
-else:
- import sys, pytest
- sys.modules['py.test'] = pytest
-
-# for more API entry points see the 'tests' definition
-# in __init__.py
+import sys
+if __name__ == '__main__':
+ import pytest
+ sys.exit(pytest.main())
+else:
+ import sys, pytest
+ sys.modules['py.test'] = pytest
+
+# for more API entry points see the 'tests' definition
+# in __init__.py
diff --git a/setup.cfg b/setup.cfg
index be0b2a5..d146a93 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,11 +1,14 @@
-[wheel]
-universal = 1
-
-[devpi:upload]
-formats = sdist.tgz,bdist_wheel
-
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
-
+[wheel]
+universal = 1
+
+[metadata]
+license_file = LICENSE
+
+[devpi:upload]
+formats = sdist.tgz,bdist_wheel
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/setup.py b/setup.py
index 5bcf754..5e11480 100644
--- a/setup.py
+++ b/setup.py
@@ -1,50 +1,50 @@
-import os
-import sys
-
-from setuptools import setup
-
-
-def get_version():
- p = os.path.join(os.path.dirname(
- os.path.abspath(__file__)), "py", "__init__.py")
- with open(p) as f:
- for line in f.readlines():
- if "__version__" in line:
- return line.strip().split("=")[-1].strip(" '")
- raise ValueError("could not read version")
-
-
-def main():
- setup(
- name='py',
- description='library with cross-python path, ini-parsing, io, code, log facilities',
- long_description=open('README.rst').read(),
- version=get_version(),
- url='http://pylib.readthedocs.org/',
- license='MIT license',
- platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
- author='holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others',
- author_email='pytest-dev@python.org',
- classifiers=['Development Status :: 6 - Mature',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved :: MIT License',
- 'Operating System :: POSIX',
- 'Operating System :: Microsoft :: Windows',
- 'Operating System :: MacOS :: MacOS X',
- 'Topic :: Software Development :: Testing',
- 'Topic :: Software Development :: Libraries',
- 'Topic :: Utilities',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3'],
- packages=['py',
- 'py._code',
- 'py._io',
- 'py._log',
- 'py._path',
- 'py._process',
- ],
- zip_safe=False,
- )
-
-if __name__ == '__main__':
- main()
+import os
+import sys
+
+from setuptools import setup
+
+
+def get_version():
+ p = os.path.join(os.path.dirname(
+ os.path.abspath(__file__)), "py", "__init__.py")
+ with open(p) as f:
+ for line in f.readlines():
+ if "__version__" in line:
+ return line.strip().split("=")[-1].strip(" '")
+ raise ValueError("could not read version")
+
+
+def main():
+ setup(
+ name='py',
+ description='library with cross-python path, ini-parsing, io, code, log facilities',
+ long_description=open('README.rst').read(),
+ version=get_version(),
+ url='http://py.readthedocs.io/',
+ license='MIT license',
+ platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
+ author='holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others',
+ author_email='pytest-dev@python.org',
+ classifiers=['Development Status :: 6 - Mature',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
+ 'Operating System :: POSIX',
+ 'Operating System :: Microsoft :: Windows',
+ 'Operating System :: MacOS :: MacOS X',
+ 'Topic :: Software Development :: Testing',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: Utilities',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3'],
+ packages=['py',
+ 'py._code',
+ 'py._io',
+ 'py._log',
+ 'py._path',
+ 'py._process',
+ ],
+ zip_safe=False,
+ )
+
+if __name__ == '__main__':
+ main()
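
get_version() above does plain line parsing rather than importing the package; a minimal sketch with a hypothetical version line:

    line = "__version__ = '1.2.3'"       # example contents of a line in py/__init__.py
    version = line.strip().split("=")[-1].strip(" '")
    assert version == "1.2.3"
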
diff --git a/testing/__pycache__/conftest.cpython-27-PYTEST.pyc b/testing/__pycache__/conftest.cpython-27-PYTEST.pyc
deleted file mode 100644
index 21fc9c1..0000000
--- a/testing/__pycache__/conftest.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/__pycache__/conftest.cpython-35-PYTEST.pyc b/testing/__pycache__/conftest.cpython-35-PYTEST.pyc
deleted file mode 100644
index 22e688f..0000000
--- a/testing/__pycache__/conftest.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/__pycache__/conftest.cpython-35.pyc b/testing/__pycache__/conftest.cpython-35.pyc
deleted file mode 100644
index eec1665..0000000
--- a/testing/__pycache__/conftest.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/__pycache__/test_iniconfig.cpython-27-PYTEST.pyc b/testing/__pycache__/test_iniconfig.cpython-27-PYTEST.pyc
deleted file mode 100644
index aa82c34..0000000
--- a/testing/__pycache__/test_iniconfig.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/__pycache__/test_iniconfig.cpython-35-PYTEST.pyc b/testing/__pycache__/test_iniconfig.cpython-35-PYTEST.pyc
deleted file mode 100644
index f6b804c..0000000
--- a/testing/__pycache__/test_iniconfig.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_assertion.cpython-27-PYTEST.pyc b/testing/code/__pycache__/test_assertion.cpython-27-PYTEST.pyc
deleted file mode 100644
index bf15428..0000000
--- a/testing/code/__pycache__/test_assertion.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_assertion.cpython-35-PYTEST.pyc b/testing/code/__pycache__/test_assertion.cpython-35-PYTEST.pyc
deleted file mode 100644
index efcf73c..0000000
--- a/testing/code/__pycache__/test_assertion.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_code.cpython-27-PYTEST.pyc b/testing/code/__pycache__/test_code.cpython-27-PYTEST.pyc
deleted file mode 100644
index 0ce9c7b..0000000
--- a/testing/code/__pycache__/test_code.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_code.cpython-35-PYTEST.pyc b/testing/code/__pycache__/test_code.cpython-35-PYTEST.pyc
deleted file mode 100644
index c543cec..0000000
--- a/testing/code/__pycache__/test_code.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_excinfo.cpython-27-PYTEST.pyc b/testing/code/__pycache__/test_excinfo.cpython-27-PYTEST.pyc
deleted file mode 100644
index a966d10..0000000
--- a/testing/code/__pycache__/test_excinfo.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_excinfo.cpython-35-PYTEST.pyc b/testing/code/__pycache__/test_excinfo.cpython-35-PYTEST.pyc
deleted file mode 100644
index 148a59c..0000000
--- a/testing/code/__pycache__/test_excinfo.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_source.cpython-27-PYTEST.pyc b/testing/code/__pycache__/test_source.cpython-27-PYTEST.pyc
deleted file mode 100644
index 2643849..0000000
--- a/testing/code/__pycache__/test_source.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/__pycache__/test_source.cpython-35-PYTEST.pyc b/testing/code/__pycache__/test_source.cpython-35-PYTEST.pyc
deleted file mode 100644
index 053e995..0000000
--- a/testing/code/__pycache__/test_source.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/code/test_assertion.py b/testing/code/test_assertion.py
index e2154d0..5e02079 100644
--- a/testing/code/test_assertion.py
+++ b/testing/code/test_assertion.py
@@ -1,308 +1,308 @@
-import pytest, py
-
-def exvalue():
- return py.std.sys.exc_info()[1]
-
-def f():
- return 2
-
-def test_assert():
- try:
- assert f() == 3
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith('assert 2 == 3\n')
-
-
-def test_assert_within_finally():
- excinfo = py.test.raises(ZeroDivisionError, """
- try:
- 1/0
- finally:
- i = 42
- """)
- s = excinfo.exconly()
- assert py.std.re.search("division.+by zero", s) is not None
-
- #def g():
- # A.f()
- #excinfo = getexcinfo(TypeError, g)
- #msg = getmsg(excinfo)
- #assert msg.find("must be called with A") != -1
-
-
-def test_assert_multiline_1():
- try:
- assert (f() ==
- 3)
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith('assert 2 == 3\n')
-
-def test_assert_multiline_2():
- try:
- assert (f() == (4,
- 3)[-1])
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith('assert 2 ==')
-
-def test_in():
- try:
- assert "hi" in [1, 2]
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith("assert 'hi' in")
-
-def test_is():
- try:
- assert 1 is 2
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith("assert 1 is 2")
-
-
-@py.test.mark.skipif("sys.version_info < (2,6)")
-def test_attrib():
- class Foo(object):
- b = 1
- i = Foo()
- try:
- assert i.b == 2
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith("assert 1 == 2")
-
-@py.test.mark.skipif("sys.version_info < (2,6)")
-def test_attrib_inst():
- class Foo(object):
- b = 1
- try:
- assert Foo().b == 2
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith("assert 1 == 2")
-
-def test_len():
- l = list(range(42))
- try:
- assert len(l) == 100
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert s.startswith("assert 42 == 100")
- assert "where 42 = len([" in s
-
-
-def test_assert_keyword_arg():
- def f(x=3):
- return False
- try:
- assert f(x=5)
- except AssertionError:
- e = exvalue()
- assert "x=5" in e.msg
-
-# These tests should both fail, but should fail nicely...
-class WeirdRepr:
- def __repr__(self):
- return '<WeirdRepr\nsecond line>'
-
-def bug_test_assert_repr():
- v = WeirdRepr()
- try:
- assert v == 1
- except AssertionError:
- e = exvalue()
- assert e.msg.find('WeirdRepr') != -1
- assert e.msg.find('second line') != -1
- assert 0
-
-def test_assert_non_string():
- try:
- assert 0, ['list']
- except AssertionError:
- e = exvalue()
- assert e.msg.find("list") != -1
-
-def test_assert_implicit_multiline():
- try:
- x = [1,2,3]
- assert x != [1,
- 2, 3]
- except AssertionError:
- e = exvalue()
- assert e.msg.find('assert [1, 2, 3] !=') != -1
-
-
-def test_assert_with_brokenrepr_arg():
- class BrokenRepr:
- def __repr__(self): 0 / 0
- e = AssertionError(BrokenRepr())
- if e.msg.find("broken __repr__") == -1:
- py.test.fail("broken __repr__ not handle correctly")
-
-def test_multiple_statements_per_line():
- try:
- a = 1; assert a == 2
- except AssertionError:
- e = exvalue()
- assert "assert 1 == 2" in e.msg
-
-def test_power():
- try:
- assert 2**3 == 7
- except AssertionError:
- e = exvalue()
- assert "assert (2 ** 3) == 7" in e.msg
-
-
-class TestView:
-
- def setup_class(cls):
- cls.View = py.test.importorskip("py._code._assertionold").View
-
- def test_class_dispatch(self):
- ### Use a custom class hierarchy with existing instances
-
- class Picklable(self.View):
- pass
-
- class Simple(Picklable):
- __view__ = object
- def pickle(self):
- return repr(self.__obj__)
-
- class Seq(Picklable):
- __view__ = list, tuple, dict
- def pickle(self):
- return ';'.join(
- [Picklable(item).pickle() for item in self.__obj__])
-
- class Dict(Seq):
- __view__ = dict
- def pickle(self):
- return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
-
- assert Picklable(123).pickle() == '123'
- assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
- assert Picklable({1:2}).pickle() == '1!2'
-
- def test_viewtype_class_hierarchy(self):
- # Use a custom class hierarchy based on attributes of existing instances
- class Operation:
- "Existing class that I don't want to change."
- def __init__(self, opname, *args):
- self.opname = opname
- self.args = args
-
- existing = [Operation('+', 4, 5),
- Operation('getitem', '', 'join'),
- Operation('setattr', 'x', 'y', 3),
- Operation('-', 12, 1)]
-
- class PyOp(self.View):
- def __viewkey__(self):
- return self.opname
- def generate(self):
- return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
-
- class PyBinaryOp(PyOp):
- __view__ = ('+', '-', '*', '/')
- def generate(self):
- return '%s %s %s' % (self.args[0], self.opname, self.args[1])
-
- codelines = [PyOp(op).generate() for op in existing]
- assert codelines == ["4 + 5", "getitem('', 'join')",
- "setattr('x', 'y', 3)", "12 - 1"]
-
-def test_underscore_api():
- py.code._AssertionError
- py.code._reinterpret_old # used by pypy
- py.code._reinterpret
-
-@py.test.mark.skipif("sys.version_info < (2,6)")
-def test_assert_customizable_reprcompare(monkeypatch):
- util = pytest.importorskip("_pytest.assertion.util")
- monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
- try:
- assert 3 == 4
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert "hello" in s
-
-def test_assert_long_source_1():
- try:
- assert len == [
- (None, ['somet text', 'more text']),
- ]
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert 're-run' not in s
- assert 'somet text' in s
-
-def test_assert_long_source_2():
- try:
- assert(len == [
- (None, ['somet text', 'more text']),
- ])
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert 're-run' not in s
- assert 'somet text' in s
-
-def test_assert_raise_alias(testdir):
- testdir.makepyfile("""
- import sys
- EX = AssertionError
- def test_hello():
- raise EX("hello"
- "multi"
- "line")
- """)
- result = testdir.runpytest()
- result.stdout.fnmatch_lines([
- "*def test_hello*",
- "*raise EX*",
- "*1 failed*",
- ])
-
-
-@pytest.mark.skipif("sys.version_info < (2,5)")
-def test_assert_raise_subclass():
- class SomeEx(AssertionError):
- def __init__(self, *args):
- super(SomeEx, self).__init__()
- try:
- raise SomeEx("hello")
- except AssertionError:
- s = str(exvalue())
- assert 're-run' not in s
- assert 'could not determine' in s
-
-def test_assert_raises_in_nonzero_of_object_pytest_issue10():
- class A(object):
- def __nonzero__(self):
- raise ValueError(42)
- def __lt__(self, other):
- return A()
- def __repr__(self):
- return "<MY42 object>"
- def myany(x):
- return True
- try:
- assert not(myany(A() < 0))
- except AssertionError:
- e = exvalue()
- s = str(e)
- assert "<MY42 object> < 0" in s
+import pytest, py
+
+def exvalue():
+ return py.std.sys.exc_info()[1]
+
+def f():
+ return 2
+
+def test_assert():
+ try:
+ assert f() == 3
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 == 3\n')
+
+
+def test_assert_within_finally():
+ excinfo = py.test.raises(ZeroDivisionError, """
+ try:
+ 1/0
+ finally:
+ i = 42
+ """)
+ s = excinfo.exconly()
+ assert py.std.re.search("division.+by zero", s) is not None
+
+ #def g():
+ # A.f()
+ #excinfo = getexcinfo(TypeError, g)
+ #msg = getmsg(excinfo)
+ #assert msg.find("must be called with A") != -1
+
+
+def test_assert_multiline_1():
+ try:
+ assert (f() ==
+ 3)
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 == 3\n')
+
+def test_assert_multiline_2():
+ try:
+ assert (f() == (4,
+ 3)[-1])
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 ==')
+
+def test_in():
+ try:
+ assert "hi" in [1, 2]
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 'hi' in")
+
+def test_is():
+ try:
+ assert 1 is 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 is 2")
+
+
+@py.test.mark.skipif("sys.version_info < (2,6)")
+def test_attrib():
+ class Foo(object):
+ b = 1
+ i = Foo()
+ try:
+ assert i.b == 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 == 2")
+
+@py.test.mark.skipif("sys.version_info < (2,6)")
+def test_attrib_inst():
+ class Foo(object):
+ b = 1
+ try:
+ assert Foo().b == 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 == 2")
+
+def test_len():
+ l = list(range(42))
+ try:
+ assert len(l) == 100
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 42 == 100")
+ assert "where 42 = len([" in s
+
+
+def test_assert_keyword_arg():
+ def f(x=3):
+ return False
+ try:
+ assert f(x=5)
+ except AssertionError:
+ e = exvalue()
+ assert "x=5" in str(e)
+
+# These tests should both fail, but should fail nicely...
+class WeirdRepr:
+ def __repr__(self):
+ return '<WeirdRepr\nsecond line>'
+
+def bug_test_assert_repr():
+ v = WeirdRepr()
+ try:
+ assert v == 1
+ except AssertionError:
+ e = exvalue()
+ assert str(e).find('WeirdRepr') != -1
+ assert str(e).find('second line') != -1
+ assert 0
+
+def test_assert_non_string():
+ try:
+ assert 0, ['list']
+ except AssertionError:
+ e = exvalue()
+ assert str(e).find("list") != -1
+
+def test_assert_implicit_multiline():
+ try:
+ x = [1,2,3]
+ assert x != [1,
+ 2, 3]
+ except AssertionError:
+ e = exvalue()
+ assert str(e).find('assert [1, 2, 3] !=') != -1
+
+
+def test_assert_with_brokenrepr_arg():
+ class BrokenRepr:
+ def __repr__(self): 0 / 0
+ e = AssertionError(BrokenRepr())
+ if e.msg.find("broken __repr__") == -1:
+ py.test.fail("broken __repr__ not handle correctly")
+
+def test_multiple_statements_per_line():
+ try:
+ a = 1; assert a == 2
+ except AssertionError:
+ e = exvalue()
+ assert "assert 1 == 2" in str(e)
+
+def test_power():
+ try:
+ assert 2**3 == 7
+ except AssertionError:
+ e = exvalue()
+ assert "assert (2 ** 3) == 7" in str(e)
+
+
+class TestView:
+
+ def setup_class(cls):
+ cls.View = py.test.importorskip("py._code._assertionold").View
+
+ def test_class_dispatch(self):
+ ### Use a custom class hierarchy with existing instances
+
+ class Picklable(self.View):
+ pass
+
+ class Simple(Picklable):
+ __view__ = object
+ def pickle(self):
+ return repr(self.__obj__)
+
+ class Seq(Picklable):
+ __view__ = list, tuple, dict
+ def pickle(self):
+ return ';'.join(
+ [Picklable(item).pickle() for item in self.__obj__])
+
+ class Dict(Seq):
+ __view__ = dict
+ def pickle(self):
+ return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
+
+ assert Picklable(123).pickle() == '123'
+ assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
+ assert Picklable({1:2}).pickle() == '1!2'
+
+ def test_viewtype_class_hierarchy(self):
+ # Use a custom class hierarchy based on attributes of existing instances
+ class Operation:
+ "Existing class that I don't want to change."
+ def __init__(self, opname, *args):
+ self.opname = opname
+ self.args = args
+
+ existing = [Operation('+', 4, 5),
+ Operation('getitem', '', 'join'),
+ Operation('setattr', 'x', 'y', 3),
+ Operation('-', 12, 1)]
+
+ class PyOp(self.View):
+ def __viewkey__(self):
+ return self.opname
+ def generate(self):
+ return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
+
+ class PyBinaryOp(PyOp):
+ __view__ = ('+', '-', '*', '/')
+ def generate(self):
+ return '%s %s %s' % (self.args[0], self.opname, self.args[1])
+
+ codelines = [PyOp(op).generate() for op in existing]
+ assert codelines == ["4 + 5", "getitem('', 'join')",
+ "setattr('x', 'y', 3)", "12 - 1"]
+
+def test_underscore_api():
+ py.code._AssertionError
+ py.code._reinterpret_old # used by pypy
+ py.code._reinterpret
+
+@py.test.mark.skipif("sys.version_info < (2,6)")
+def test_assert_customizable_reprcompare(monkeypatch):
+ util = pytest.importorskip("_pytest.assertion.util")
+ monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
+ try:
+ assert 3 == 4
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert "hello" in s
+
+def test_assert_long_source_1():
+ try:
+ assert len == [
+ (None, ['somet text', 'more text']),
+ ]
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert 're-run' not in s
+ assert 'somet text' in s
+
+def test_assert_long_source_2():
+ try:
+ assert(len == [
+ (None, ['somet text', 'more text']),
+ ])
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert 're-run' not in s
+ assert 'somet text' in s
+
+def test_assert_raise_alias(testdir):
+ testdir.makepyfile("""
+ import sys
+ EX = AssertionError
+ def test_hello():
+ raise EX("hello"
+ "multi"
+ "line")
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*def test_hello*",
+ "*raise EX*",
+ "*1 failed*",
+ ])
+
+
+@pytest.mark.skipif("sys.version_info < (2,5)")
+def test_assert_raise_subclass():
+ class SomeEx(AssertionError):
+ def __init__(self, *args):
+ super(SomeEx, self).__init__()
+ try:
+ raise SomeEx("hello")
+ except AssertionError as e:
+ s = str(e)
+ assert 're-run' not in s
+ assert 'could not determine' in s
+
+def test_assert_raises_in_nonzero_of_object_pytest_issue10():
+ class A(object):
+ def __nonzero__(self):
+ raise ValueError(42)
+ def __lt__(self, other):
+ return A()
+ def __repr__(self):
+ return "<MY42 object>"
+ def myany(x):
+ return True
+ try:
+ assert not(myany(A() < 0))
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert "<MY42 object> < 0" in s
diff --git a/testing/code/test_code.py b/testing/code/test_code.py
index 28ec628..61e3ee7 100644
--- a/testing/code/test_code.py
+++ b/testing/code/test_code.py
@@ -1,159 +1,159 @@
-import py
-import sys
-
-def test_ne():
- code1 = py.code.Code(compile('foo = "bar"', '', 'exec'))
- assert code1 == code1
- code2 = py.code.Code(compile('foo = "baz"', '', 'exec'))
- assert code2 != code1
-
-def test_code_gives_back_name_for_not_existing_file():
- name = 'abc-123'
- co_code = compile("pass\n", name, 'exec')
- assert co_code.co_filename == name
- code = py.code.Code(co_code)
- assert str(code.path) == name
- assert code.fullsource is None
-
-def test_code_with_class():
- class A:
- pass
- py.test.raises(TypeError, "py.code.Code(A)")
-
-if True:
- def x():
- pass
-
-def test_code_fullsource():
- code = py.code.Code(x)
- full = code.fullsource
- assert 'test_code_fullsource()' in str(full)
-
-def test_code_source():
- code = py.code.Code(x)
- src = code.source()
- expected = """def x():
- pass"""
- assert str(src) == expected
-
-def test_frame_getsourcelineno_myself():
- def func():
- return sys._getframe(0)
- f = func()
- f = py.code.Frame(f)
- source, lineno = f.code.fullsource, f.lineno
- assert source[lineno].startswith(" return sys._getframe(0)")
-
-def test_getstatement_empty_fullsource():
- def func():
- return sys._getframe(0)
- f = func()
- f = py.code.Frame(f)
- prop = f.code.__class__.fullsource
- try:
- f.code.__class__.fullsource = None
- assert f.statement == py.code.Source("")
- finally:
- f.code.__class__.fullsource = prop
-
-def test_code_from_func():
- co = py.code.Code(test_frame_getsourcelineno_myself)
- assert co.firstlineno
- assert co.path
-
-
-
-def test_builtin_patch_unpatch(monkeypatch):
- cpy_builtin = py.builtin.builtins
- comp = cpy_builtin.compile
- def mycompile(*args, **kwargs):
- return comp(*args, **kwargs)
- class Sub(AssertionError):
- pass
- monkeypatch.setattr(cpy_builtin, 'AssertionError', Sub)
- monkeypatch.setattr(cpy_builtin, 'compile', mycompile)
- py.code.patch_builtins()
- assert cpy_builtin.AssertionError != Sub
- assert cpy_builtin.compile != mycompile
- py.code.unpatch_builtins()
- assert cpy_builtin.AssertionError is Sub
- assert cpy_builtin.compile == mycompile
-
-
-def test_unicode_handling():
- value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
- def f():
- raise Exception(value)
- excinfo = py.test.raises(Exception, f)
- s = str(excinfo)
- if sys.version_info[0] < 3:
- u = unicode(excinfo)
-
-def test_code_getargs():
- def f1(x):
- pass
- c1 = py.code.Code(f1)
- assert c1.getargs(var=True) == ('x',)
-
- def f2(x, *y):
- pass
- c2 = py.code.Code(f2)
- assert c2.getargs(var=True) == ('x', 'y')
-
- def f3(x, **z):
- pass
- c3 = py.code.Code(f3)
- assert c3.getargs(var=True) == ('x', 'z')
-
- def f4(x, *y, **z):
- pass
- c4 = py.code.Code(f4)
- assert c4.getargs(var=True) == ('x', 'y', 'z')
-
-
-def test_frame_getargs():
- def f1(x):
- return sys._getframe(0)
- fr1 = py.code.Frame(f1('a'))
- assert fr1.getargs(var=True) == [('x', 'a')]
-
- def f2(x, *y):
- return sys._getframe(0)
- fr2 = py.code.Frame(f2('a', 'b', 'c'))
- assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
-
- def f3(x, **z):
- return sys._getframe(0)
- fr3 = py.code.Frame(f3('a', b='c'))
- assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
-
- def f4(x, *y, **z):
- return sys._getframe(0)
- fr4 = py.code.Frame(f4('a', 'b', c='d'))
- assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
- ('z', {'c': 'd'})]
-
-
-class TestExceptionInfo:
-
- def test_bad_getsource(self):
- try:
- if False: pass
- else: assert False
- except AssertionError:
- exci = py.code.ExceptionInfo()
- assert exci.getrepr()
-
-
-class TestTracebackEntry:
-
- def test_getsource(self):
- try:
- if False: pass
- else: assert False
- except AssertionError:
- exci = py.code.ExceptionInfo()
- entry = exci.traceback[0]
- source = entry.getsource()
- assert len(source) == 4
- assert 'else: assert False' in source[3]
+import py
+import sys
+
+def test_ne():
+ code1 = py.code.Code(compile('foo = "bar"', '', 'exec'))
+ assert code1 == code1
+ code2 = py.code.Code(compile('foo = "baz"', '', 'exec'))
+ assert code2 != code1
+
+def test_code_gives_back_name_for_not_existing_file():
+ name = 'abc-123'
+ co_code = compile("pass\n", name, 'exec')
+ assert co_code.co_filename == name
+ code = py.code.Code(co_code)
+ assert str(code.path) == name
+ assert code.fullsource is None
+
+def test_code_with_class():
+ class A:
+ pass
+ py.test.raises(TypeError, "py.code.Code(A)")
+
+if True:
+ def x():
+ pass
+
+def test_code_fullsource():
+ code = py.code.Code(x)
+ full = code.fullsource
+ assert 'test_code_fullsource()' in str(full)
+
+def test_code_source():
+ code = py.code.Code(x)
+ src = code.source()
+ expected = """def x():
+ pass"""
+ assert str(src) == expected
+
+def test_frame_getsourcelineno_myself():
+ def func():
+ return sys._getframe(0)
+ f = func()
+ f = py.code.Frame(f)
+ source, lineno = f.code.fullsource, f.lineno
+ assert source[lineno].startswith(" return sys._getframe(0)")
+
+def test_getstatement_empty_fullsource():
+ def func():
+ return sys._getframe(0)
+ f = func()
+ f = py.code.Frame(f)
+ prop = f.code.__class__.fullsource
+ try:
+ f.code.__class__.fullsource = None
+ assert f.statement == py.code.Source("")
+ finally:
+ f.code.__class__.fullsource = prop
+
+def test_code_from_func():
+ co = py.code.Code(test_frame_getsourcelineno_myself)
+ assert co.firstlineno
+ assert co.path
+
+
+
+def test_builtin_patch_unpatch(monkeypatch):
+ cpy_builtin = py.builtin.builtins
+ comp = cpy_builtin.compile
+ def mycompile(*args, **kwargs):
+ return comp(*args, **kwargs)
+ class Sub(AssertionError):
+ pass
+ monkeypatch.setattr(cpy_builtin, 'AssertionError', Sub)
+ monkeypatch.setattr(cpy_builtin, 'compile', mycompile)
+ py.code.patch_builtins()
+ assert cpy_builtin.AssertionError != Sub
+ assert cpy_builtin.compile != mycompile
+ py.code.unpatch_builtins()
+ assert cpy_builtin.AssertionError is Sub
+ assert cpy_builtin.compile == mycompile
+
+
+def test_unicode_handling():
+ value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+ def f():
+ raise Exception(value)
+ excinfo = py.test.raises(Exception, f)
+ s = str(excinfo)
+ if sys.version_info[0] < 3:
+ u = unicode(excinfo)
+
+def test_code_getargs():
+ def f1(x):
+ pass
+ c1 = py.code.Code(f1)
+ assert c1.getargs(var=True) == ('x',)
+
+ def f2(x, *y):
+ pass
+ c2 = py.code.Code(f2)
+ assert c2.getargs(var=True) == ('x', 'y')
+
+ def f3(x, **z):
+ pass
+ c3 = py.code.Code(f3)
+ assert c3.getargs(var=True) == ('x', 'z')
+
+ def f4(x, *y, **z):
+ pass
+ c4 = py.code.Code(f4)
+ assert c4.getargs(var=True) == ('x', 'y', 'z')
+
+
+def test_frame_getargs():
+ def f1(x):
+ return sys._getframe(0)
+ fr1 = py.code.Frame(f1('a'))
+ assert fr1.getargs(var=True) == [('x', 'a')]
+
+ def f2(x, *y):
+ return sys._getframe(0)
+ fr2 = py.code.Frame(f2('a', 'b', 'c'))
+ assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
+
+ def f3(x, **z):
+ return sys._getframe(0)
+ fr3 = py.code.Frame(f3('a', b='c'))
+ assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
+
+ def f4(x, *y, **z):
+ return sys._getframe(0)
+ fr4 = py.code.Frame(f4('a', 'b', c='d'))
+ assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
+ ('z', {'c': 'd'})]
+
+
+class TestExceptionInfo:
+
+ def test_bad_getsource(self):
+ try:
+ if False: pass
+ else: assert False
+ except AssertionError:
+ exci = py.code.ExceptionInfo()
+ assert exci.getrepr()
+
+
+class TestTracebackEntry:
+
+ def test_getsource(self):
+ try:
+ if False: pass
+ else: assert False
+ except AssertionError:
+ exci = py.code.ExceptionInfo()
+ entry = exci.traceback[0]
+ source = entry.getsource()
+ assert len(source) == 4
+ assert 'else: assert False' in source[3]
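
A compact sketch of the py.code objects these tests exercise, using only calls that appear in the surrounding test modules:

    import py

    def boom():
        raise ValueError("nope")

    try:
        boom()
    except ValueError:
        excinfo = py.code.ExceptionInfo()              # wraps sys.exc_info()
        assert excinfo.errisinstance(ValueError)
        assert excinfo.exconly().startswith("ValueError")
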
diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py
index 8f19f65..2008309 100644
--- a/testing/code/test_excinfo.py
+++ b/testing/code/test_excinfo.py
@@ -1,915 +1,915 @@
-# -*- coding: utf-8 -*-
-
-import py
-from py._code.code import FormattedExcinfo, ReprExceptionInfo
-queue = py.builtin._tryimport('queue', 'Queue')
-
-failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
-from test_source import astonly
-
-try:
- import importlib
-except ImportError:
- invalidate_import_caches = None
-else:
- invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
-
-import pytest
-pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
-
-class TWMock:
- def __init__(self):
- self.lines = []
- def sep(self, sep, line=None):
- self.lines.append((sep, line))
- def line(self, line, **kw):
- self.lines.append(line)
- def markup(self, text, **kw):
- return text
-
- fullwidth = 80
-
-def test_excinfo_simple():
- try:
- raise ValueError
- except ValueError:
- info = py.code.ExceptionInfo()
- assert info.type == ValueError
-
-def test_excinfo_getstatement():
- def g():
- raise ValueError
- def f():
- g()
- try:
- f()
- except ValueError:
- excinfo = py.code.ExceptionInfo()
- linenumbers = [py.code.getrawcode(f).co_firstlineno-1+3,
- py.code.getrawcode(f).co_firstlineno-1+1,
- py.code.getrawcode(g).co_firstlineno-1+1,]
- l = list(excinfo.traceback)
- foundlinenumbers = [x.lineno for x in l]
- assert foundlinenumbers == linenumbers
- #for x in info:
- # print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
- #xxx
-
-# testchain for getentries test below
-def f():
- #
- raise ValueError
- #
-def g():
- #
- __tracebackhide__ = True
- f()
- #
-def h():
- #
- g()
- #
-
-class TestTraceback_f_g_h:
- def setup_method(self, method):
- try:
- h()
- except ValueError:
- self.excinfo = py.code.ExceptionInfo()
-
- def test_traceback_entries(self):
- tb = self.excinfo.traceback
- entries = list(tb)
- assert len(tb) == 4 # maybe fragile test
- assert len(entries) == 4 # maybe fragile test
- names = ['f', 'g', 'h']
- for entry in entries:
- try:
- names.remove(entry.frame.code.name)
- except ValueError:
- pass
- assert not names
-
- def test_traceback_entry_getsource(self):
- tb = self.excinfo.traceback
- s = str(tb[-1].getsource() )
- assert s.startswith("def f():")
- assert s.endswith("raise ValueError")
-
- @astonly
- @failsonjython
- def test_traceback_entry_getsource_in_construct(self):
- source = py.code.Source("""\
- def xyz():
- try:
- raise ValueError
- except somenoname:
- pass
- xyz()
- """)
- try:
- exec (source.compile())
- except NameError:
- tb = py.code.ExceptionInfo().traceback
- print (tb[-1].getsource())
- s = str(tb[-1].getsource())
- assert s.startswith("def xyz():\n try:")
- assert s.strip().endswith("except somenoname:")
-
- def test_traceback_cut(self):
- co = py.code.Code(f)
- path, firstlineno = co.path, co.firstlineno
- traceback = self.excinfo.traceback
- newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
- assert len(newtraceback) == 1
- newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
- assert len(newtraceback) == 1
-
- def test_traceback_cut_excludepath(self, testdir):
- p = testdir.makepyfile("def f(): raise ValueError")
- excinfo = py.test.raises(ValueError, "p.pyimport().f()")
- basedir = py.path.local(py.test.__file__).dirpath()
- newtraceback = excinfo.traceback.cut(excludepath=basedir)
- for x in newtraceback:
- if hasattr(x, 'path'):
- assert not py.path.local(x.path).relto(basedir)
- assert newtraceback[-1].frame.code.path == p
-
- def test_traceback_filter(self):
- traceback = self.excinfo.traceback
- ntraceback = traceback.filter()
- assert len(ntraceback) == len(traceback) - 1
-
- def test_traceback_recursion_index(self):
- def f(n):
- if n < 10:
- n += 1
- f(n)
- excinfo = py.test.raises(RuntimeError, f, 8)
- traceback = excinfo.traceback
- recindex = traceback.recursionindex()
- assert recindex == 3
-
- def test_traceback_only_specific_recursion_errors(self, monkeypatch):
- def f(n):
- if n == 0:
- raise RuntimeError("hello")
- f(n-1)
-
- excinfo = pytest.raises(RuntimeError, f, 100)
- monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
- repr = excinfo.getrepr()
- assert "RuntimeError: hello" in str(repr.reprcrash)
-
- def test_traceback_no_recursion_index(self):
- def do_stuff():
- raise RuntimeError
- def reraise_me():
- import sys
- exc, val, tb = sys.exc_info()
- py.builtin._reraise(exc, val, tb)
- def f(n):
- try:
- do_stuff()
- except:
- reraise_me()
- excinfo = py.test.raises(RuntimeError, f, 8)
- traceback = excinfo.traceback
- recindex = traceback.recursionindex()
- assert recindex is None
-
- def test_traceback_messy_recursion(self):
- #XXX: simplified locally testable version
- decorator = py.test.importorskip('decorator').decorator
-
- def log(f, *k, **kw):
- print('%s %s' % (k, kw))
- f(*k, **kw)
- log = decorator(log)
-
- def fail():
- raise ValueError('')
-
- fail = log(log(fail))
-
- excinfo = py.test.raises(ValueError, fail)
- assert excinfo.traceback.recursionindex() is None
-
-
-
- def test_traceback_getcrashentry(self):
- def i():
- __tracebackhide__ = True
- raise ValueError
- def h():
- i()
- def g():
- __tracebackhide__ = True
- h()
- def f():
- g()
-
- excinfo = py.test.raises(ValueError, f)
- tb = excinfo.traceback
- entry = tb.getcrashentry()
- co = py.code.Code(h)
- assert entry.frame.code.path == co.path
- assert entry.lineno == co.firstlineno + 1
- assert entry.frame.code.name == 'h'
-
- def test_traceback_getcrashentry_empty(self):
- def g():
- __tracebackhide__ = True
- raise ValueError
- def f():
- __tracebackhide__ = True
- g()
-
- excinfo = py.test.raises(ValueError, f)
- tb = excinfo.traceback
- entry = tb.getcrashentry()
- co = py.code.Code(g)
- assert entry.frame.code.path == co.path
- assert entry.lineno == co.firstlineno + 2
- assert entry.frame.code.name == 'g'
-
-def hello(x):
- x + 5
-
-def test_tbentry_reinterpret():
- try:
- hello("hello")
- except TypeError:
- excinfo = py.code.ExceptionInfo()
- tbentry = excinfo.traceback[-1]
- msg = tbentry.reinterpret()
- assert msg.startswith("TypeError: ('hello' + 5)")
-
-def test_excinfo_exconly():
- excinfo = py.test.raises(ValueError, h)
- assert excinfo.exconly().startswith('ValueError')
- excinfo = py.test.raises(ValueError,
- "raise ValueError('hello\\nworld')")
- msg = excinfo.exconly(tryshort=True)
- assert msg.startswith('ValueError')
- assert msg.endswith("world")
-
-def test_excinfo_repr():
- excinfo = py.test.raises(ValueError, h)
- s = repr(excinfo)
- assert s == "<ExceptionInfo ValueError tblen=4>"
-
-def test_excinfo_str():
- excinfo = py.test.raises(ValueError, h)
- s = str(excinfo)
- assert s.startswith(__file__[:-9]) # pyc file and $py.class
- assert s.endswith("ValueError")
- assert len(s.split(":")) >= 3 # on windows it's 4
-
-def test_excinfo_errisinstance():
- excinfo = py.test.raises(ValueError, h)
- assert excinfo.errisinstance(ValueError)
-
-def test_excinfo_no_sourcecode():
- try:
- exec ("raise ValueError()")
- except ValueError:
- excinfo = py.code.ExceptionInfo()
- s = str(excinfo.traceback[-1])
- if py.std.sys.version_info < (2,5):
- assert s == " File '<string>':1 in ?\n ???\n"
- else:
- assert s == " File '<string>':1 in <module>\n ???\n"
-
-def test_excinfo_no_python_sourcecode(tmpdir):
- #XXX: simplified locally testable version
- tmpdir.join('test.txt').write("{{ h()}}:")
-
- jinja2 = py.test.importorskip('jinja2')
- loader = jinja2.FileSystemLoader(str(tmpdir))
- env = jinja2.Environment(loader=loader)
- template = env.get_template('test.txt')
- excinfo = py.test.raises(ValueError,
- template.render, h=h)
- for item in excinfo.traceback:
- print(item) #XXX: for some reason jinja.Template.render is printed in full
- item.source # shouldnt fail
- if item.path.basename == 'test.txt':
- assert str(item.source) == '{{ h()}}:'
-
-
-def test_entrysource_Queue_example():
- try:
- queue.Queue().get(timeout=0.001)
- except queue.Empty:
- excinfo = py.code.ExceptionInfo()
- entry = excinfo.traceback[-1]
- source = entry.getsource()
- assert source is not None
- s = str(source).strip()
- assert s.startswith("def get")
-
-def test_codepath_Queue_example():
- try:
- queue.Queue().get(timeout=0.001)
- except queue.Empty:
- excinfo = py.code.ExceptionInfo()
- entry = excinfo.traceback[-1]
- path = entry.path
- assert isinstance(path, py.path.local)
- assert path.basename.lower() == "queue.py"
- assert path.check()
-
-class TestFormattedExcinfo:
- def pytest_funcarg__importasmod(self, request):
- def importasmod(source):
- source = py.code.Source(source)
- tmpdir = request.getfuncargvalue("tmpdir")
- modpath = tmpdir.join("mod.py")
- tmpdir.ensure("__init__.py")
- modpath.write(source)
- if invalidate_import_caches is not None:
- invalidate_import_caches()
- return modpath.pyimport()
- return importasmod
-
- def excinfo_from_exec(self, source):
- source = py.code.Source(source).strip()
- try:
- exec (source.compile())
- except KeyboardInterrupt:
- raise
- except:
- return py.code.ExceptionInfo()
- assert 0, "did not raise"
-
- def test_repr_source(self):
- pr = FormattedExcinfo()
- source = py.code.Source("""
- def f(x):
- pass
- """).strip()
- pr.flow_marker = "|"
- lines = pr.get_source(source, 0)
- assert len(lines) == 2
- assert lines[0] == "| def f(x):"
- assert lines[1] == " pass"
-
- def test_repr_source_excinfo(self):
- """ check if indentation is right """
- pr = FormattedExcinfo()
- excinfo = self.excinfo_from_exec("""
- def f():
- assert 0
- f()
- """)
- pr = FormattedExcinfo()
- source = pr._getentrysource(excinfo.traceback[-1])
- lines = pr.get_source(source, 1, excinfo)
- assert lines == [
- ' def f():',
- '> assert 0',
- 'E assert 0'
- ]
-
-
- def test_repr_source_not_existing(self):
- pr = FormattedExcinfo()
- co = compile("raise ValueError()", "", "exec")
- try:
- exec (co)
- except ValueError:
- excinfo = py.code.ExceptionInfo()
- repr = pr.repr_excinfo(excinfo)
- assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
-
- def test_repr_many_line_source_not_existing(self):
- pr = FormattedExcinfo()
- co = compile("""
-a = 1
-raise ValueError()
-""", "", "exec")
- try:
- exec (co)
- except ValueError:
- excinfo = py.code.ExceptionInfo()
- repr = pr.repr_excinfo(excinfo)
- assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
-
- def test_repr_source_failing_fullsource(self):
- pr = FormattedExcinfo()
-
- class FakeCode(object):
- class raw:
- co_filename = '?'
- path = '?'
- firstlineno = 5
-
- def fullsource(self):
- return None
- fullsource = property(fullsource)
-
- class FakeFrame(object):
- code = FakeCode()
- f_locals = {}
- f_globals = {}
-
- class FakeTracebackEntry(py.code.Traceback.Entry):
- def __init__(self, tb):
- self.lineno = 5+3
-
- @property
- def frame(self):
- return FakeFrame()
-
- class Traceback(py.code.Traceback):
- Entry = FakeTracebackEntry
-
- class FakeExcinfo(py.code.ExceptionInfo):
- typename = "Foo"
- def __init__(self):
- pass
-
- def exconly(self, tryshort):
- return "EXC"
- def errisinstance(self, cls):
- return False
-
- excinfo = FakeExcinfo()
- class FakeRawTB(object):
- tb_next = None
- tb = FakeRawTB()
- excinfo.traceback = Traceback(tb)
-
- fail = IOError()
- repr = pr.repr_excinfo(excinfo)
- assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
-
- fail = py.error.ENOENT
- repr = pr.repr_excinfo(excinfo)
- assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
-
-
- def test_repr_local(self):
- p = FormattedExcinfo(showlocals=True)
- loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
- reprlocals = p.repr_locals(loc)
- assert reprlocals.lines
- assert reprlocals.lines[0] == '__builtins__ = <builtins>'
- assert reprlocals.lines[1] == 'x = 3'
- assert reprlocals.lines[2] == 'y = 5'
- assert reprlocals.lines[3] == 'z = 7'
-
- def test_repr_tracebackentry_lines(self, importasmod):
- mod = importasmod("""
- def func1():
- raise ValueError("hello\\nworld")
- """)
- excinfo = py.test.raises(ValueError, mod.func1)
- excinfo.traceback = excinfo.traceback.filter()
- p = FormattedExcinfo()
- reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
-
- # test as intermediate entry
- lines = reprtb.lines
- assert lines[0] == ' def func1():'
- assert lines[1] == '> raise ValueError("hello\\nworld")'
-
- # test as last entry
- p = FormattedExcinfo(showlocals=True)
- repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
- lines = repr_entry.lines
- assert lines[0] == ' def func1():'
- assert lines[1] == '> raise ValueError("hello\\nworld")'
- assert lines[2] == 'E ValueError: hello'
- assert lines[3] == 'E world'
- assert not lines[4:]
-
- loc = repr_entry.reprlocals is not None
- loc = repr_entry.reprfileloc
- assert loc.path == mod.__file__
- assert loc.lineno == 3
- #assert loc.message == "ValueError: hello"
-
- def test_repr_tracebackentry_lines(self, importasmod):
- mod = importasmod("""
- def func1(m, x, y, z):
- raise ValueError("hello\\nworld")
- """)
- excinfo = py.test.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
- excinfo.traceback = excinfo.traceback.filter()
- entry = excinfo.traceback[-1]
- p = FormattedExcinfo(funcargs=True)
- reprfuncargs = p.repr_args(entry)
- assert reprfuncargs.args[0] == ('m', repr("m"*90))
- assert reprfuncargs.args[1] == ('x', '5')
- assert reprfuncargs.args[2] == ('y', '13')
- assert reprfuncargs.args[3] == ('z', repr("z" * 120))
-
- p = FormattedExcinfo(funcargs=True)
- repr_entry = p.repr_traceback_entry(entry)
- assert repr_entry.reprfuncargs.args == reprfuncargs.args
- tw = TWMock()
- repr_entry.toterminal(tw)
- assert tw.lines[0] == "m = " + repr('m' * 90)
- assert tw.lines[1] == "x = 5, y = 13"
- assert tw.lines[2] == "z = " + repr('z' * 120)
-
- def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
- mod = importasmod("""
- def func1(x, *y, **z):
- raise ValueError("hello\\nworld")
- """)
- excinfo = py.test.raises(ValueError, mod.func1, 'a', 'b', c='d')
- excinfo.traceback = excinfo.traceback.filter()
- entry = excinfo.traceback[-1]
- p = FormattedExcinfo(funcargs=True)
- reprfuncargs = p.repr_args(entry)
- assert reprfuncargs.args[0] == ('x', repr('a'))
- assert reprfuncargs.args[1] == ('y', repr(('b',)))
- assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
-
- p = FormattedExcinfo(funcargs=True)
- repr_entry = p.repr_traceback_entry(entry)
- assert repr_entry.reprfuncargs.args == reprfuncargs.args
- tw = TWMock()
- repr_entry.toterminal(tw)
- assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
-
- def test_repr_tracebackentry_short(self, importasmod):
- mod = importasmod("""
- def func1():
- raise ValueError("hello")
- def entry():
- func1()
- """)
- excinfo = py.test.raises(ValueError, mod.entry)
- p = FormattedExcinfo(style="short")
- reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
- lines = reprtb.lines
- basename = py.path.local(mod.__file__).basename
- assert lines[0] == ' func1()'
- assert basename in str(reprtb.reprfileloc.path)
- assert reprtb.reprfileloc.lineno == 5
-
- # test last entry
- p = FormattedExcinfo(style="short")
- reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
- lines = reprtb.lines
- assert lines[0] == ' raise ValueError("hello")'
- assert lines[1] == 'E ValueError: hello'
- assert basename in str(reprtb.reprfileloc.path)
- assert reprtb.reprfileloc.lineno == 3
-
- def test_repr_tracebackentry_no(self, importasmod):
- mod = importasmod("""
- def func1():
- raise ValueError("hello")
- def entry():
- func1()
- """)
- excinfo = py.test.raises(ValueError, mod.entry)
- p = FormattedExcinfo(style="no")
- p.repr_traceback_entry(excinfo.traceback[-2])
-
- p = FormattedExcinfo(style="no")
- reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
- lines = reprentry.lines
- assert lines[0] == 'E ValueError: hello'
- assert not lines[1:]
-
- def test_repr_traceback_tbfilter(self, importasmod):
- mod = importasmod("""
- def f(x):
- raise ValueError(x)
- def entry():
- f(0)
- """)
- excinfo = py.test.raises(ValueError, mod.entry)
- p = FormattedExcinfo(tbfilter=True)
- reprtb = p.repr_traceback(excinfo)
- assert len(reprtb.reprentries) == 2
- p = FormattedExcinfo(tbfilter=False)
- reprtb = p.repr_traceback(excinfo)
- assert len(reprtb.reprentries) == 3
-
- def test_traceback_short_no_source(self, importasmod, monkeypatch):
- mod = importasmod("""
- def func1():
- raise ValueError("hello")
- def entry():
- func1()
- """)
- try:
- mod.entry()
- except ValueError:
- excinfo = py.code.ExceptionInfo()
- from py._code.code import Code
- monkeypatch.setattr(Code, 'path', 'bogus')
- excinfo.traceback[0].frame.code.path = "bogus"
- p = FormattedExcinfo(style="short")
- reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
- lines = reprtb.lines
- last_p = FormattedExcinfo(style="short")
- last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
- last_lines = last_reprtb.lines
- monkeypatch.undo()
- basename = py.path.local(mod.__file__).basename
- assert lines[0] == ' func1()'
-
- assert last_lines[0] == ' raise ValueError("hello")'
- assert last_lines[1] == 'E ValueError: hello'
-
- def test_repr_traceback_and_excinfo(self, importasmod):
- mod = importasmod("""
- def f(x):
- raise ValueError(x)
- def entry():
- f(0)
- """)
- excinfo = py.test.raises(ValueError, mod.entry)
-
- for style in ("long", "short"):
- p = FormattedExcinfo(style=style)
- reprtb = p.repr_traceback(excinfo)
- assert len(reprtb.reprentries) == 2
- assert reprtb.style == style
- assert not reprtb.extraline
- repr = p.repr_excinfo(excinfo)
- assert repr.reprtraceback
- assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
- assert repr.reprcrash.path.endswith("mod.py")
- assert repr.reprcrash.message == "ValueError: 0"
-
- def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
- mod = importasmod("""
- def f(x):
- raise ValueError(x)
- def entry():
- f(0)
- """)
- excinfo = py.test.raises(ValueError, mod.entry)
-
- p = FormattedExcinfo()
- def raiseos():
- raise OSError(2)
- monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
- assert p._makepath(__file__) == __file__
- reprtb = p.repr_traceback(excinfo)
-
- def test_repr_excinfo_addouterr(self, importasmod):
- mod = importasmod("""
- def entry():
- raise ValueError()
- """)
- excinfo = py.test.raises(ValueError, mod.entry)
- repr = excinfo.getrepr()
- repr.addsection("title", "content")
- twmock = TWMock()
- repr.toterminal(twmock)
- assert twmock.lines[-1] == "content"
- assert twmock.lines[-2] == ("-", "title")
-
- def test_repr_excinfo_reprcrash(self, importasmod):
- mod = importasmod("""
- def entry():
- raise ValueError()
- """)
- excinfo = py.test.raises(ValueError, mod.entry)
- repr = excinfo.getrepr()
- assert repr.reprcrash.path.endswith("mod.py")
- assert repr.reprcrash.lineno == 3
- assert repr.reprcrash.message == "ValueError"
- assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
-
- def test_repr_traceback_recursion(self, importasmod):
- mod = importasmod("""
- def rec2(x):
- return rec1(x+1)
- def rec1(x):
- return rec2(x-1)
- def entry():
- rec1(42)
- """)
- excinfo = py.test.raises(RuntimeError, mod.entry)
-
- for style in ("short", "long", "no"):
- p = FormattedExcinfo(style="short")
- reprtb = p.repr_traceback(excinfo)
- assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
- assert str(reprtb)
-
- def test_tb_entry_AssertionError(self, importasmod):
- # probably this test is a bit redundant
- # as py/magic/testing/test_assertion.py
- # already tests correctness of
- # assertion-reinterpretation logic
- mod = importasmod("""
- def somefunc():
- x = 1
- assert x == 2
- """)
- excinfo = py.test.raises(AssertionError, mod.somefunc)
-
- p = FormattedExcinfo()
- reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
- lines = reprentry.lines
- assert lines[-1] == "E assert 1 == 2"
-
- def test_reprexcinfo_getrepr(self, importasmod):
- mod = importasmod("""
- def f(x):
- raise ValueError(x)
- def entry():
- f(0)
- """)
- try:
- mod.entry()
- except ValueError:
- excinfo = py.code.ExceptionInfo()
-
- for style in ("short", "long", "no"):
- for showlocals in (True, False):
- repr = excinfo.getrepr(style=style, showlocals=showlocals)
- assert isinstance(repr, ReprExceptionInfo)
- assert repr.reprtraceback.style == style
-
- def test_reprexcinfo_unicode(self):
- from py._code.code import TerminalRepr
- class MyRepr(TerminalRepr):
- def toterminal(self, tw):
- tw.line(py.builtin._totext("я", "utf-8"))
- x = py.builtin._totext(MyRepr())
- assert x == py.builtin._totext("я", "utf-8")
-
- def test_toterminal_long(self, importasmod):
- mod = importasmod("""
- def g(x):
- raise ValueError(x)
- def f():
- g(3)
- """)
- excinfo = py.test.raises(ValueError, mod.f)
- excinfo.traceback = excinfo.traceback.filter()
- repr = excinfo.getrepr()
- tw = TWMock()
- repr.toterminal(tw)
- assert tw.lines[0] == ""
- tw.lines.pop(0)
- assert tw.lines[0] == " def f():"
- assert tw.lines[1] == "> g(3)"
- assert tw.lines[2] == ""
- assert tw.lines[3].endswith("mod.py:5: ")
- assert tw.lines[4] == ("_ ", None)
- assert tw.lines[5] == ""
- assert tw.lines[6] == " def g(x):"
- assert tw.lines[7] == "> raise ValueError(x)"
- assert tw.lines[8] == "E ValueError: 3"
- assert tw.lines[9] == ""
- assert tw.lines[10].endswith("mod.py:3: ValueError")
-
- def test_toterminal_long_missing_source(self, importasmod, tmpdir):
- mod = importasmod("""
- def g(x):
- raise ValueError(x)
- def f():
- g(3)
- """)
- excinfo = py.test.raises(ValueError, mod.f)
- tmpdir.join('mod.py').remove()
- excinfo.traceback = excinfo.traceback.filter()
- repr = excinfo.getrepr()
- tw = TWMock()
- repr.toterminal(tw)
- assert tw.lines[0] == ""
- tw.lines.pop(0)
- assert tw.lines[0] == "> ???"
- assert tw.lines[1] == ""
- assert tw.lines[2].endswith("mod.py:5: ")
- assert tw.lines[3] == ("_ ", None)
- assert tw.lines[4] == ""
- assert tw.lines[5] == "> ???"
- assert tw.lines[6] == "E ValueError: 3"
- assert tw.lines[7] == ""
- assert tw.lines[8].endswith("mod.py:3: ValueError")
-
- def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
- mod = importasmod("""
- def g(x):
- raise ValueError(x)
- def f():
- g(3)
- """)
- excinfo = py.test.raises(ValueError, mod.f)
- tmpdir.join('mod.py').write('asdf')
- excinfo.traceback = excinfo.traceback.filter()
- repr = excinfo.getrepr()
- tw = TWMock()
- repr.toterminal(tw)
- assert tw.lines[0] == ""
- tw.lines.pop(0)
- assert tw.lines[0] == "> ???"
- assert tw.lines[1] == ""
- assert tw.lines[2].endswith("mod.py:5: ")
- assert tw.lines[3] == ("_ ", None)
- assert tw.lines[4] == ""
- assert tw.lines[5] == "> ???"
- assert tw.lines[6] == "E ValueError: 3"
- assert tw.lines[7] == ""
- assert tw.lines[8].endswith("mod.py:3: ValueError")
-
- def test_toterminal_long_filenames(self, importasmod):
- mod = importasmod("""
- def f():
- raise ValueError()
- """)
- excinfo = py.test.raises(ValueError, mod.f)
- tw = TWMock()
- path = py.path.local(mod.__file__)
- old = path.dirpath().chdir()
- try:
- repr = excinfo.getrepr(abspath=False)
- repr.toterminal(tw)
- line = tw.lines[-1]
- x = py.path.local().bestrelpath(path)
- if len(x) < len(str(path)):
- assert line == "mod.py:3: ValueError"
-
- repr = excinfo.getrepr(abspath=True)
- repr.toterminal(tw)
- line = tw.lines[-1]
- assert line == "%s:3: ValueError" %(path,)
- finally:
- old.chdir()
-
- @py.test.mark.multi(reproptions=[
- {'style': style, 'showlocals': showlocals,
- 'funcargs': funcargs, 'tbfilter': tbfilter
- } for style in ("long", "short", "no")
- for showlocals in (True, False)
- for tbfilter in (True, False)
- for funcargs in (True, False)])
- def test_format_excinfo(self, importasmod, reproptions):
- mod = importasmod("""
- def g(x):
- raise ValueError(x)
- def f():
- g(3)
- """)
- excinfo = py.test.raises(ValueError, mod.f)
- tw = py.io.TerminalWriter(stringio=True)
- repr = excinfo.getrepr(**reproptions)
- repr.toterminal(tw)
- assert tw.stringio.getvalue()
-
-
- def test_native_style(self):
- excinfo = self.excinfo_from_exec("""
- assert 0
- """)
- repr = excinfo.getrepr(style='native')
- assert "assert 0" in str(repr.reprcrash)
- s = str(repr)
- assert s.startswith('Traceback (most recent call last):\n File')
- assert s.endswith('\nAssertionError: assert 0')
- assert 'exec (source.compile())' in s
- # python 2.4 fails to get the source line for the assert
- if py.std.sys.version_info >= (2, 5):
- assert s.count('assert 0') == 2
-
- def test_traceback_repr_style(self, importasmod):
- mod = importasmod("""
- def f():
- g()
- def g():
- h()
- def h():
- i()
- def i():
- raise ValueError()
- """)
- excinfo = py.test.raises(ValueError, mod.f)
- excinfo.traceback = excinfo.traceback.filter()
- excinfo.traceback[1].set_repr_style("short")
- excinfo.traceback[2].set_repr_style("short")
- r = excinfo.getrepr(style="long")
- tw = TWMock()
- r.toterminal(tw)
- for line in tw.lines: print (line)
- assert tw.lines[0] == ""
- assert tw.lines[1] == " def f():"
- assert tw.lines[2] == "> g()"
- assert tw.lines[3] == ""
- assert tw.lines[4].endswith("mod.py:3: ")
- assert tw.lines[5] == ("_ ", None)
- assert tw.lines[6].endswith("in g")
- assert tw.lines[7] == " h()"
- assert tw.lines[8].endswith("in h")
- assert tw.lines[9] == " i()"
- assert tw.lines[10] == ("_ ", None)
- assert tw.lines[11] == ""
- assert tw.lines[12] == " def i():"
- assert tw.lines[13] == "> raise ValueError()"
- assert tw.lines[14] == "E ValueError"
- assert tw.lines[15] == ""
- assert tw.lines[16].endswith("mod.py:9: ValueError")
+# -*- coding: utf-8 -*-
+
+import py
+from py._code.code import FormattedExcinfo, ReprExceptionInfo
+queue = py.builtin._tryimport('queue', 'Queue')
+
+failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
+from test_source import astonly
+
+try:
+ import importlib
+except ImportError:
+ invalidate_import_caches = None
+else:
+ invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
+
+import pytest
+pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
+
+class TWMock:
+ def __init__(self):
+ self.lines = []
+ def sep(self, sep, line=None):
+ self.lines.append((sep, line))
+ def line(self, line, **kw):
+ self.lines.append(line)
+ def markup(self, text, **kw):
+ return text
+
+ fullwidth = 80
+
+def test_excinfo_simple():
+ try:
+ raise ValueError
+ except ValueError:
+ info = py.code.ExceptionInfo()
+ assert info.type == ValueError
+
+def test_excinfo_getstatement():
+ def g():
+ raise ValueError
+ def f():
+ g()
+ try:
+ f()
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ linenumbers = [py.code.getrawcode(f).co_firstlineno-1+3,
+ py.code.getrawcode(f).co_firstlineno-1+1,
+ py.code.getrawcode(g).co_firstlineno-1+1,]
+ l = list(excinfo.traceback)
+ foundlinenumbers = [x.lineno for x in l]
+ assert foundlinenumbers == linenumbers
+ #for x in info:
+ # print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
+ #xxx
+
+# testchain for getentries test below
+def f():
+ #
+ raise ValueError
+ #
+def g():
+ #
+ __tracebackhide__ = True
+ f()
+ #
+def h():
+ #
+ g()
+ #
+
+class TestTraceback_f_g_h:
+ def setup_method(self, method):
+ try:
+ h()
+ except ValueError:
+ self.excinfo = py.code.ExceptionInfo()
+
+ def test_traceback_entries(self):
+ tb = self.excinfo.traceback
+ entries = list(tb)
+ assert len(tb) == 4 # maybe fragile test
+ assert len(entries) == 4 # maybe fragile test
+ names = ['f', 'g', 'h']
+ for entry in entries:
+ try:
+ names.remove(entry.frame.code.name)
+ except ValueError:
+ pass
+ assert not names
+
+ def test_traceback_entry_getsource(self):
+ tb = self.excinfo.traceback
+ s = str(tb[-1].getsource() )
+ assert s.startswith("def f():")
+ assert s.endswith("raise ValueError")
+
+ @astonly
+ @failsonjython
+ def test_traceback_entry_getsource_in_construct(self):
+ source = py.code.Source("""\
+ def xyz():
+ try:
+ raise ValueError
+ except somenoname:
+ pass
+ xyz()
+ """)
+ try:
+ exec (source.compile())
+ except NameError:
+ tb = py.code.ExceptionInfo().traceback
+ print (tb[-1].getsource())
+ s = str(tb[-1].getsource())
+ assert s.startswith("def xyz():\n try:")
+ assert s.strip().endswith("except somenoname:")
+
+ def test_traceback_cut(self):
+ co = py.code.Code(f)
+ path, firstlineno = co.path, co.firstlineno
+ traceback = self.excinfo.traceback
+ newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
+ assert len(newtraceback) == 1
+ newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
+ assert len(newtraceback) == 1
+
+ def test_traceback_cut_excludepath(self, testdir):
+ p = testdir.makepyfile("def f(): raise ValueError")
+ excinfo = py.test.raises(ValueError, "p.pyimport().f()")
+ basedir = py.path.local(py.test.__file__).dirpath()
+ newtraceback = excinfo.traceback.cut(excludepath=basedir)
+ for x in newtraceback:
+ if hasattr(x, 'path'):
+ assert not py.path.local(x.path).relto(basedir)
+ assert newtraceback[-1].frame.code.path == p
+
+ def test_traceback_filter(self):
+ traceback = self.excinfo.traceback
+ ntraceback = traceback.filter()
+ assert len(ntraceback) == len(traceback) - 1
+
+ def test_traceback_recursion_index(self):
+ def f(n):
+ if n < 10:
+ n += 1
+ f(n)
+ excinfo = py.test.raises(RuntimeError, f, 8)
+ traceback = excinfo.traceback
+ recindex = traceback.recursionindex()
+ assert recindex == 3
+
+ def test_traceback_only_specific_recursion_errors(self, monkeypatch):
+ def f(n):
+ if n == 0:
+ raise RuntimeError("hello")
+ f(n-1)
+
+ excinfo = pytest.raises(RuntimeError, f, 100)
+ monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
+ repr = excinfo.getrepr()
+ assert "RuntimeError: hello" in str(repr.reprcrash)
+
+ def test_traceback_no_recursion_index(self):
+ def do_stuff():
+ raise RuntimeError
+ def reraise_me():
+ import sys
+ exc, val, tb = sys.exc_info()
+ py.builtin._reraise(exc, val, tb)
+ def f(n):
+ try:
+ do_stuff()
+ except:
+ reraise_me()
+ excinfo = py.test.raises(RuntimeError, f, 8)
+ traceback = excinfo.traceback
+ recindex = traceback.recursionindex()
+ assert recindex is None
+
+ def test_traceback_messy_recursion(self):
+ #XXX: simplified locally testable version
+ decorator = py.test.importorskip('decorator').decorator
+
+ def log(f, *k, **kw):
+ print('%s %s' % (k, kw))
+ f(*k, **kw)
+ log = decorator(log)
+
+ def fail():
+ raise ValueError('')
+
+ fail = log(log(fail))
+
+ excinfo = py.test.raises(ValueError, fail)
+ assert excinfo.traceback.recursionindex() is None
+
+
+
+ def test_traceback_getcrashentry(self):
+ def i():
+ __tracebackhide__ = True
+ raise ValueError
+ def h():
+ i()
+ def g():
+ __tracebackhide__ = True
+ h()
+ def f():
+ g()
+
+ excinfo = py.test.raises(ValueError, f)
+ tb = excinfo.traceback
+ entry = tb.getcrashentry()
+ co = py.code.Code(h)
+ assert entry.frame.code.path == co.path
+ assert entry.lineno == co.firstlineno + 1
+ assert entry.frame.code.name == 'h'
+
+ def test_traceback_getcrashentry_empty(self):
+ def g():
+ __tracebackhide__ = True
+ raise ValueError
+ def f():
+ __tracebackhide__ = True
+ g()
+
+ excinfo = py.test.raises(ValueError, f)
+ tb = excinfo.traceback
+ entry = tb.getcrashentry()
+ co = py.code.Code(g)
+ assert entry.frame.code.path == co.path
+ assert entry.lineno == co.firstlineno + 2
+ assert entry.frame.code.name == 'g'
+
+def hello(x):
+ x + 5
+
+def test_tbentry_reinterpret():
+ try:
+ hello("hello")
+ except TypeError:
+ excinfo = py.code.ExceptionInfo()
+ tbentry = excinfo.traceback[-1]
+ msg = tbentry.reinterpret()
+ assert msg.startswith("TypeError: ('hello' + 5)")
+
+def test_excinfo_exconly():
+ excinfo = py.test.raises(ValueError, h)
+ assert excinfo.exconly().startswith('ValueError')
+ excinfo = py.test.raises(ValueError,
+ "raise ValueError('hello\\nworld')")
+ msg = excinfo.exconly(tryshort=True)
+ assert msg.startswith('ValueError')
+ assert msg.endswith("world")
+
+def test_excinfo_repr():
+ excinfo = py.test.raises(ValueError, h)
+ s = repr(excinfo)
+ assert s == "<ExceptionInfo ValueError tblen=4>"
+
+def test_excinfo_str():
+ excinfo = py.test.raises(ValueError, h)
+ s = str(excinfo)
+ assert s.startswith(__file__[:-9]) # pyc file and $py.class
+ assert s.endswith("ValueError")
+ assert len(s.split(":")) >= 3 # on windows it's 4
+
+def test_excinfo_errisinstance():
+ excinfo = py.test.raises(ValueError, h)
+ assert excinfo.errisinstance(ValueError)
+
+def test_excinfo_no_sourcecode():
+ try:
+ exec ("raise ValueError()")
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ s = str(excinfo.traceback[-1])
+ if py.std.sys.version_info < (2,5):
+ assert s == " File '<string>':1 in ?\n ???\n"
+ else:
+ assert s == " File '<string>':1 in <module>\n ???\n"
+
+def test_excinfo_no_python_sourcecode(tmpdir):
+ #XXX: simplified locally testable version
+ tmpdir.join('test.txt').write("{{ h()}}:")
+
+ jinja2 = py.test.importorskip('jinja2')
+ loader = jinja2.FileSystemLoader(str(tmpdir))
+ env = jinja2.Environment(loader=loader)
+ template = env.get_template('test.txt')
+ excinfo = py.test.raises(ValueError,
+ template.render, h=h)
+ for item in excinfo.traceback:
+ print(item) #XXX: for some reason jinja.Template.render is printed in full
+ item.source # shouldn't fail
+ if item.path.basename == 'test.txt':
+ assert str(item.source) == '{{ h()}}:'
+
+
+def test_entrysource_Queue_example():
+ try:
+ queue.Queue().get(timeout=0.001)
+ except queue.Empty:
+ excinfo = py.code.ExceptionInfo()
+ entry = excinfo.traceback[-1]
+ source = entry.getsource()
+ assert source is not None
+ s = str(source).strip()
+ assert s.startswith("def get")
+
+def test_codepath_Queue_example():
+ try:
+ queue.Queue().get(timeout=0.001)
+ except queue.Empty:
+ excinfo = py.code.ExceptionInfo()
+ entry = excinfo.traceback[-1]
+ path = entry.path
+ assert isinstance(path, py.path.local)
+ assert path.basename.lower() == "queue.py"
+ assert path.check()
+
+class TestFormattedExcinfo:
+ def pytest_funcarg__importasmod(self, request):
+ def importasmod(source):
+ source = py.code.Source(source)
+ tmpdir = request.getfuncargvalue("tmpdir")
+ modpath = tmpdir.join("mod.py")
+ tmpdir.ensure("__init__.py")
+ modpath.write(source)
+ if invalidate_import_caches is not None:
+ invalidate_import_caches()
+ return modpath.pyimport()
+ return importasmod
+
+ def excinfo_from_exec(self, source):
+ source = py.code.Source(source).strip()
+ try:
+ exec (source.compile())
+ except KeyboardInterrupt:
+ raise
+ except:
+ return py.code.ExceptionInfo()
+ assert 0, "did not raise"
+
+ def test_repr_source(self):
+ pr = FormattedExcinfo()
+ source = py.code.Source("""
+ def f(x):
+ pass
+ """).strip()
+ pr.flow_marker = "|"
+ lines = pr.get_source(source, 0)
+ assert len(lines) == 2
+ assert lines[0] == "| def f(x):"
+ assert lines[1] == " pass"
+
+ def test_repr_source_excinfo(self):
+ """ check if indentation is right """
+ pr = FormattedExcinfo()
+ excinfo = self.excinfo_from_exec("""
+ def f():
+ assert 0
+ f()
+ """)
+ pr = FormattedExcinfo()
+ source = pr._getentrysource(excinfo.traceback[-1])
+ lines = pr.get_source(source, 1, excinfo)
+ assert lines == [
+ ' def f():',
+ '> assert 0',
+ 'E assert 0'
+ ]
+
+
+ def test_repr_source_not_existing(self):
+ pr = FormattedExcinfo()
+ co = compile("raise ValueError()", "", "exec")
+ try:
+ exec (co)
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
+
+ def test_repr_many_line_source_not_existing(self):
+ pr = FormattedExcinfo()
+ co = compile("""
+a = 1
+raise ValueError()
+""", "", "exec")
+ try:
+ exec (co)
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
+
+ def test_repr_source_failing_fullsource(self):
+ pr = FormattedExcinfo()
+
+ class FakeCode(object):
+ class raw:
+ co_filename = '?'
+ path = '?'
+ firstlineno = 5
+
+ def fullsource(self):
+ return None
+ fullsource = property(fullsource)
+
+ class FakeFrame(object):
+ code = FakeCode()
+ f_locals = {}
+ f_globals = {}
+
+ class FakeTracebackEntry(py.code.Traceback.Entry):
+ def __init__(self, tb):
+ self.lineno = 5+3
+
+ @property
+ def frame(self):
+ return FakeFrame()
+
+ class Traceback(py.code.Traceback):
+ Entry = FakeTracebackEntry
+
+ class FakeExcinfo(py.code.ExceptionInfo):
+ typename = "Foo"
+ def __init__(self):
+ pass
+
+ def exconly(self, tryshort):
+ return "EXC"
+ def errisinstance(self, cls):
+ return False
+
+ excinfo = FakeExcinfo()
+ class FakeRawTB(object):
+ tb_next = None
+ tb = FakeRawTB()
+ excinfo.traceback = Traceback(tb)
+
+ fail = IOError()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
+
+ fail = py.error.ENOENT
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
+
+
+ def test_repr_local(self):
+ p = FormattedExcinfo(showlocals=True)
+ loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
+ reprlocals = p.repr_locals(loc)
+ assert reprlocals.lines
+ assert reprlocals.lines[0] == '__builtins__ = <builtins>'
+ assert reprlocals.lines[1] == 'x = 3'
+ assert reprlocals.lines[2] == 'y = 5'
+ assert reprlocals.lines[3] == 'z = 7'
+
+ def test_repr_tracebackentry_lines(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = py.test.raises(ValueError, mod.func1)
+ excinfo.traceback = excinfo.traceback.filter()
+ p = FormattedExcinfo()
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
+
+ # test as intermediate entry
+ lines = reprtb.lines
+ assert lines[0] == ' def func1():'
+ assert lines[1] == '> raise ValueError("hello\\nworld")'
+
+ # test as last entry
+ p = FormattedExcinfo(showlocals=True)
+ repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = repr_entry.lines
+ assert lines[0] == ' def func1():'
+ assert lines[1] == '> raise ValueError("hello\\nworld")'
+ assert lines[2] == 'E ValueError: hello'
+ assert lines[3] == 'E world'
+ assert not lines[4:]
+
+ loc = repr_entry.reprlocals is not None
+ loc = repr_entry.reprfileloc
+ assert loc.path == mod.__file__
+ assert loc.lineno == 3
+ #assert loc.message == "ValueError: hello"
+
+ def test_repr_tracebackentry_lines(self, importasmod):
+ mod = importasmod("""
+ def func1(m, x, y, z):
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = py.test.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
+ excinfo.traceback = excinfo.traceback.filter()
+ entry = excinfo.traceback[-1]
+ p = FormattedExcinfo(funcargs=True)
+ reprfuncargs = p.repr_args(entry)
+ assert reprfuncargs.args[0] == ('m', repr("m"*90))
+ assert reprfuncargs.args[1] == ('x', '5')
+ assert reprfuncargs.args[2] == ('y', '13')
+ assert reprfuncargs.args[3] == ('z', repr("z" * 120))
+
+ p = FormattedExcinfo(funcargs=True)
+ repr_entry = p.repr_traceback_entry(entry)
+ assert repr_entry.reprfuncargs.args == reprfuncargs.args
+ tw = TWMock()
+ repr_entry.toterminal(tw)
+ assert tw.lines[0] == "m = " + repr('m' * 90)
+ assert tw.lines[1] == "x = 5, y = 13"
+ assert tw.lines[2] == "z = " + repr('z' * 120)
+
+ def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
+ mod = importasmod("""
+ def func1(x, *y, **z):
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = py.test.raises(ValueError, mod.func1, 'a', 'b', c='d')
+ excinfo.traceback = excinfo.traceback.filter()
+ entry = excinfo.traceback[-1]
+ p = FormattedExcinfo(funcargs=True)
+ reprfuncargs = p.repr_args(entry)
+ assert reprfuncargs.args[0] == ('x', repr('a'))
+ assert reprfuncargs.args[1] == ('y', repr(('b',)))
+ assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
+
+ p = FormattedExcinfo(funcargs=True)
+ repr_entry = p.repr_traceback_entry(entry)
+ assert repr_entry.reprfuncargs.args == reprfuncargs.args
+ tw = TWMock()
+ repr_entry.toterminal(tw)
+ assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
+
+ def test_repr_tracebackentry_short(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
+ lines = reprtb.lines
+ basename = py.path.local(mod.__file__).basename
+ assert lines[0] == ' func1()'
+ assert basename in str(reprtb.reprfileloc.path)
+ assert reprtb.reprfileloc.lineno == 5
+
+ # test last entry
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprtb.lines
+ assert lines[0] == ' raise ValueError("hello")'
+ assert lines[1] == 'E ValueError: hello'
+ assert basename in str(reprtb.reprfileloc.path)
+ assert reprtb.reprfileloc.lineno == 3
+
+ def test_repr_tracebackentry_no(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(style="no")
+ p.repr_traceback_entry(excinfo.traceback[-2])
+
+ p = FormattedExcinfo(style="no")
+ reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprentry.lines
+ assert lines[0] == 'E ValueError: hello'
+ assert not lines[1:]
+
+ def test_repr_traceback_tbfilter(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(tbfilter=True)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 2
+ p = FormattedExcinfo(tbfilter=False)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 3
+
+ def test_traceback_short_no_source(self, importasmod, monkeypatch):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ try:
+ mod.entry()
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ from py._code.code import Code
+ monkeypatch.setattr(Code, 'path', 'bogus')
+ excinfo.traceback[0].frame.code.path = "bogus"
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
+ lines = reprtb.lines
+ last_p = FormattedExcinfo(style="short")
+ last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ last_lines = last_reprtb.lines
+ monkeypatch.undo()
+ basename = py.path.local(mod.__file__).basename
+ assert lines[0] == ' func1()'
+
+ assert last_lines[0] == ' raise ValueError("hello")'
+ assert last_lines[1] == 'E ValueError: hello'
+
+ def test_repr_traceback_and_excinfo(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+
+ for style in ("long", "short"):
+ p = FormattedExcinfo(style=style)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 2
+ assert reprtb.style == style
+ assert not reprtb.extraline
+ repr = p.repr_excinfo(excinfo)
+ assert repr.reprtraceback
+ assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
+ assert repr.reprcrash.path.endswith("mod.py")
+ assert repr.reprcrash.message == "ValueError: 0"
+
+ def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+
+ p = FormattedExcinfo()
+ def raiseos():
+ raise OSError(2)
+ monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
+ assert p._makepath(__file__) == __file__
+ reprtb = p.repr_traceback(excinfo)
+
+ def test_repr_excinfo_addouterr(self, importasmod):
+ mod = importasmod("""
+ def entry():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ repr = excinfo.getrepr()
+ repr.addsection("title", "content")
+ twmock = TWMock()
+ repr.toterminal(twmock)
+ assert twmock.lines[-1] == "content"
+ assert twmock.lines[-2] == ("-", "title")
+
+ def test_repr_excinfo_reprcrash(self, importasmod):
+ mod = importasmod("""
+ def entry():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ repr = excinfo.getrepr()
+ assert repr.reprcrash.path.endswith("mod.py")
+ assert repr.reprcrash.lineno == 3
+ assert repr.reprcrash.message == "ValueError"
+ assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
+
+ def test_repr_traceback_recursion(self, importasmod):
+ mod = importasmod("""
+ def rec2(x):
+ return rec1(x+1)
+ def rec1(x):
+ return rec2(x-1)
+ def entry():
+ rec1(42)
+ """)
+ excinfo = py.test.raises(RuntimeError, mod.entry)
+
+ for style in ("short", "long", "no"):
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback(excinfo)
+ assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
+ assert str(reprtb)
+
+ def test_tb_entry_AssertionError(self, importasmod):
+ # probably this test is a bit redundant
+ # as py/magic/testing/test_assertion.py
+ # already tests correctness of
+ # assertion-reinterpretation logic
+ mod = importasmod("""
+ def somefunc():
+ x = 1
+ assert x == 2
+ """)
+ excinfo = py.test.raises(AssertionError, mod.somefunc)
+
+ p = FormattedExcinfo()
+ reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprentry.lines
+ assert lines[-1] == "E assert 1 == 2"
+
+ def test_reprexcinfo_getrepr(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ try:
+ mod.entry()
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+
+ for style in ("short", "long", "no"):
+ for showlocals in (True, False):
+ repr = excinfo.getrepr(style=style, showlocals=showlocals)
+ assert isinstance(repr, ReprExceptionInfo)
+ assert repr.reprtraceback.style == style
+
+ def test_reprexcinfo_unicode(self):
+ from py._code.code import TerminalRepr
+ class MyRepr(TerminalRepr):
+ def toterminal(self, tw):
+ tw.line(py.builtin._totext("я", "utf-8"))
+ x = py.builtin._totext(MyRepr())
+ assert x == py.builtin._totext("я", "utf-8")
+
+ def test_toterminal_long(self, importasmod):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == " def f():"
+ assert tw.lines[1] == "> g(3)"
+ assert tw.lines[2] == ""
+ assert tw.lines[3].endswith("mod.py:5: ")
+ assert tw.lines[4] == ("_ ", None)
+ assert tw.lines[5] == ""
+ assert tw.lines[6] == " def g(x):"
+ assert tw.lines[7] == "> raise ValueError(x)"
+ assert tw.lines[8] == "E ValueError: 3"
+ assert tw.lines[9] == ""
+ assert tw.lines[10].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_missing_source(self, importasmod, tmpdir):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tmpdir.join('mod.py').remove()
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == "> ???"
+ assert tw.lines[1] == ""
+ assert tw.lines[2].endswith("mod.py:5: ")
+ assert tw.lines[3] == ("_ ", None)
+ assert tw.lines[4] == ""
+ assert tw.lines[5] == "> ???"
+ assert tw.lines[6] == "E ValueError: 3"
+ assert tw.lines[7] == ""
+ assert tw.lines[8].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tmpdir.join('mod.py').write('asdf')
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == "> ???"
+ assert tw.lines[1] == ""
+ assert tw.lines[2].endswith("mod.py:5: ")
+ assert tw.lines[3] == ("_ ", None)
+ assert tw.lines[4] == ""
+ assert tw.lines[5] == "> ???"
+ assert tw.lines[6] == "E ValueError: 3"
+ assert tw.lines[7] == ""
+ assert tw.lines[8].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_filenames(self, importasmod):
+ mod = importasmod("""
+ def f():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tw = TWMock()
+ path = py.path.local(mod.__file__)
+ old = path.dirpath().chdir()
+ try:
+ repr = excinfo.getrepr(abspath=False)
+ repr.toterminal(tw)
+ line = tw.lines[-1]
+ x = py.path.local().bestrelpath(path)
+ if len(x) < len(str(path)):
+ assert line == "mod.py:3: ValueError"
+
+ repr = excinfo.getrepr(abspath=True)
+ repr.toterminal(tw)
+ line = tw.lines[-1]
+ assert line == "%s:3: ValueError" %(path,)
+ finally:
+ old.chdir()
+
+ @py.test.mark.multi(reproptions=[
+ {'style': style, 'showlocals': showlocals,
+ 'funcargs': funcargs, 'tbfilter': tbfilter
+ } for style in ("long", "short", "no")
+ for showlocals in (True, False)
+ for tbfilter in (True, False)
+ for funcargs in (True, False)])
+ def test_format_excinfo(self, importasmod, reproptions):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tw = py.io.TerminalWriter(stringio=True)
+ repr = excinfo.getrepr(**reproptions)
+ repr.toterminal(tw)
+ assert tw.stringio.getvalue()
+
+
+ def test_native_style(self):
+ excinfo = self.excinfo_from_exec("""
+ assert 0
+ """)
+ repr = excinfo.getrepr(style='native')
+ assert "assert 0" in str(repr.reprcrash)
+ s = str(repr)
+ assert s.startswith('Traceback (most recent call last):\n File')
+ assert s.endswith('\nAssertionError: assert 0')
+ assert 'exec (source.compile())' in s
+ # python 2.4 fails to get the source line for the assert
+ if py.std.sys.version_info >= (2, 5):
+ assert s.count('assert 0') == 2
+
+ def test_traceback_repr_style(self, importasmod):
+ mod = importasmod("""
+ def f():
+ g()
+ def g():
+ h()
+ def h():
+ i()
+ def i():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ excinfo.traceback = excinfo.traceback.filter()
+ excinfo.traceback[1].set_repr_style("short")
+ excinfo.traceback[2].set_repr_style("short")
+ r = excinfo.getrepr(style="long")
+ tw = TWMock()
+ r.toterminal(tw)
+ for line in tw.lines: print (line)
+ assert tw.lines[0] == ""
+ assert tw.lines[1] == " def f():"
+ assert tw.lines[2] == "> g()"
+ assert tw.lines[3] == ""
+ assert tw.lines[4].endswith("mod.py:3: ")
+ assert tw.lines[5] == ("_ ", None)
+ assert tw.lines[6].endswith("in g")
+ assert tw.lines[7] == " h()"
+ assert tw.lines[8].endswith("in h")
+ assert tw.lines[9] == " i()"
+ assert tw.lines[10] == ("_ ", None)
+ assert tw.lines[11] == ""
+ assert tw.lines[12] == " def i():"
+ assert tw.lines[13] == "> raise ValueError()"
+ assert tw.lines[14] == "E ValueError"
+ assert tw.lines[15] == ""
+ assert tw.lines[16].endswith("mod.py:9: ValueError")
diff --git a/testing/code/test_source.py b/testing/code/test_source.py
index 830de2c..7d135e8 100644
--- a/testing/code/test_source.py
+++ b/testing/code/test_source.py
@@ -1,651 +1,651 @@
-from py.code import Source
-import py
-import sys
-
-from py._code.source import _ast
-if _ast is not None:
- astonly = py.test.mark.nothing
-else:
- astonly = py.test.mark.xfail("True", reason="only works with AST-compile")
-
-failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
-
-def test_source_str_function():
- x = Source("3")
- assert str(x) == "3"
-
- x = Source(" 3")
- assert str(x) == "3"
-
- x = Source("""
- 3
- """, rstrip=False)
- assert str(x) == "\n3\n "
-
- x = Source("""
- 3
- """, rstrip=True)
- assert str(x) == "\n3"
-
-def test_unicode():
- try:
- unicode
- except NameError:
- return
- x = Source(unicode("4"))
- assert str(x) == "4"
- co = py.code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
- val = eval(co)
- assert isinstance(val, unicode)
-
-def test_source_from_function():
- source = py.code.Source(test_source_str_function)
- assert str(source).startswith('def test_source_str_function():')
-
-def test_source_from_method():
- class TestClass:
- def test_method(self):
- pass
- source = py.code.Source(TestClass().test_method)
- assert source.lines == ["def test_method(self):",
- " pass"]
-
-def test_source_from_lines():
- lines = ["a \n", "b\n", "c"]
- source = py.code.Source(lines)
- assert source.lines == ['a ', 'b', 'c']
-
-def test_source_from_inner_function():
- def f():
- pass
- source = py.code.Source(f, deindent=False)
- assert str(source).startswith(' def f():')
- source = py.code.Source(f)
- assert str(source).startswith('def f():')
-
-def test_source_putaround_simple():
- source = Source("raise ValueError")
- source = source.putaround(
- "try:", """\
- except ValueError:
- x = 42
- else:
- x = 23""")
- assert str(source)=="""\
-try:
- raise ValueError
-except ValueError:
- x = 42
-else:
- x = 23"""
-
-def test_source_putaround():
- source = Source()
- source = source.putaround("""
- if 1:
- x=1
- """)
- assert str(source).strip() == "if 1:\n x=1"
-
-def test_source_strips():
- source = Source("")
- assert source == Source()
- assert str(source) == ''
- assert source.strip() == source
-
-def test_source_strip_multiline():
- source = Source()
- source.lines = ["", " hello", " "]
- source2 = source.strip()
- assert source2.lines == [" hello"]
-
-def test_syntaxerror_rerepresentation():
- ex = py.test.raises(SyntaxError, py.code.compile, 'xyz xyz')
- assert ex.value.lineno == 1
- assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
- assert ex.value.text.strip(), 'x x'
-
-def test_isparseable():
- assert Source("hello").isparseable()
- assert Source("if 1:\n pass").isparseable()
- assert Source(" \nif 1:\n pass").isparseable()
- assert not Source("if 1:\n").isparseable()
- assert not Source(" \nif 1:\npass").isparseable()
- assert not Source(chr(0)).isparseable()
-
-class TestAccesses:
- source = Source("""\
- def f(x):
- pass
- def g(x):
- pass
- """)
- def test_getrange(self):
- x = self.source[0:2]
- assert x.isparseable()
- assert len(x.lines) == 2
- assert str(x) == "def f(x):\n pass"
-
- def test_getline(self):
- x = self.source[0]
- assert x == "def f(x):"
-
- def test_len(self):
- assert len(self.source) == 4
-
- def test_iter(self):
- l = [x for x in self.source]
- assert len(l) == 4
-
-class TestSourceParsingAndCompiling:
- source = Source("""\
- def f(x):
- assert (x ==
- 3 +
- 4)
- """).strip()
-
- def test_compile(self):
- co = py.code.compile("x=3")
- d = {}
- exec (co, d)
- assert d['x'] == 3
-
- def test_compile_and_getsource_simple(self):
- co = py.code.compile("x=3")
- exec (co)
- source = py.code.Source(co)
- assert str(source) == "x=3"
-
- def test_compile_and_getsource_through_same_function(self):
- def gensource(source):
- return py.code.compile(source)
- co1 = gensource("""
- def f():
- raise KeyError()
- """)
- co2 = gensource("""
- def f():
- raise ValueError()
- """)
- source1 = py.std.inspect.getsource(co1)
- assert 'KeyError' in source1
- source2 = py.std.inspect.getsource(co2)
- assert 'ValueError' in source2
-
- def test_getstatement(self):
- #print str(self.source)
- ass = str(self.source[1:])
- for i in range(1, 4):
- #print "trying start in line %r" % self.source[i]
- s = self.source.getstatement(i)
- #x = s.deindent()
- assert str(s) == ass
-
- def test_getstatementrange_triple_quoted(self):
- #print str(self.source)
- source = Source("""hello('''
- ''')""")
- s = source.getstatement(0)
- assert s == str(source)
- s = source.getstatement(1)
- assert s == str(source)
-
- @astonly
- def test_getstatementrange_within_constructs(self):
- source = Source("""\
- try:
- try:
- raise ValueError
- except SomeThing:
- pass
- finally:
- 42
- """)
- assert len(source) == 7
- # check all lineno's that could occur in a traceback
- #assert source.getstatementrange(0) == (0, 7)
- #assert source.getstatementrange(1) == (1, 5)
- assert source.getstatementrange(2) == (2, 3)
- assert source.getstatementrange(3) == (3, 4)
- assert source.getstatementrange(4) == (4, 5)
- #assert source.getstatementrange(5) == (0, 7)
- assert source.getstatementrange(6) == (6, 7)
-
- def test_getstatementrange_bug(self):
- source = Source("""\
- try:
- x = (
- y +
- z)
- except:
- pass
- """)
- assert len(source) == 6
- assert source.getstatementrange(2) == (1, 4)
-
- def test_getstatementrange_bug2(self):
- source = Source("""\
- assert (
- 33
- ==
- [
- X(3,
- b=1, c=2
- ),
- ]
- )
- """)
- assert len(source) == 9
- assert source.getstatementrange(5) == (0, 9)
-
- def test_getstatementrange_ast_issue58(self):
- source = Source("""\
-
- def test_some():
- for a in [a for a in
- CAUSE_ERROR]: pass
-
- x = 3
- """)
- assert getstatement(2, source).lines == source.lines[2:3]
- assert getstatement(3, source).lines == source.lines[3:4]
-
- @py.test.mark.skipif("sys.version_info < (2,6)")
- def test_getstatementrange_out_of_bounds_py3(self):
- source = Source("if xxx:\n from .collections import something")
- r = source.getstatementrange(1)
- assert r == (1,2)
-
- def test_getstatementrange_with_syntaxerror_issue7(self):
- source = Source(":")
- py.test.raises(SyntaxError, lambda: source.getstatementrange(0))
-
- @py.test.mark.skipif("sys.version_info < (2,6)")
- def test_compile_to_ast(self):
- import ast
- source = Source("x = 4")
- mod = source.compile(flag=ast.PyCF_ONLY_AST)
- assert isinstance(mod, ast.Module)
- compile(mod, "<filename>", "exec")
-
- def test_compile_and_getsource(self):
- co = self.source.compile()
- py.builtin.exec_(co, globals())
- f(7)
- excinfo = py.test.raises(AssertionError, "f(6)")
- frame = excinfo.traceback[-1].frame
- stmt = frame.code.fullsource.getstatement(frame.lineno)
- #print "block", str(block)
- assert str(stmt).strip().startswith('assert')
-
- def test_compilefuncs_and_path_sanity(self):
- def check(comp, name):
- co = comp(self.source, name)
- if not name:
- expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
- else:
- expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
- fn = co.co_filename
- assert fn.endswith(expected)
-
- mycode = py.code.Code(self.test_compilefuncs_and_path_sanity)
- mylineno = mycode.firstlineno
- mypath = mycode.path
-
- for comp in py.code.compile, py.code.Source.compile:
- for name in '', None, 'my':
- yield check, comp, name
-
- def test_offsetless_synerr(self):
- py.test.raises(SyntaxError, py.code.compile, "lambda a,a: 0", mode='eval')
-
-def test_getstartingblock_singleline():
- class A:
- def __init__(self, *args):
- frame = sys._getframe(1)
- self.source = py.code.Frame(frame).statement
-
- x = A('x', 'y')
-
- l = [i for i in x.source.lines if i.strip()]
- assert len(l) == 1
-
-def test_getstartingblock_multiline():
- class A:
- def __init__(self, *args):
- frame = sys._getframe(1)
- self.source = py.code.Frame(frame).statement
-
- x = A('x',
- 'y' \
- ,
- 'z')
-
- l = [i for i in x.source.lines if i.strip()]
- assert len(l) == 4
-
-def test_getline_finally():
- def c(): pass
- excinfo = py.test.raises(TypeError, """
- teardown = None
- try:
- c(1)
- finally:
- if teardown:
- teardown()
- """)
- source = excinfo.traceback[-1].statement
- assert str(source).strip() == 'c(1)'
-
-def test_getfuncsource_dynamic():
- source = """
- def f():
- raise ValueError
-
- def g(): pass
- """
- co = py.code.compile(source)
- py.builtin.exec_(co, globals())
- assert str(py.code.Source(f)).strip() == 'def f():\n raise ValueError'
- assert str(py.code.Source(g)).strip() == 'def g(): pass'
-
-
-def test_getfuncsource_with_multine_string():
- def f():
- c = '''while True:
- pass
-'''
- assert str(py.code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
-
-
-def test_deindent():
- from py._code.source import deindent as deindent
- assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
-
- def f():
- c = '''while True:
- pass
-'''
- import inspect
- lines = deindent(inspect.getsource(f).splitlines())
- assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
-
- source = """
- def f():
- def g():
- pass
- """
- lines = deindent(source.splitlines())
- assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
-
-@py.test.mark.xfail("sys.version_info[:3] < (2,7,0) or "
- "((3,0) <= sys.version_info[:2] < (3,2))")
-def test_source_of_class_at_eof_without_newline(tmpdir):
- # this test fails because the implicit inspect.getsource(A) below
- # does not return the "x = 1" last line.
- source = py.code.Source('''
- class A(object):
- def method(self):
- x = 1
- ''')
- path = tmpdir.join("a.py")
- path.write(source)
- s2 = py.code.Source(tmpdir.join("a.py").pyimport().A)
- assert str(source).strip() == str(s2).strip()
-
-if True:
- def x():
- pass
-
-def test_getsource_fallback():
- from py._code.source import getsource
- expected = """def x():
- pass"""
- src = getsource(x)
- assert src == expected
-
-def test_idem_compile_and_getsource():
- from py._code.source import getsource
- expected = "def x(): pass"
- co = py.code.compile(expected)
- src = getsource(co)
- assert src == expected
-
-def test_findsource_fallback():
- from py._code.source import findsource
- src, lineno = findsource(x)
- assert 'test_findsource_simple' in str(src)
- assert src[lineno] == ' def x():'
-
-def test_findsource():
- from py._code.source import findsource
- co = py.code.compile("""if 1:
- def x():
- pass
-""")
-
- src, lineno = findsource(co)
- assert 'if 1:' in str(src)
-
- d = {}
- eval(co, d)
- src, lineno = findsource(d['x'])
- assert 'if 1:' in str(src)
- assert src[lineno] == " def x():"
-
-
-def test_getfslineno():
- from py.code import getfslineno
-
- def f(x):
- pass
-
- fspath, lineno = getfslineno(f)
-
- assert fspath.basename == "test_source.py"
- assert lineno == py.code.getrawcode(f).co_firstlineno-1 # see findsource
-
- class A(object):
- pass
-
- fspath, lineno = getfslineno(A)
-
- _, A_lineno = py.std.inspect.findsource(A)
- assert fspath.basename == "test_source.py"
- assert lineno == A_lineno
-
- assert getfslineno(3) == ("", -1)
- class B:
- pass
- B.__name__ = "B2"
- assert getfslineno(B)[1] == -1
-
-def test_code_of_object_instance_with_call():
- class A:
- pass
- py.test.raises(TypeError, lambda: py.code.Source(A()))
- class WithCall:
- def __call__(self):
- pass
-
- code = py.code.Code(WithCall())
- assert 'pass' in str(code.source())
-
- class Hello(object):
- def __call__(self):
- pass
- py.test.raises(TypeError, lambda: py.code.Code(Hello))
-
-
-def getstatement(lineno, source):
- from py._code.source import getstatementrange_ast
- source = py.code.Source(source, deindent=False)
- ast, start, end = getstatementrange_ast(lineno, source)
- return source[start:end]
-
-def test_oneline():
- source = getstatement(0, "raise ValueError")
- assert str(source) == "raise ValueError"
-
-def test_comment_and_no_newline_at_end():
- from py._code.source import getstatementrange_ast
- source = Source(['def test_basic_complex():',
- ' assert 1 == 2',
- '# vim: filetype=pyopencl:fdm=marker'])
- ast, start, end = getstatementrange_ast(1, source)
- assert end == 2
-
-def test_oneline_and_comment():
- source = getstatement(0, "raise ValueError\n#hello")
- assert str(source) == "raise ValueError"
-
-def test_comments():
- source = '''def test():
- "comment 1"
- x = 1
- # comment 2
- # comment 3
-
- assert False
-
-"""
-comment 4
-"""
-'''
- for line in range(2,6):
- assert str(getstatement(line, source)) == ' x = 1'
- for line in range(6,10):
- assert str(getstatement(line, source)) == ' assert False'
- assert str(getstatement(10, source)) == '"""'
-
-def test_comment_in_statement():
- source = '''test(foo=1,
- # comment 1
- bar=2)
-'''
- for line in range(1,3):
- assert str(getstatement(line, source)) == \
- 'test(foo=1,\n # comment 1\n bar=2)'
-
-def test_single_line_else():
- source = getstatement(1, "if False: 2\nelse: 3")
- assert str(source) == "else: 3"
-
-def test_single_line_finally():
- source = getstatement(1, "try: 1\nfinally: 3")
- assert str(source) == "finally: 3"
-
-def test_issue55():
- source = ('def round_trip(dinp):\n assert 1 == dinp\n'
- 'def test_rt():\n round_trip("""\n""")\n')
- s = getstatement(3, source)
- assert str(s) == ' round_trip("""\n""")'
-
-
-def XXXtest_multiline():
- source = getstatement(0, """\
-raise ValueError(
- 23
-)
-x = 3
-""")
- assert str(source) == "raise ValueError(\n 23\n)"
-
-class TestTry:
- pytestmark = astonly
- source = """\
-try:
- raise ValueError
-except Something:
- raise IndexError(1)
-else:
- raise KeyError()
-"""
-
- def test_body(self):
- source = getstatement(1, self.source)
- assert str(source) == " raise ValueError"
-
- def test_except_line(self):
- source = getstatement(2, self.source)
- assert str(source) == "except Something:"
-
- def test_except_body(self):
- source = getstatement(3, self.source)
- assert str(source) == " raise IndexError(1)"
-
- def test_else(self):
- source = getstatement(5, self.source)
- assert str(source) == " raise KeyError()"
-
-class TestTryFinally:
- source = """\
-try:
- raise ValueError
-finally:
- raise IndexError(1)
-"""
-
- def test_body(self):
- source = getstatement(1, self.source)
- assert str(source) == " raise ValueError"
-
- def test_finally(self):
- source = getstatement(3, self.source)
- assert str(source) == " raise IndexError(1)"
-
-
-
-class TestIf:
- pytestmark = astonly
- source = """\
-if 1:
- y = 3
-elif False:
- y = 5
-else:
- y = 7
-"""
-
- def test_body(self):
- source = getstatement(1, self.source)
- assert str(source) == " y = 3"
-
- def test_elif_clause(self):
- source = getstatement(2, self.source)
- assert str(source) == "elif False:"
-
- def test_elif(self):
- source = getstatement(3, self.source)
- assert str(source) == " y = 5"
-
- def test_else(self):
- source = getstatement(5, self.source)
- assert str(source) == " y = 7"
-
-def test_semicolon():
- s = """\
-hello ; pytest.skip()
-"""
- source = getstatement(0, s)
- assert str(source) == s.strip()
-
-def test_def_online():
- s = """\
-def func(): raise ValueError(42)
-
-def something():
- pass
-"""
- source = getstatement(0, s)
- assert str(source) == "def func(): raise ValueError(42)"
-
-def XXX_test_expression_multiline():
- source = """\
-something
-'''
-'''"""
- result = getstatement(1, source)
- assert str(result) == "'''\n'''"
-
+from py.code import Source
+import py
+import sys
+
+from py._code.source import _ast
+if _ast is not None:
+ astonly = py.test.mark.nothing
+else:
+ astonly = py.test.mark.xfail("True", reason="only works with AST-compile")
+
+failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
+
+def test_source_str_function():
+ x = Source("3")
+ assert str(x) == "3"
+
+ x = Source(" 3")
+ assert str(x) == "3"
+
+ x = Source("""
+ 3
+ """, rstrip=False)
+ assert str(x) == "\n3\n "
+
+ x = Source("""
+ 3
+ """, rstrip=True)
+ assert str(x) == "\n3"
+
+def test_unicode():
+ try:
+ unicode
+ except NameError:
+ return
+ x = Source(unicode("4"))
+ assert str(x) == "4"
+ co = py.code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
+ val = eval(co)
+ assert isinstance(val, unicode)
+
+def test_source_from_function():
+ source = py.code.Source(test_source_str_function)
+ assert str(source).startswith('def test_source_str_function():')
+
+def test_source_from_method():
+ class TestClass:
+ def test_method(self):
+ pass
+ source = py.code.Source(TestClass().test_method)
+ assert source.lines == ["def test_method(self):",
+ " pass"]
+
+def test_source_from_lines():
+ lines = ["a \n", "b\n", "c"]
+ source = py.code.Source(lines)
+ assert source.lines == ['a ', 'b', 'c']
+
+def test_source_from_inner_function():
+ def f():
+ pass
+ source = py.code.Source(f, deindent=False)
+ assert str(source).startswith(' def f():')
+ source = py.code.Source(f)
+ assert str(source).startswith('def f():')
+
+def test_source_putaround_simple():
+ source = Source("raise ValueError")
+ source = source.putaround(
+ "try:", """\
+ except ValueError:
+ x = 42
+ else:
+ x = 23""")
+ assert str(source)=="""\
+try:
+ raise ValueError
+except ValueError:
+ x = 42
+else:
+ x = 23"""
+
+def test_source_putaround():
+ source = Source()
+ source = source.putaround("""
+ if 1:
+ x=1
+ """)
+ assert str(source).strip() == "if 1:\n x=1"
+
+def test_source_strips():
+ source = Source("")
+ assert source == Source()
+ assert str(source) == ''
+ assert source.strip() == source
+
+def test_source_strip_multiline():
+ source = Source()
+ source.lines = ["", " hello", " "]
+ source2 = source.strip()
+ assert source2.lines == [" hello"]
+
+def test_syntaxerror_rerepresentation():
+ ex = py.test.raises(SyntaxError, py.code.compile, 'xyz xyz')
+ assert ex.value.lineno == 1
+ assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
+ assert ex.value.text.strip(), 'x x'
+
+def test_isparseable():
+ assert Source("hello").isparseable()
+ assert Source("if 1:\n pass").isparseable()
+ assert Source(" \nif 1:\n pass").isparseable()
+ assert not Source("if 1:\n").isparseable()
+ assert not Source(" \nif 1:\npass").isparseable()
+ assert not Source(chr(0)).isparseable()
+
+class TestAccesses:
+ source = Source("""\
+ def f(x):
+ pass
+ def g(x):
+ pass
+ """)
+ def test_getrange(self):
+ x = self.source[0:2]
+ assert x.isparseable()
+ assert len(x.lines) == 2
+ assert str(x) == "def f(x):\n pass"
+
+ def test_getline(self):
+ x = self.source[0]
+ assert x == "def f(x):"
+
+ def test_len(self):
+ assert len(self.source) == 4
+
+ def test_iter(self):
+ l = [x for x in self.source]
+ assert len(l) == 4
+
+class TestSourceParsingAndCompiling:
+ source = Source("""\
+ def f(x):
+ assert (x ==
+ 3 +
+ 4)
+ """).strip()
+
+ def test_compile(self):
+ co = py.code.compile("x=3")
+ d = {}
+ exec (co, d)
+ assert d['x'] == 3
+
+ def test_compile_and_getsource_simple(self):
+ co = py.code.compile("x=3")
+ exec (co)
+ source = py.code.Source(co)
+ assert str(source) == "x=3"
+
+ def test_compile_and_getsource_through_same_function(self):
+ def gensource(source):
+ return py.code.compile(source)
+ co1 = gensource("""
+ def f():
+ raise KeyError()
+ """)
+ co2 = gensource("""
+ def f():
+ raise ValueError()
+ """)
+ source1 = py.std.inspect.getsource(co1)
+ assert 'KeyError' in source1
+ source2 = py.std.inspect.getsource(co2)
+ assert 'ValueError' in source2
+
+ def test_getstatement(self):
+ #print str(self.source)
+ ass = str(self.source[1:])
+ for i in range(1, 4):
+ #print "trying start in line %r" % self.source[i]
+ s = self.source.getstatement(i)
+ #x = s.deindent()
+ assert str(s) == ass
+
+ def test_getstatementrange_triple_quoted(self):
+ #print str(self.source)
+ source = Source("""hello('''
+ ''')""")
+ s = source.getstatement(0)
+ assert s == str(source)
+ s = source.getstatement(1)
+ assert s == str(source)
+
+ @astonly
+ def test_getstatementrange_within_constructs(self):
+ source = Source("""\
+ try:
+ try:
+ raise ValueError
+ except SomeThing:
+ pass
+ finally:
+ 42
+ """)
+ assert len(source) == 7
+ # check all lineno's that could occur in a traceback
+ #assert source.getstatementrange(0) == (0, 7)
+ #assert source.getstatementrange(1) == (1, 5)
+ assert source.getstatementrange(2) == (2, 3)
+ assert source.getstatementrange(3) == (3, 4)
+ assert source.getstatementrange(4) == (4, 5)
+ #assert source.getstatementrange(5) == (0, 7)
+ assert source.getstatementrange(6) == (6, 7)
+
+ def test_getstatementrange_bug(self):
+ source = Source("""\
+ try:
+ x = (
+ y +
+ z)
+ except:
+ pass
+ """)
+ assert len(source) == 6
+ assert source.getstatementrange(2) == (1, 4)
+
+ def test_getstatementrange_bug2(self):
+ source = Source("""\
+ assert (
+ 33
+ ==
+ [
+ X(3,
+ b=1, c=2
+ ),
+ ]
+ )
+ """)
+ assert len(source) == 9
+ assert source.getstatementrange(5) == (0, 9)
+
+ def test_getstatementrange_ast_issue58(self):
+ source = Source("""\
+
+ def test_some():
+ for a in [a for a in
+ CAUSE_ERROR]: pass
+
+ x = 3
+ """)
+ assert getstatement(2, source).lines == source.lines[2:3]
+ assert getstatement(3, source).lines == source.lines[3:4]
+
+ @py.test.mark.skipif("sys.version_info < (2,6)")
+ def test_getstatementrange_out_of_bounds_py3(self):
+ source = Source("if xxx:\n from .collections import something")
+ r = source.getstatementrange(1)
+ assert r == (1,2)
+
+ def test_getstatementrange_with_syntaxerror_issue7(self):
+ source = Source(":")
+ py.test.raises(SyntaxError, lambda: source.getstatementrange(0))
+
+ @py.test.mark.skipif("sys.version_info < (2,6)")
+ def test_compile_to_ast(self):
+ import ast
+ source = Source("x = 4")
+ mod = source.compile(flag=ast.PyCF_ONLY_AST)
+ assert isinstance(mod, ast.Module)
+ compile(mod, "<filename>", "exec")
+
+ def test_compile_and_getsource(self):
+ co = self.source.compile()
+ py.builtin.exec_(co, globals())
+ f(7)
+ excinfo = py.test.raises(AssertionError, "f(6)")
+ frame = excinfo.traceback[-1].frame
+ stmt = frame.code.fullsource.getstatement(frame.lineno)
+ #print "block", str(block)
+ assert str(stmt).strip().startswith('assert')
+
+ def test_compilefuncs_and_path_sanity(self):
+ def check(comp, name):
+ co = comp(self.source, name)
+ if not name:
+ expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
+ else:
+ expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
+ fn = co.co_filename
+ assert fn.endswith(expected)
+
+ mycode = py.code.Code(self.test_compilefuncs_and_path_sanity)
+ mylineno = mycode.firstlineno
+ mypath = mycode.path
+
+ for comp in py.code.compile, py.code.Source.compile:
+ for name in '', None, 'my':
+ yield check, comp, name
+
+ def test_offsetless_synerr(self):
+ py.test.raises(SyntaxError, py.code.compile, "lambda a,a: 0", mode='eval')
+
+def test_getstartingblock_singleline():
+ class A:
+ def __init__(self, *args):
+ frame = sys._getframe(1)
+ self.source = py.code.Frame(frame).statement
+
+ x = A('x', 'y')
+
+ l = [i for i in x.source.lines if i.strip()]
+ assert len(l) == 1
+
+def test_getstartingblock_multiline():
+ class A:
+ def __init__(self, *args):
+ frame = sys._getframe(1)
+ self.source = py.code.Frame(frame).statement
+
+ x = A('x',
+ 'y' \
+ ,
+ 'z')
+
+ l = [i for i in x.source.lines if i.strip()]
+ assert len(l) == 4
+
+def test_getline_finally():
+ def c(): pass
+ excinfo = py.test.raises(TypeError, """
+ teardown = None
+ try:
+ c(1)
+ finally:
+ if teardown:
+ teardown()
+ """)
+ source = excinfo.traceback[-1].statement
+ assert str(source).strip() == 'c(1)'
+
+def test_getfuncsource_dynamic():
+ source = """
+ def f():
+ raise ValueError
+
+ def g(): pass
+ """
+ co = py.code.compile(source)
+ py.builtin.exec_(co, globals())
+ assert str(py.code.Source(f)).strip() == 'def f():\n raise ValueError'
+ assert str(py.code.Source(g)).strip() == 'def g(): pass'
+
+
+def test_getfuncsource_with_multine_string():
+ def f():
+ c = '''while True:
+ pass
+'''
+ assert str(py.code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
+
+
+def test_deindent():
+ from py._code.source import deindent as deindent
+ assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
+
+ def f():
+ c = '''while True:
+ pass
+'''
+ import inspect
+ lines = deindent(inspect.getsource(f).splitlines())
+ assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
+
+ source = """
+ def f():
+ def g():
+ pass
+ """
+ lines = deindent(source.splitlines())
+ assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
+
+@py.test.mark.xfail("sys.version_info[:3] < (2,7,0) or "
+ "((3,0) <= sys.version_info[:2] < (3,2))")
+def test_source_of_class_at_eof_without_newline(tmpdir):
+ # this test fails because the implicit inspect.getsource(A) below
+ # does not return the "x = 1" last line.
+ source = py.code.Source('''
+ class A(object):
+ def method(self):
+ x = 1
+ ''')
+ path = tmpdir.join("a.py")
+ path.write(source)
+ s2 = py.code.Source(tmpdir.join("a.py").pyimport().A)
+ assert str(source).strip() == str(s2).strip()
+
+if True:
+ def x():
+ pass
+
+def test_getsource_fallback():
+ from py._code.source import getsource
+ expected = """def x():
+ pass"""
+ src = getsource(x)
+ assert src == expected
+
+def test_idem_compile_and_getsource():
+ from py._code.source import getsource
+ expected = "def x(): pass"
+ co = py.code.compile(expected)
+ src = getsource(co)
+ assert src == expected
+
+def test_findsource_fallback():
+ from py._code.source import findsource
+ src, lineno = findsource(x)
+ assert 'test_findsource_simple' in str(src)
+ assert src[lineno] == ' def x():'
+
+def test_findsource():
+ from py._code.source import findsource
+ co = py.code.compile("""if 1:
+ def x():
+ pass
+""")
+
+ src, lineno = findsource(co)
+ assert 'if 1:' in str(src)
+
+ d = {}
+ eval(co, d)
+ src, lineno = findsource(d['x'])
+ assert 'if 1:' in str(src)
+ assert src[lineno] == " def x():"
+
+
+def test_getfslineno():
+ from py.code import getfslineno
+
+ def f(x):
+ pass
+
+ fspath, lineno = getfslineno(f)
+
+ assert fspath.basename == "test_source.py"
+ assert lineno == py.code.getrawcode(f).co_firstlineno-1 # see findsource
+
+ class A(object):
+ pass
+
+ fspath, lineno = getfslineno(A)
+
+ _, A_lineno = py.std.inspect.findsource(A)
+ assert fspath.basename == "test_source.py"
+ assert lineno == A_lineno
+
+ assert getfslineno(3) == ("", -1)
+ class B:
+ pass
+ B.__name__ = "B2"
+ assert getfslineno(B)[1] == -1
+
+def test_code_of_object_instance_with_call():
+ class A:
+ pass
+ py.test.raises(TypeError, lambda: py.code.Source(A()))
+ class WithCall:
+ def __call__(self):
+ pass
+
+ code = py.code.Code(WithCall())
+ assert 'pass' in str(code.source())
+
+ class Hello(object):
+ def __call__(self):
+ pass
+ py.test.raises(TypeError, lambda: py.code.Code(Hello))
+
+
+def getstatement(lineno, source):
+ from py._code.source import getstatementrange_ast
+ source = py.code.Source(source, deindent=False)
+ ast, start, end = getstatementrange_ast(lineno, source)
+ return source[start:end]
+
+def test_oneline():
+ source = getstatement(0, "raise ValueError")
+ assert str(source) == "raise ValueError"
+
+def test_comment_and_no_newline_at_end():
+ from py._code.source import getstatementrange_ast
+ source = Source(['def test_basic_complex():',
+ ' assert 1 == 2',
+ '# vim: filetype=pyopencl:fdm=marker'])
+ ast, start, end = getstatementrange_ast(1, source)
+ assert end == 2
+
+def test_oneline_and_comment():
+ source = getstatement(0, "raise ValueError\n#hello")
+ assert str(source) == "raise ValueError"
+
+def test_comments():
+ source = '''def test():
+ "comment 1"
+ x = 1
+ # comment 2
+ # comment 3
+
+ assert False
+
+"""
+comment 4
+"""
+'''
+ for line in range(2,6):
+ assert str(getstatement(line, source)) == ' x = 1'
+ for line in range(6,10):
+ assert str(getstatement(line, source)) == ' assert False'
+ assert str(getstatement(10, source)) == '"""'
+
+def test_comment_in_statement():
+ source = '''test(foo=1,
+ # comment 1
+ bar=2)
+'''
+ for line in range(1,3):
+ assert str(getstatement(line, source)) == \
+ 'test(foo=1,\n # comment 1\n bar=2)'
+
+def test_single_line_else():
+ source = getstatement(1, "if False: 2\nelse: 3")
+ assert str(source) == "else: 3"
+
+def test_single_line_finally():
+ source = getstatement(1, "try: 1\nfinally: 3")
+ assert str(source) == "finally: 3"
+
+def test_issue55():
+ source = ('def round_trip(dinp):\n assert 1 == dinp\n'
+ 'def test_rt():\n round_trip("""\n""")\n')
+ s = getstatement(3, source)
+ assert str(s) == ' round_trip("""\n""")'
+
+
+def XXXtest_multiline():
+ source = getstatement(0, """\
+raise ValueError(
+ 23
+)
+x = 3
+""")
+ assert str(source) == "raise ValueError(\n 23\n)"
+
+class TestTry:
+ pytestmark = astonly
+ source = """\
+try:
+ raise ValueError
+except Something:
+ raise IndexError(1)
+else:
+ raise KeyError()
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " raise ValueError"
+
+ def test_except_line(self):
+ source = getstatement(2, self.source)
+ assert str(source) == "except Something:"
+
+ def test_except_body(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " raise IndexError(1)"
+
+ def test_else(self):
+ source = getstatement(5, self.source)
+ assert str(source) == " raise KeyError()"
+
+class TestTryFinally:
+ source = """\
+try:
+ raise ValueError
+finally:
+ raise IndexError(1)
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " raise ValueError"
+
+ def test_finally(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " raise IndexError(1)"
+
+
+
+class TestIf:
+ pytestmark = astonly
+ source = """\
+if 1:
+ y = 3
+elif False:
+ y = 5
+else:
+ y = 7
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " y = 3"
+
+ def test_elif_clause(self):
+ source = getstatement(2, self.source)
+ assert str(source) == "elif False:"
+
+ def test_elif(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " y = 5"
+
+ def test_else(self):
+ source = getstatement(5, self.source)
+ assert str(source) == " y = 7"
+
+def test_semicolon():
+ s = """\
+hello ; pytest.skip()
+"""
+ source = getstatement(0, s)
+ assert str(source) == s.strip()
+
+def test_def_online():
+ s = """\
+def func(): raise ValueError(42)
+
+def something():
+ pass
+"""
+ source = getstatement(0, s)
+ assert str(source) == "def func(): raise ValueError(42)"
+
+def XXX_test_expression_multiline():
+ source = """\
+something
+'''
+'''"""
+ result = getstatement(1, source)
+ assert str(result) == "'''\n'''"
+
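The hunk above rewrites testing/code/test_source.py; its tests double as a compact reference for the py.code.Source API (construction from strings, functions and code objects, statement extraction, compilation). A minimal usage sketch, assuming a py 1.4.x installation; names such as src, stmt and ns are illustrative only:

    import py
    from py.code import Source

    # Construct from a string; common leading indentation is stripped by default.
    src = Source("    def f(x):\n        return x + 1")
    assert src.lines == ["def f(x):", "    return x + 1"]

    # getstatement() returns the statement covering a 0-based line number.
    stmt = src.getstatement(1)
    assert str(stmt) == "    return x + 1"

    # compile() yields a real code object that round-trips back to its source.
    co = src.compile()
    ns = {}
    py.builtin.exec_(co, ns)
    assert ns["f"](2) == 3
    assert str(py.code.Source(co)).startswith("def f(x):")
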
diff --git a/testing/conftest.py b/testing/conftest.py
index 0f956b3..3f00ce8 100644
--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -1,3 +1,3 @@
-
-pytest_plugins = "pytester",
-
+
+pytest_plugins = "pytester",
+
diff --git a/testing/conftest.pyc b/testing/conftest.pyc
deleted file mode 100644
index 887ad93..0000000
--- a/testing/conftest.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__init__.py b/testing/io_/__init__.py
index 792d600..16e3853 100644
--- a/testing/io_/__init__.py
+++ b/testing/io_/__init__.py
@@ -1 +1 @@
-#
+#
diff --git a/testing/io_/__init__.pyc b/testing/io_/__init__.pyc
deleted file mode 100644
index c91f1de..0000000
--- a/testing/io_/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__pycache__/__init__.cpython-35.pyc b/testing/io_/__pycache__/__init__.cpython-35.pyc
deleted file mode 100644
index f5cfcfb..0000000
--- a/testing/io_/__pycache__/__init__.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__pycache__/test_capture.cpython-27-PYTEST.pyc b/testing/io_/__pycache__/test_capture.cpython-27-PYTEST.pyc
deleted file mode 100644
index a79ea91..0000000
--- a/testing/io_/__pycache__/test_capture.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__pycache__/test_capture.cpython-35-PYTEST.pyc b/testing/io_/__pycache__/test_capture.cpython-35-PYTEST.pyc
deleted file mode 100644
index e771292..0000000
--- a/testing/io_/__pycache__/test_capture.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__pycache__/test_saferepr.cpython-27-PYTEST.pyc b/testing/io_/__pycache__/test_saferepr.cpython-27-PYTEST.pyc
deleted file mode 100644
index 2fb9ac4..0000000
--- a/testing/io_/__pycache__/test_saferepr.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__pycache__/test_saferepr.cpython-35-PYTEST.pyc b/testing/io_/__pycache__/test_saferepr.cpython-35-PYTEST.pyc
deleted file mode 100644
index a7dcca8..0000000
--- a/testing/io_/__pycache__/test_saferepr.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__pycache__/test_terminalwriter.cpython-27-PYTEST.pyc b/testing/io_/__pycache__/test_terminalwriter.cpython-27-PYTEST.pyc
deleted file mode 100644
index 05d90d3..0000000
--- a/testing/io_/__pycache__/test_terminalwriter.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/__pycache__/test_terminalwriter.cpython-35-PYTEST.pyc b/testing/io_/__pycache__/test_terminalwriter.cpython-35-PYTEST.pyc
deleted file mode 100644
index 03845f5..0000000
--- a/testing/io_/__pycache__/test_terminalwriter.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/io_/test_capture.py b/testing/io_/test_capture.py
index 5745e12..2d7597f 100644
--- a/testing/io_/test_capture.py
+++ b/testing/io_/test_capture.py
@@ -1,501 +1,501 @@
-from __future__ import with_statement
-
-import os, sys
-import py
-
-needsdup = py.test.mark.skipif("not hasattr(os, 'dup')")
-
-from py.builtin import print_
-
-if sys.version_info >= (3,0):
- def tobytes(obj):
- if isinstance(obj, str):
- obj = obj.encode('UTF-8')
- assert isinstance(obj, bytes)
- return obj
- def totext(obj):
- if isinstance(obj, bytes):
- obj = str(obj, 'UTF-8')
- assert isinstance(obj, str)
- return obj
-else:
- def tobytes(obj):
- if isinstance(obj, unicode):
- obj = obj.encode('UTF-8')
- assert isinstance(obj, str)
- return obj
- def totext(obj):
- if isinstance(obj, str):
- obj = unicode(obj, 'UTF-8')
- assert isinstance(obj, unicode)
- return obj
-
-def oswritebytes(fd, obj):
- os.write(fd, tobytes(obj))
-
-class TestTextIO:
- def test_text(self):
- f = py.io.TextIO()
- f.write("hello")
- s = f.getvalue()
- assert s == "hello"
- f.close()
-
- def test_unicode_and_str_mixture(self):
- f = py.io.TextIO()
- if sys.version_info >= (3,0):
- f.write("\u00f6")
- py.test.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
- else:
- f.write(unicode("\u00f6", 'UTF-8'))
- f.write("hello") # bytes
- s = f.getvalue()
- f.close()
- assert isinstance(s, unicode)
-
-def test_bytes_io():
- f = py.io.BytesIO()
- f.write(tobytes("hello"))
- py.test.raises(TypeError, "f.write(totext('hello'))")
- s = f.getvalue()
- assert s == tobytes("hello")
-
-def test_dontreadfrominput():
- from py._io.capture import DontReadFromInput
- f = DontReadFromInput()
- assert not f.isatty()
- py.test.raises(IOError, f.read)
- py.test.raises(IOError, f.readlines)
- py.test.raises(IOError, iter, f)
- py.test.raises(ValueError, f.fileno)
- f.close() # just for completeness
-
-def pytest_funcarg__tmpfile(request):
- testdir = request.getfuncargvalue("testdir")
- f = testdir.makepyfile("").open('wb+')
- request.addfinalizer(f.close)
- return f
-
-@needsdup
-def test_dupfile(tmpfile):
- flist = []
- for i in range(5):
- nf = py.io.dupfile(tmpfile, encoding="utf-8")
- assert nf != tmpfile
- assert nf.fileno() != tmpfile.fileno()
- assert nf not in flist
- print_(i, end="", file=nf)
- flist.append(nf)
- for i in range(5):
- f = flist[i]
- f.close()
- tmpfile.seek(0)
- s = tmpfile.read()
- assert "01234" in repr(s)
- tmpfile.close()
-
-def test_dupfile_no_mode():
- """
- dupfile should trap an AttributeError and return f if no mode is supplied.
- """
- class SomeFileWrapper(object):
- "An object with a fileno method but no mode attribute"
- def fileno(self):
- return 1
- tmpfile = SomeFileWrapper()
- assert py.io.dupfile(tmpfile) is tmpfile
- with py.test.raises(AttributeError):
- py.io.dupfile(tmpfile, raising=True)
-
-def lsof_check(func):
- pid = os.getpid()
- try:
- out = py.process.cmdexec("lsof -p %d" % pid)
- except py.process.cmdexec.Error:
- py.test.skip("could not run 'lsof'")
- func()
- out2 = py.process.cmdexec("lsof -p %d" % pid)
- len1 = len([x for x in out.split("\n") if "REG" in x])
- len2 = len([x for x in out2.split("\n") if "REG" in x])
- assert len2 < len1 + 3, out2
-
-class TestFDCapture:
- pytestmark = needsdup
-
- def test_not_now(self, tmpfile):
- fd = tmpfile.fileno()
- cap = py.io.FDCapture(fd, now=False)
- data = tobytes("hello")
- os.write(fd, data)
- f = cap.done()
- s = f.read()
- assert not s
- cap = py.io.FDCapture(fd, now=False)
- cap.start()
- os.write(fd, data)
- f = cap.done()
- s = f.read()
- assert s == "hello"
-
- def test_simple(self, tmpfile):
- fd = tmpfile.fileno()
- cap = py.io.FDCapture(fd)
- data = tobytes("hello")
- os.write(fd, data)
- f = cap.done()
- s = f.read()
- assert s == "hello"
- f.close()
-
- def test_simple_many(self, tmpfile):
- for i in range(10):
- self.test_simple(tmpfile)
-
- def test_simple_many_check_open_files(self, tmpfile):
- lsof_check(lambda: self.test_simple_many(tmpfile))
-
- def test_simple_fail_second_start(self, tmpfile):
- fd = tmpfile.fileno()
- cap = py.io.FDCapture(fd)
- f = cap.done()
- py.test.raises(ValueError, cap.start)
- f.close()
-
- def test_stderr(self):
- cap = py.io.FDCapture(2, patchsys=True)
- print_("hello", file=sys.stderr)
- f = cap.done()
- s = f.read()
- assert s == "hello\n"
-
- def test_stdin(self, tmpfile):
- tmpfile.write(tobytes("3"))
- tmpfile.seek(0)
- cap = py.io.FDCapture(0, tmpfile=tmpfile)
- # check with os.read() directly instead of raw_input(), because
- # sys.stdin itself may be redirected (as py.test now does by default)
- x = os.read(0, 100).strip()
- f = cap.done()
- assert x == tobytes("3")
-
- def test_writeorg(self, tmpfile):
- data1, data2 = tobytes("foo"), tobytes("bar")
- try:
- cap = py.io.FDCapture(tmpfile.fileno())
- tmpfile.write(data1)
- cap.writeorg(data2)
- finally:
- tmpfile.close()
- f = cap.done()
- scap = f.read()
- assert scap == totext(data1)
- stmp = open(tmpfile.name, 'rb').read()
- assert stmp == data2
-
-
-class TestStdCapture:
- def getcapture(self, **kw):
- return py.io.StdCapture(**kw)
-
- def test_capturing_done_simple(self):
- cap = self.getcapture()
- sys.stdout.write("hello")
- sys.stderr.write("world")
- outfile, errfile = cap.done()
- s = outfile.read()
- assert s == "hello"
- s = errfile.read()
- assert s == "world"
-
- def test_capturing_reset_simple(self):
- cap = self.getcapture()
- print("hello world")
- sys.stderr.write("hello error\n")
- out, err = cap.reset()
- assert out == "hello world\n"
- assert err == "hello error\n"
-
- def test_capturing_readouterr(self):
- cap = self.getcapture()
- try:
- print ("hello world")
- sys.stderr.write("hello error\n")
- out, err = cap.readouterr()
- assert out == "hello world\n"
- assert err == "hello error\n"
- sys.stderr.write("error2")
- finally:
- out, err = cap.reset()
- assert err == "error2"
-
- def test_capturing_readouterr_unicode(self):
- cap = self.getcapture()
- print ("hx\xc4\x85\xc4\x87")
- out, err = cap.readouterr()
- assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
-
- @py.test.mark.skipif('sys.version_info >= (3,)',
- reason='text output different for bytes on python3')
- def test_capturing_readouterr_decode_error_handling(self):
- cap = self.getcapture()
- # triggered a internal error in pytest
- print('\xa6')
- out, err = cap.readouterr()
- assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
-
- def test_capturing_mixed(self):
- cap = self.getcapture(mixed=True)
- sys.stdout.write("hello ")
- sys.stderr.write("world")
- sys.stdout.write(".")
- out, err = cap.reset()
- assert out.strip() == "hello world."
- assert not err
-
- def test_reset_twice_error(self):
- cap = self.getcapture()
- print ("hello")
- out, err = cap.reset()
- py.test.raises(ValueError, cap.reset)
- assert out == "hello\n"
- assert not err
-
- def test_capturing_modify_sysouterr_in_between(self):
- oldout = sys.stdout
- olderr = sys.stderr
- cap = self.getcapture()
- sys.stdout.write("hello")
- sys.stderr.write("world")
- sys.stdout = py.io.TextIO()
- sys.stderr = py.io.TextIO()
- print ("not seen")
- sys.stderr.write("not seen\n")
- out, err = cap.reset()
- assert out == "hello"
- assert err == "world"
- assert sys.stdout == oldout
- assert sys.stderr == olderr
-
- def test_capturing_error_recursive(self):
- cap1 = self.getcapture()
- print ("cap1")
- cap2 = self.getcapture()
- print ("cap2")
- out2, err2 = cap2.reset()
- out1, err1 = cap1.reset()
- assert out1 == "cap1\n"
- assert out2 == "cap2\n"
-
- def test_just_out_capture(self):
- cap = self.getcapture(out=True, err=False)
- sys.stdout.write("hello")
- sys.stderr.write("world")
- out, err = cap.reset()
- assert out == "hello"
- assert not err
-
- def test_just_err_capture(self):
- cap = self.getcapture(out=False, err=True)
- sys.stdout.write("hello")
- sys.stderr.write("world")
- out, err = cap.reset()
- assert err == "world"
- assert not out
-
- def test_stdin_restored(self):
- old = sys.stdin
- cap = self.getcapture(in_=True)
- newstdin = sys.stdin
- out, err = cap.reset()
- assert newstdin != sys.stdin
- assert sys.stdin is old
-
- def test_stdin_nulled_by_default(self):
- print ("XXX this test may well hang instead of crashing")
- print ("XXX which indicates an error in the underlying capturing")
- print ("XXX mechanisms")
- cap = self.getcapture()
- py.test.raises(IOError, "sys.stdin.read()")
- out, err = cap.reset()
-
- def test_suspend_resume(self):
- cap = self.getcapture(out=True, err=False, in_=False)
- try:
- print ("hello")
- sys.stderr.write("error\n")
- out, err = cap.suspend()
- assert out == "hello\n"
- assert not err
- print ("in between")
- sys.stderr.write("in between\n")
- cap.resume()
- print ("after")
- sys.stderr.write("error_after\n")
- finally:
- out, err = cap.reset()
- assert out == "after\n"
- assert not err
-
-class TestStdCaptureNotNow(TestStdCapture):
- def getcapture(self, **kw):
- kw['now'] = False
- cap = py.io.StdCapture(**kw)
- cap.startall()
- return cap
-
-class TestStdCaptureFD(TestStdCapture):
- pytestmark = needsdup
-
- def getcapture(self, **kw):
- return py.io.StdCaptureFD(**kw)
-
- def test_intermingling(self):
- cap = self.getcapture()
- oswritebytes(1, "1")
- sys.stdout.write(str(2))
- sys.stdout.flush()
- oswritebytes(1, "3")
- oswritebytes(2, "a")
- sys.stderr.write("b")
- sys.stderr.flush()
- oswritebytes(2, "c")
- out, err = cap.reset()
- assert out == "123"
- assert err == "abc"
-
- def test_callcapture(self):
- def func(x, y):
- print (x)
- py.std.sys.stderr.write(str(y))
- return 42
-
- res, out, err = py.io.StdCaptureFD.call(func, 3, y=4)
- assert res == 42
- assert out.startswith("3")
- assert err.startswith("4")
-
- def test_many(self, capfd):
- def f():
- for i in range(10):
- cap = py.io.StdCaptureFD()
- cap.reset()
- lsof_check(f)
-
-class TestStdCaptureFDNotNow(TestStdCaptureFD):
- pytestmark = needsdup
-
- def getcapture(self, **kw):
- kw['now'] = False
- cap = py.io.StdCaptureFD(**kw)
- cap.startall()
- return cap
-
-@needsdup
-def test_stdcapture_fd_tmpfile(tmpfile):
- capfd = py.io.StdCaptureFD(out=tmpfile)
- os.write(1, "hello".encode("ascii"))
- os.write(2, "world".encode("ascii"))
- outf, errf = capfd.done()
- assert outf == tmpfile
-
-class TestStdCaptureFDinvalidFD:
- pytestmark = needsdup
- def test_stdcapture_fd_invalid_fd(self, testdir):
- testdir.makepyfile("""
- import py, os
- def test_stdout():
- os.close(1)
- cap = py.io.StdCaptureFD(out=True, err=False, in_=False)
- cap.done()
- def test_stderr():
- os.close(2)
- cap = py.io.StdCaptureFD(out=False, err=True, in_=False)
- cap.done()
- def test_stdin():
- os.close(0)
- cap = py.io.StdCaptureFD(out=False, err=False, in_=True)
- cap.done()
- """)
- result = testdir.runpytest("--capture=fd")
- assert result.ret == 0
- assert result.parseoutcomes()['passed'] == 3
-
-def test_capture_not_started_but_reset():
- capsys = py.io.StdCapture(now=False)
- capsys.done()
- capsys.done()
- capsys.reset()
-
-@needsdup
-def test_capture_no_sys():
- capsys = py.io.StdCapture()
- try:
- cap = py.io.StdCaptureFD(patchsys=False)
- sys.stdout.write("hello")
- sys.stderr.write("world")
- oswritebytes(1, "1")
- oswritebytes(2, "2")
- out, err = cap.reset()
- assert out == "1"
- assert err == "2"
- finally:
- capsys.reset()
-
-@needsdup
-def test_callcapture_nofd():
- def func(x, y):
- oswritebytes(1, "hello")
- oswritebytes(2, "hello")
- print (x)
- sys.stderr.write(str(y))
- return 42
-
- capfd = py.io.StdCaptureFD(patchsys=False)
- try:
- res, out, err = py.io.StdCapture.call(func, 3, y=4)
- finally:
- capfd.reset()
- assert res == 42
- assert out.startswith("3")
- assert err.startswith("4")
-
-@needsdup
-@py.test.mark.multi(use=[True, False])
-def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
- if not use:
- tmpfile = True
- cap = py.io.StdCaptureFD(out=False, err=tmpfile, now=False)
- cap.startall()
- capfile = cap.err.tmpfile
- cap.suspend()
- cap.resume()
- capfile2 = cap.err.tmpfile
- assert capfile2 == capfile
-
-@py.test.mark.multi(method=['StdCapture', 'StdCaptureFD'])
-def test_capturing_and_logging_fundamentals(testdir, method):
- if method == "StdCaptureFD" and not hasattr(os, 'dup'):
- py.test.skip("need os.dup")
- # here we check a fundamental feature
- p = testdir.makepyfile("""
- import sys, os
- import py, logging
- cap = py.io.%s(out=False, in_=False)
-
- logging.warn("hello1")
- outerr = cap.suspend()
- print ("suspend, captured %%s" %%(outerr,))
- logging.warn("hello2")
-
- cap.resume()
- logging.warn("hello3")
-
- outerr = cap.suspend()
- print ("suspend2, captured %%s" %% (outerr,))
- """ % (method,))
- result = testdir.runpython(p)
- result.stdout.fnmatch_lines([
- "suspend, captured*hello1*",
- "suspend2, captured*hello2*WARNING:root:hello3*",
- ])
- assert "atexit" not in result.stderr.str()
+from __future__ import with_statement
+
+import os, sys
+import py
+
+needsdup = py.test.mark.skipif("not hasattr(os, 'dup')")
+
+from py.builtin import print_
+
+if sys.version_info >= (3,0):
+ def tobytes(obj):
+ if isinstance(obj, str):
+ obj = obj.encode('UTF-8')
+ assert isinstance(obj, bytes)
+ return obj
+ def totext(obj):
+ if isinstance(obj, bytes):
+ obj = str(obj, 'UTF-8')
+ assert isinstance(obj, str)
+ return obj
+else:
+ def tobytes(obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode('UTF-8')
+ assert isinstance(obj, str)
+ return obj
+ def totext(obj):
+ if isinstance(obj, str):
+ obj = unicode(obj, 'UTF-8')
+ assert isinstance(obj, unicode)
+ return obj
+
+def oswritebytes(fd, obj):
+ os.write(fd, tobytes(obj))
+
+class TestTextIO:
+ def test_text(self):
+ f = py.io.TextIO()
+ f.write("hello")
+ s = f.getvalue()
+ assert s == "hello"
+ f.close()
+
+ def test_unicode_and_str_mixture(self):
+ f = py.io.TextIO()
+ if sys.version_info >= (3,0):
+ f.write("\u00f6")
+ py.test.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
+ else:
+ f.write(unicode("\u00f6", 'UTF-8'))
+ f.write("hello") # bytes
+ s = f.getvalue()
+ f.close()
+ assert isinstance(s, unicode)
+
+def test_bytes_io():
+ f = py.io.BytesIO()
+ f.write(tobytes("hello"))
+ py.test.raises(TypeError, "f.write(totext('hello'))")
+ s = f.getvalue()
+ assert s == tobytes("hello")
+
+def test_dontreadfrominput():
+ from py._io.capture import DontReadFromInput
+ f = DontReadFromInput()
+ assert not f.isatty()
+ py.test.raises(IOError, f.read)
+ py.test.raises(IOError, f.readlines)
+ py.test.raises(IOError, iter, f)
+ py.test.raises(ValueError, f.fileno)
+ f.close() # just for completeness
+
+def pytest_funcarg__tmpfile(request):
+ testdir = request.getfuncargvalue("testdir")
+ f = testdir.makepyfile("").open('wb+')
+ request.addfinalizer(f.close)
+ return f
+
+@needsdup
+def test_dupfile(tmpfile):
+ flist = []
+ for i in range(5):
+ nf = py.io.dupfile(tmpfile, encoding="utf-8")
+ assert nf != tmpfile
+ assert nf.fileno() != tmpfile.fileno()
+ assert nf not in flist
+ print_(i, end="", file=nf)
+ flist.append(nf)
+ for i in range(5):
+ f = flist[i]
+ f.close()
+ tmpfile.seek(0)
+ s = tmpfile.read()
+ assert "01234" in repr(s)
+ tmpfile.close()
+
+def test_dupfile_no_mode():
+ """
+ dupfile should trap an AttributeError and return f if no mode is supplied.
+ """
+ class SomeFileWrapper(object):
+ "An object with a fileno method but no mode attribute"
+ def fileno(self):
+ return 1
+ tmpfile = SomeFileWrapper()
+ assert py.io.dupfile(tmpfile) is tmpfile
+ with py.test.raises(AttributeError):
+ py.io.dupfile(tmpfile, raising=True)
+
+def lsof_check(func):
+ pid = os.getpid()
+ try:
+ out = py.process.cmdexec("lsof -p %d" % pid)
+ except py.process.cmdexec.Error:
+ py.test.skip("could not run 'lsof'")
+ func()
+ out2 = py.process.cmdexec("lsof -p %d" % pid)
+ len1 = len([x for x in out.split("\n") if "REG" in x])
+ len2 = len([x for x in out2.split("\n") if "REG" in x])
+ assert len2 < len1 + 3, out2
+
+class TestFDCapture:
+ pytestmark = needsdup
+
+ def test_not_now(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = py.io.FDCapture(fd, now=False)
+ data = tobytes("hello")
+ os.write(fd, data)
+ f = cap.done()
+ s = f.read()
+ assert not s
+ cap = py.io.FDCapture(fd, now=False)
+ cap.start()
+ os.write(fd, data)
+ f = cap.done()
+ s = f.read()
+ assert s == "hello"
+
+ def test_simple(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = py.io.FDCapture(fd)
+ data = tobytes("hello")
+ os.write(fd, data)
+ f = cap.done()
+ s = f.read()
+ assert s == "hello"
+ f.close()
+
+ def test_simple_many(self, tmpfile):
+ for i in range(10):
+ self.test_simple(tmpfile)
+
+ def test_simple_many_check_open_files(self, tmpfile):
+ lsof_check(lambda: self.test_simple_many(tmpfile))
+
+ def test_simple_fail_second_start(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = py.io.FDCapture(fd)
+ f = cap.done()
+ py.test.raises(ValueError, cap.start)
+ f.close()
+
+ def test_stderr(self):
+ cap = py.io.FDCapture(2, patchsys=True)
+ print_("hello", file=sys.stderr)
+ f = cap.done()
+ s = f.read()
+ assert s == "hello\n"
+
+ def test_stdin(self, tmpfile):
+ tmpfile.write(tobytes("3"))
+ tmpfile.seek(0)
+ cap = py.io.FDCapture(0, tmpfile=tmpfile)
+ # check with os.read() directly instead of raw_input(), because
+ # sys.stdin itself may be redirected (as py.test now does by default)
+ x = os.read(0, 100).strip()
+ f = cap.done()
+ assert x == tobytes("3")
+
+ def test_writeorg(self, tmpfile):
+ data1, data2 = tobytes("foo"), tobytes("bar")
+ try:
+ cap = py.io.FDCapture(tmpfile.fileno())
+ tmpfile.write(data1)
+ cap.writeorg(data2)
+ finally:
+ tmpfile.close()
+ f = cap.done()
+ scap = f.read()
+ assert scap == totext(data1)
+ stmp = open(tmpfile.name, 'rb').read()
+ assert stmp == data2
+
+
+class TestStdCapture:
+ def getcapture(self, **kw):
+ return py.io.StdCapture(**kw)
+
+ def test_capturing_done_simple(self):
+ cap = self.getcapture()
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ outfile, errfile = cap.done()
+ s = outfile.read()
+ assert s == "hello"
+ s = errfile.read()
+ assert s == "world"
+
+ def test_capturing_reset_simple(self):
+ cap = self.getcapture()
+ print("hello world")
+ sys.stderr.write("hello error\n")
+ out, err = cap.reset()
+ assert out == "hello world\n"
+ assert err == "hello error\n"
+
+ def test_capturing_readouterr(self):
+ cap = self.getcapture()
+ try:
+ print ("hello world")
+ sys.stderr.write("hello error\n")
+ out, err = cap.readouterr()
+ assert out == "hello world\n"
+ assert err == "hello error\n"
+ sys.stderr.write("error2")
+ finally:
+ out, err = cap.reset()
+ assert err == "error2"
+
+ def test_capturing_readouterr_unicode(self):
+ cap = self.getcapture()
+ print ("hx\xc4\x85\xc4\x87")
+ out, err = cap.readouterr()
+ assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
+
+ @py.test.mark.skipif('sys.version_info >= (3,)',
+ reason='text output different for bytes on python3')
+ def test_capturing_readouterr_decode_error_handling(self):
+ cap = self.getcapture()
+ # triggered a internal error in pytest
+ print('\xa6')
+ out, err = cap.readouterr()
+ assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
+
+ def test_capturing_mixed(self):
+ cap = self.getcapture(mixed=True)
+ sys.stdout.write("hello ")
+ sys.stderr.write("world")
+ sys.stdout.write(".")
+ out, err = cap.reset()
+ assert out.strip() == "hello world."
+ assert not err
+
+ def test_reset_twice_error(self):
+ cap = self.getcapture()
+ print ("hello")
+ out, err = cap.reset()
+ py.test.raises(ValueError, cap.reset)
+ assert out == "hello\n"
+ assert not err
+
+ def test_capturing_modify_sysouterr_in_between(self):
+ oldout = sys.stdout
+ olderr = sys.stderr
+ cap = self.getcapture()
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ sys.stdout = py.io.TextIO()
+ sys.stderr = py.io.TextIO()
+ print ("not seen")
+ sys.stderr.write("not seen\n")
+ out, err = cap.reset()
+ assert out == "hello"
+ assert err == "world"
+ assert sys.stdout == oldout
+ assert sys.stderr == olderr
+
+ def test_capturing_error_recursive(self):
+ cap1 = self.getcapture()
+ print ("cap1")
+ cap2 = self.getcapture()
+ print ("cap2")
+ out2, err2 = cap2.reset()
+ out1, err1 = cap1.reset()
+ assert out1 == "cap1\n"
+ assert out2 == "cap2\n"
+
+ def test_just_out_capture(self):
+ cap = self.getcapture(out=True, err=False)
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ out, err = cap.reset()
+ assert out == "hello"
+ assert not err
+
+ def test_just_err_capture(self):
+ cap = self.getcapture(out=False, err=True)
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ out, err = cap.reset()
+ assert err == "world"
+ assert not out
+
+ def test_stdin_restored(self):
+ old = sys.stdin
+ cap = self.getcapture(in_=True)
+ newstdin = sys.stdin
+ out, err = cap.reset()
+ assert newstdin != sys.stdin
+ assert sys.stdin is old
+
+ def test_stdin_nulled_by_default(self):
+ print ("XXX this test may well hang instead of crashing")
+ print ("XXX which indicates an error in the underlying capturing")
+ print ("XXX mechanisms")
+ cap = self.getcapture()
+ py.test.raises(IOError, "sys.stdin.read()")
+ out, err = cap.reset()
+
+ def test_suspend_resume(self):
+ cap = self.getcapture(out=True, err=False, in_=False)
+ try:
+ print ("hello")
+ sys.stderr.write("error\n")
+ out, err = cap.suspend()
+ assert out == "hello\n"
+ assert not err
+ print ("in between")
+ sys.stderr.write("in between\n")
+ cap.resume()
+ print ("after")
+ sys.stderr.write("error_after\n")
+ finally:
+ out, err = cap.reset()
+ assert out == "after\n"
+ assert not err
+
+class TestStdCaptureNotNow(TestStdCapture):
+ def getcapture(self, **kw):
+ kw['now'] = False
+ cap = py.io.StdCapture(**kw)
+ cap.startall()
+ return cap
+
+class TestStdCaptureFD(TestStdCapture):
+ pytestmark = needsdup
+
+ def getcapture(self, **kw):
+ return py.io.StdCaptureFD(**kw)
+
+ def test_intermingling(self):
+ cap = self.getcapture()
+ oswritebytes(1, "1")
+ sys.stdout.write(str(2))
+ sys.stdout.flush()
+ oswritebytes(1, "3")
+ oswritebytes(2, "a")
+ sys.stderr.write("b")
+ sys.stderr.flush()
+ oswritebytes(2, "c")
+ out, err = cap.reset()
+ assert out == "123"
+ assert err == "abc"
+
+ def test_callcapture(self):
+ def func(x, y):
+ print (x)
+ py.std.sys.stderr.write(str(y))
+ return 42
+
+ res, out, err = py.io.StdCaptureFD.call(func, 3, y=4)
+ assert res == 42
+ assert out.startswith("3")
+ assert err.startswith("4")
+
+ def test_many(self, capfd):
+ def f():
+ for i in range(10):
+ cap = py.io.StdCaptureFD()
+ cap.reset()
+ lsof_check(f)
+
+class TestStdCaptureFDNotNow(TestStdCaptureFD):
+ pytestmark = needsdup
+
+ def getcapture(self, **kw):
+ kw['now'] = False
+ cap = py.io.StdCaptureFD(**kw)
+ cap.startall()
+ return cap
+
+@needsdup
+def test_stdcapture_fd_tmpfile(tmpfile):
+ capfd = py.io.StdCaptureFD(out=tmpfile)
+ os.write(1, "hello".encode("ascii"))
+ os.write(2, "world".encode("ascii"))
+ outf, errf = capfd.done()
+ assert outf == tmpfile
+
+class TestStdCaptureFDinvalidFD:
+ pytestmark = needsdup
+ def test_stdcapture_fd_invalid_fd(self, testdir):
+ testdir.makepyfile("""
+ import py, os
+ def test_stdout():
+ os.close(1)
+ cap = py.io.StdCaptureFD(out=True, err=False, in_=False)
+ cap.done()
+ def test_stderr():
+ os.close(2)
+ cap = py.io.StdCaptureFD(out=False, err=True, in_=False)
+ cap.done()
+ def test_stdin():
+ os.close(0)
+ cap = py.io.StdCaptureFD(out=False, err=False, in_=True)
+ cap.done()
+ """)
+ result = testdir.runpytest("--capture=fd")
+ assert result.ret == 0
+ assert result.parseoutcomes()['passed'] == 3
+
+def test_capture_not_started_but_reset():
+ capsys = py.io.StdCapture(now=False)
+ capsys.done()
+ capsys.done()
+ capsys.reset()
+
+@needsdup
+def test_capture_no_sys():
+ capsys = py.io.StdCapture()
+ try:
+ cap = py.io.StdCaptureFD(patchsys=False)
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ oswritebytes(1, "1")
+ oswritebytes(2, "2")
+ out, err = cap.reset()
+ assert out == "1"
+ assert err == "2"
+ finally:
+ capsys.reset()
+
+@needsdup
+def test_callcapture_nofd():
+ def func(x, y):
+ oswritebytes(1, "hello")
+ oswritebytes(2, "hello")
+ print (x)
+ sys.stderr.write(str(y))
+ return 42
+
+ capfd = py.io.StdCaptureFD(patchsys=False)
+ try:
+ res, out, err = py.io.StdCapture.call(func, 3, y=4)
+ finally:
+ capfd.reset()
+ assert res == 42
+ assert out.startswith("3")
+ assert err.startswith("4")
+
+@needsdup
+@py.test.mark.multi(use=[True, False])
+def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
+ if not use:
+ tmpfile = True
+ cap = py.io.StdCaptureFD(out=False, err=tmpfile, now=False)
+ cap.startall()
+ capfile = cap.err.tmpfile
+ cap.suspend()
+ cap.resume()
+ capfile2 = cap.err.tmpfile
+ assert capfile2 == capfile
+
+@py.test.mark.multi(method=['StdCapture', 'StdCaptureFD'])
+def test_capturing_and_logging_fundamentals(testdir, method):
+ if method == "StdCaptureFD" and not hasattr(os, 'dup'):
+ py.test.skip("need os.dup")
+ # here we check a fundamental feature
+ p = testdir.makepyfile("""
+ import sys, os
+ import py, logging
+ cap = py.io.%s(out=False, in_=False)
+
+ logging.warn("hello1")
+ outerr = cap.suspend()
+ print ("suspend, captured %%s" %%(outerr,))
+ logging.warn("hello2")
+
+ cap.resume()
+ logging.warn("hello3")
+
+ outerr = cap.suspend()
+ print ("suspend2, captured %%s" %% (outerr,))
+ """ % (method,))
+ result = testdir.runpython(p)
+ result.stdout.fnmatch_lines([
+ "suspend, captured*hello1*",
+ "suspend2, captured*hello2*WARNING:root:hello3*",
+ ])
+ assert "atexit" not in result.stderr.str()
diff --git a/testing/io_/test_saferepr.py b/testing/io_/test_saferepr.py
index 1ed9c4f..daf9808 100644
--- a/testing/io_/test_saferepr.py
+++ b/testing/io_/test_saferepr.py
@@ -1,78 +1,78 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import generators
-import py
-import sys
-
-saferepr = py.io.saferepr
-
-class TestSafeRepr:
- def test_simple_repr(self):
- assert saferepr(1) == '1'
- assert saferepr(None) == 'None'
-
- def test_maxsize(self):
- s = saferepr('x'*50, maxsize=25)
- assert len(s) == 25
- expected = repr('x'*10 + '...' + 'x'*10)
- assert s == expected
-
- def test_maxsize_error_on_instance(self):
- class A:
- def __repr__(self):
- raise ValueError('...')
-
- s = saferepr(('*'*50, A()), maxsize=25)
- assert len(s) == 25
- assert s[0] == '(' and s[-1] == ')'
-
- def test_exceptions(self):
- class BrokenRepr:
- def __init__(self, ex):
- self.ex = ex
- foo = 0
- def __repr__(self):
- raise self.ex
- class BrokenReprException(Exception):
- __str__ = None
- __repr__ = None
- assert 'Exception' in saferepr(BrokenRepr(Exception("broken")))
- s = saferepr(BrokenReprException("really broken"))
- assert 'TypeError' in s
- if py.std.sys.version_info < (2,6):
- assert 'unknown' in saferepr(BrokenRepr("string"))
- else:
- assert 'TypeError' in saferepr(BrokenRepr("string"))
-
- s2 = saferepr(BrokenRepr(BrokenReprException('omg even worse')))
- assert 'NameError' not in s2
- assert 'unknown' in s2
-
- def test_big_repr(self):
- from py._io.saferepr import SafeRepr
- assert len(saferepr(range(1000))) <= \
- len('[' + SafeRepr().maxlist * "1000" + ']')
-
- def test_repr_on_newstyle(self):
- class Function(object):
- def __repr__(self):
- return "<%s>" %(self.name)
- try:
- s = saferepr(Function())
- except Exception:
- py.test.fail("saferepr failed for newstyle class")
-
- def test_unicode(self):
- val = py.builtin._totext('£€', 'utf-8')
- reprval = py.builtin._totext("'£€'", 'utf-8')
- assert saferepr(val) == reprval
-
-def test_unicode_handling():
- value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
- def f():
- raise Exception(value)
- excinfo = py.test.raises(Exception, f)
- s = str(excinfo)
- if sys.version_info[0] < 3:
- u = unicode(excinfo)
-
+# -*- coding: utf-8 -*-
+
+from __future__ import generators
+import py
+import sys
+
+saferepr = py.io.saferepr
+
+class TestSafeRepr:
+ def test_simple_repr(self):
+ assert saferepr(1) == '1'
+ assert saferepr(None) == 'None'
+
+ def test_maxsize(self):
+ s = saferepr('x'*50, maxsize=25)
+ assert len(s) == 25
+ expected = repr('x'*10 + '...' + 'x'*10)
+ assert s == expected
+
+ def test_maxsize_error_on_instance(self):
+ class A:
+ def __repr__(self):
+ raise ValueError('...')
+
+ s = saferepr(('*'*50, A()), maxsize=25)
+ assert len(s) == 25
+ assert s[0] == '(' and s[-1] == ')'
+
+ def test_exceptions(self):
+ class BrokenRepr:
+ def __init__(self, ex):
+ self.ex = ex
+ foo = 0
+ def __repr__(self):
+ raise self.ex
+ class BrokenReprException(Exception):
+ __str__ = None
+ __repr__ = None
+ assert 'Exception' in saferepr(BrokenRepr(Exception("broken")))
+ s = saferepr(BrokenReprException("really broken"))
+ assert 'TypeError' in s
+ if py.std.sys.version_info < (2,6):
+ assert 'unknown' in saferepr(BrokenRepr("string"))
+ else:
+ assert 'TypeError' in saferepr(BrokenRepr("string"))
+
+ s2 = saferepr(BrokenRepr(BrokenReprException('omg even worse')))
+ assert 'NameError' not in s2
+ assert 'unknown' in s2
+
+ def test_big_repr(self):
+ from py._io.saferepr import SafeRepr
+ assert len(saferepr(range(1000))) <= \
+ len('[' + SafeRepr().maxlist * "1000" + ']')
+
+ def test_repr_on_newstyle(self):
+ class Function(object):
+ def __repr__(self):
+ return "<%s>" %(self.name)
+ try:
+ s = saferepr(Function())
+ except Exception:
+ py.test.fail("saferepr failed for newstyle class")
+
+ def test_unicode(self):
+ val = py.builtin._totext('£€', 'utf-8')
+ reprval = py.builtin._totext("'£€'", 'utf-8')
+ assert saferepr(val) == reprval
+
+def test_unicode_handling():
+ value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+ def f():
+ raise Exception(value)
+ excinfo = py.test.raises(Exception, f)
+ s = str(excinfo)
+ if sys.version_info[0] < 3:
+ u = unicode(excinfo)
+
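The saferepr tests above describe py.io.saferepr, a repr() variant that truncates long output via maxsize and degrades gracefully when an object's __repr__ raises. A small sketch, assuming py 1.4.x; the Broken class exists only for illustration:

    import py

    saferepr = py.io.saferepr

    assert saferepr(None) == "None"
    assert len(saferepr("x" * 50, maxsize=25)) == 25   # truncated to maxsize

    class Broken(object):
        def __repr__(self):
            raise ValueError("boom")

    # Instead of propagating the ValueError, saferepr embeds a note about
    # the failing __repr__ in its result string.
    s = saferepr(Broken())
    assert "ValueError" in s
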
diff --git a/testing/io_/test_terminalwriter.py b/testing/io_/test_terminalwriter.py
index 0a15541..a4b8f01 100644
--- a/testing/io_/test_terminalwriter.py
+++ b/testing/io_/test_terminalwriter.py
@@ -1,271 +1,271 @@
-
-import py
-import os, sys
-from py._io import terminalwriter
-import codecs
-import pytest
-
-def test_get_terminal_width():
- x = py.io.get_terminal_width
- assert x == terminalwriter.get_terminal_width
-
-def test_getdimensions(monkeypatch):
- fcntl = py.test.importorskip("fcntl")
- import struct
- l = []
- monkeypatch.setattr(fcntl, 'ioctl', lambda *args: l.append(args))
- try:
- terminalwriter._getdimensions()
- except (TypeError, struct.error):
- pass
- assert len(l) == 1
- assert l[0][0] == 1
-
-def test_terminal_width_COLUMNS(monkeypatch):
- """ Dummy test for get_terminal_width
- """
- fcntl = py.test.importorskip("fcntl")
- monkeypatch.setattr(fcntl, 'ioctl', lambda *args: int('x'))
- monkeypatch.setenv('COLUMNS', '42')
- assert terminalwriter.get_terminal_width() == 42
- monkeypatch.delenv('COLUMNS', raising=False)
-
-def test_terminalwriter_defaultwidth_80(monkeypatch):
- monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: 0/0)
- monkeypatch.delenv('COLUMNS', raising=False)
- tw = py.io.TerminalWriter()
- assert tw.fullwidth == 80
-
-def test_terminalwriter_getdimensions_bogus(monkeypatch):
- monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (10,10))
- monkeypatch.delenv('COLUMNS', raising=False)
- tw = py.io.TerminalWriter()
- assert tw.fullwidth == 80
-
-def test_terminalwriter_getdimensions_emacs(monkeypatch):
- # emacs terminal returns (0,0) but set COLUMNS properly
- monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (0,0))
- monkeypatch.setenv('COLUMNS', '42')
- tw = py.io.TerminalWriter()
- assert tw.fullwidth == 42
-
-def test_terminalwriter_computes_width(monkeypatch):
- monkeypatch.setattr(terminalwriter, 'get_terminal_width', lambda: 42)
- tw = py.io.TerminalWriter()
- assert tw.fullwidth == 42
-
-def test_terminalwriter_default_instantiation():
- tw = py.io.TerminalWriter(stringio=True)
- assert hasattr(tw, 'stringio')
-
-def test_terminalwriter_dumb_term_no_markup(monkeypatch):
- monkeypatch.setattr(os, 'environ', {'TERM': 'dumb', 'PATH': ''})
- class MyFile:
- closed = False
- def isatty(self):
- return True
- monkeypatch.setattr(sys, 'stdout', MyFile())
- try:
- assert sys.stdout.isatty()
- tw = py.io.TerminalWriter()
- assert not tw.hasmarkup
- finally:
- monkeypatch.undo()
-
-def test_terminalwriter_file_unicode(tmpdir):
- f = py.std.codecs.open(str(tmpdir.join("xyz")), "wb", "utf8")
- tw = py.io.TerminalWriter(file=f)
- assert tw.encoding == "utf8"
-
-def test_unicode_encoding():
- msg = py.builtin._totext('b\u00f6y', 'utf8')
- for encoding in 'utf8', 'latin1':
- l = []
- tw = py.io.TerminalWriter(l.append, encoding=encoding)
- tw.line(msg)
- assert l[0].strip() == msg.encode(encoding)
-
-@pytest.mark.parametrize("encoding", ["ascii"])
-def test_unicode_on_file_with_ascii_encoding(tmpdir, monkeypatch, encoding):
- msg = py.builtin._totext('hell\xf6', "latin1")
- #pytest.raises(UnicodeEncodeError, lambda: bytes(msg))
- f = py.std.codecs.open(str(tmpdir.join("x")), "w", encoding)
- tw = py.io.TerminalWriter(f)
- tw.line(msg)
- f.close()
- s = tmpdir.join("x").open("rb").read().strip()
- assert encoding == "ascii"
- assert s == msg.encode("unicode-escape")
-
-
-win32 = int(sys.platform == "win32")
-class TestTerminalWriter:
- def pytest_generate_tests(self, metafunc):
- if "tw" in metafunc.funcargnames:
- metafunc.addcall(id="path", param="path")
- metafunc.addcall(id="stringio", param="stringio")
- metafunc.addcall(id="callable", param="callable")
- def pytest_funcarg__tw(self, request):
- if request.param == "path":
- tmpdir = request.getfuncargvalue("tmpdir")
- p = tmpdir.join("tmpfile")
- f = codecs.open(str(p), 'w+', encoding='utf8')
- tw = py.io.TerminalWriter(f)
- def getlines():
- tw._file.flush()
- return codecs.open(str(p), 'r',
- encoding='utf8').readlines()
- elif request.param == "stringio":
- tw = py.io.TerminalWriter(stringio=True)
- def getlines():
- tw.stringio.seek(0)
- return tw.stringio.readlines()
- elif request.param == "callable":
- writes = []
- tw = py.io.TerminalWriter(writes.append)
- def getlines():
- io = py.io.TextIO()
- io.write("".join(writes))
- io.seek(0)
- return io.readlines()
- tw.getlines = getlines
- tw.getvalue = lambda: "".join(getlines())
- return tw
-
- def test_line(self, tw):
- tw.line("hello")
- l = tw.getlines()
- assert len(l) == 1
- assert l[0] == "hello\n"
-
- def test_line_unicode(self, tw):
- for encoding in 'utf8', 'latin1':
- tw._encoding = encoding
- msg = py.builtin._totext('b\u00f6y', 'utf8')
- tw.line(msg)
- l = tw.getlines()
- assert l[0] == msg + "\n"
-
- def test_sep_no_title(self, tw):
- tw.sep("-", fullwidth=60)
- l = tw.getlines()
- assert len(l) == 1
- assert l[0] == "-" * (60-win32) + "\n"
-
- def test_sep_with_title(self, tw):
- tw.sep("-", "hello", fullwidth=60)
- l = tw.getlines()
- assert len(l) == 1
- assert l[0] == "-" * 26 + " hello " + "-" * (27-win32) + "\n"
-
- @py.test.mark.skipif("sys.platform == 'win32'")
- def test__escaped(self, tw):
- text2 = tw._escaped("hello", (31))
- assert text2.find("hello") != -1
-
- @py.test.mark.skipif("sys.platform == 'win32'")
- def test_markup(self, tw):
- for bold in (True, False):
- for color in ("red", "green"):
- text2 = tw.markup("hello", **{color: True, 'bold': bold})
- assert text2.find("hello") != -1
- py.test.raises(ValueError, "tw.markup('x', wronkw=3)")
- py.test.raises(ValueError, "tw.markup('x', wronkw=0)")
-
- def test_line_write_markup(self, tw):
- tw.hasmarkup = True
- tw.line("x", bold=True)
- tw.write("x\n", red=True)
- l = tw.getlines()
- if sys.platform != "win32":
- assert len(l[0]) >= 2, l
- assert len(l[1]) >= 2, l
-
- def test_attr_fullwidth(self, tw):
- tw.sep("-", "hello", fullwidth=70)
- tw.fullwidth = 70
- tw.sep("-", "hello")
- l = tw.getlines()
- assert len(l[0]) == len(l[1])
-
- def test_reline(self, tw):
- tw.line("hello")
- tw.hasmarkup = False
- pytest.raises(ValueError, lambda: tw.reline("x"))
- tw.hasmarkup = True
- tw.reline("0 1 2")
- tw.getlines()
- l = tw.getvalue().split("\n")
- assert len(l) == 2
- tw.reline("0 1 3")
- l = tw.getvalue().split("\n")
- assert len(l) == 2
- assert l[1].endswith("0 1 3\r")
- tw.line("so")
- l = tw.getvalue().split("\n")
- assert len(l) == 3
- assert l[-1] == ""
- assert l[1] == ("0 1 2\r0 1 3\rso ")
- assert l[0] == "hello"
-
-
-def test_terminal_with_callable_write_and_flush():
- l = set()
- class fil:
- flush = lambda self: l.add("1")
- write = lambda self, x: l.add("1")
- __call__ = lambda self, x: l.add("2")
-
- tw = py.io.TerminalWriter(fil())
- tw.line("hello")
- assert l == set(["1"])
- del fil.flush
- l.clear()
- tw = py.io.TerminalWriter(fil())
- tw.line("hello")
- assert l == set(["2"])
-
-
-@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
-def test_attr_hasmarkup():
- tw = py.io.TerminalWriter(stringio=True)
- assert not tw.hasmarkup
- tw.hasmarkup = True
- tw.line("hello", bold=True)
- s = tw.stringio.getvalue()
- assert len(s) > len("hello\n")
- assert '\x1b[1m' in s
- assert '\x1b[0m' in s
-
-@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
-def test_ansi_print():
- # we have no easy way to construct a file that
- # represents a terminal
- f = py.io.TextIO()
- f.isatty = lambda: True
- py.io.ansi_print("hello", 0x32, file=f)
- text2 = f.getvalue()
- assert text2.find("hello") != -1
- assert len(text2) >= len("hello\n")
- assert '\x1b[50m' in text2
- assert '\x1b[0m' in text2
-
-def test_should_do_markup_PY_COLORS_eq_1(monkeypatch):
- monkeypatch.setitem(os.environ, 'PY_COLORS', '1')
- tw = py.io.TerminalWriter(stringio=True)
- assert tw.hasmarkup
- tw.line("hello", bold=True)
- s = tw.stringio.getvalue()
- assert len(s) > len("hello\n")
- assert '\x1b[1m' in s
- assert '\x1b[0m' in s
-
-def test_should_do_markup_PY_COLORS_eq_0(monkeypatch):
- monkeypatch.setitem(os.environ, 'PY_COLORS', '0')
- f = py.io.TextIO()
- f.isatty = lambda: True
- tw = py.io.TerminalWriter(file=f)
- assert not tw.hasmarkup
- tw.line("hello", bold=True)
- s = f.getvalue()
- assert s == "hello\n"
+
+import py
+import os, sys
+from py._io import terminalwriter
+import codecs
+import pytest
+
+def test_get_terminal_width():
+ x = py.io.get_terminal_width
+ assert x == terminalwriter.get_terminal_width
+
+def test_getdimensions(monkeypatch):
+ fcntl = py.test.importorskip("fcntl")
+ import struct
+ l = []
+ monkeypatch.setattr(fcntl, 'ioctl', lambda *args: l.append(args))
+ try:
+ terminalwriter._getdimensions()
+ except (TypeError, struct.error):
+ pass
+ assert len(l) == 1
+ assert l[0][0] == 1
+
+def test_terminal_width_COLUMNS(monkeypatch):
+ """ Dummy test for get_terminal_width
+ """
+ fcntl = py.test.importorskip("fcntl")
+ monkeypatch.setattr(fcntl, 'ioctl', lambda *args: int('x'))
+ monkeypatch.setenv('COLUMNS', '42')
+ assert terminalwriter.get_terminal_width() == 42
+ monkeypatch.delenv('COLUMNS', raising=False)
+
+def test_terminalwriter_defaultwidth_80(monkeypatch):
+ monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: 0/0)
+ monkeypatch.delenv('COLUMNS', raising=False)
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 80
+
+def test_terminalwriter_getdimensions_bogus(monkeypatch):
+ monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (10,10))
+ monkeypatch.delenv('COLUMNS', raising=False)
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 80
+
+def test_terminalwriter_getdimensions_emacs(monkeypatch):
+    # emacs terminal returns (0,0) but sets COLUMNS properly
+ monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (0,0))
+ monkeypatch.setenv('COLUMNS', '42')
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 42
+
+def test_terminalwriter_computes_width(monkeypatch):
+ monkeypatch.setattr(terminalwriter, 'get_terminal_width', lambda: 42)
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 42
+
+def test_terminalwriter_default_instantiation():
+ tw = py.io.TerminalWriter(stringio=True)
+ assert hasattr(tw, 'stringio')
+
+def test_terminalwriter_dumb_term_no_markup(monkeypatch):
+ monkeypatch.setattr(os, 'environ', {'TERM': 'dumb', 'PATH': ''})
+ class MyFile:
+ closed = False
+ def isatty(self):
+ return True
+ monkeypatch.setattr(sys, 'stdout', MyFile())
+ try:
+ assert sys.stdout.isatty()
+ tw = py.io.TerminalWriter()
+ assert not tw.hasmarkup
+ finally:
+ monkeypatch.undo()
+
+def test_terminalwriter_file_unicode(tmpdir):
+ f = py.std.codecs.open(str(tmpdir.join("xyz")), "wb", "utf8")
+ tw = py.io.TerminalWriter(file=f)
+ assert tw.encoding == "utf8"
+
+def test_unicode_encoding():
+ msg = py.builtin._totext('b\u00f6y', 'utf8')
+ for encoding in 'utf8', 'latin1':
+ l = []
+ tw = py.io.TerminalWriter(l.append, encoding=encoding)
+ tw.line(msg)
+ assert l[0].strip() == msg.encode(encoding)
+
+@pytest.mark.parametrize("encoding", ["ascii"])
+def test_unicode_on_file_with_ascii_encoding(tmpdir, monkeypatch, encoding):
+ msg = py.builtin._totext('hell\xf6', "latin1")
+ #pytest.raises(UnicodeEncodeError, lambda: bytes(msg))
+ f = py.std.codecs.open(str(tmpdir.join("x")), "w", encoding)
+ tw = py.io.TerminalWriter(f)
+ tw.line(msg)
+ f.close()
+ s = tmpdir.join("x").open("rb").read().strip()
+ assert encoding == "ascii"
+ assert s == msg.encode("unicode-escape")
+
+
+win32 = int(sys.platform == "win32")
+class TestTerminalWriter:
+ def pytest_generate_tests(self, metafunc):
+ if "tw" in metafunc.funcargnames:
+ metafunc.addcall(id="path", param="path")
+ metafunc.addcall(id="stringio", param="stringio")
+ metafunc.addcall(id="callable", param="callable")
+ def pytest_funcarg__tw(self, request):
+ if request.param == "path":
+ tmpdir = request.getfuncargvalue("tmpdir")
+ p = tmpdir.join("tmpfile")
+ f = codecs.open(str(p), 'w+', encoding='utf8')
+ tw = py.io.TerminalWriter(f)
+ def getlines():
+ tw._file.flush()
+ return codecs.open(str(p), 'r',
+ encoding='utf8').readlines()
+ elif request.param == "stringio":
+ tw = py.io.TerminalWriter(stringio=True)
+ def getlines():
+ tw.stringio.seek(0)
+ return tw.stringio.readlines()
+ elif request.param == "callable":
+ writes = []
+ tw = py.io.TerminalWriter(writes.append)
+ def getlines():
+ io = py.io.TextIO()
+ io.write("".join(writes))
+ io.seek(0)
+ return io.readlines()
+ tw.getlines = getlines
+ tw.getvalue = lambda: "".join(getlines())
+ return tw
+
+ def test_line(self, tw):
+ tw.line("hello")
+ l = tw.getlines()
+ assert len(l) == 1
+ assert l[0] == "hello\n"
+
+ def test_line_unicode(self, tw):
+ for encoding in 'utf8', 'latin1':
+ tw._encoding = encoding
+ msg = py.builtin._totext('b\u00f6y', 'utf8')
+ tw.line(msg)
+ l = tw.getlines()
+ assert l[0] == msg + "\n"
+
+ def test_sep_no_title(self, tw):
+ tw.sep("-", fullwidth=60)
+ l = tw.getlines()
+ assert len(l) == 1
+ assert l[0] == "-" * (60-win32) + "\n"
+
+ def test_sep_with_title(self, tw):
+ tw.sep("-", "hello", fullwidth=60)
+ l = tw.getlines()
+ assert len(l) == 1
+ assert l[0] == "-" * 26 + " hello " + "-" * (27-win32) + "\n"
+
+ @py.test.mark.skipif("sys.platform == 'win32'")
+ def test__escaped(self, tw):
+ text2 = tw._escaped("hello", (31))
+ assert text2.find("hello") != -1
+
+ @py.test.mark.skipif("sys.platform == 'win32'")
+ def test_markup(self, tw):
+ for bold in (True, False):
+ for color in ("red", "green"):
+ text2 = tw.markup("hello", **{color: True, 'bold': bold})
+ assert text2.find("hello") != -1
+ py.test.raises(ValueError, "tw.markup('x', wronkw=3)")
+ py.test.raises(ValueError, "tw.markup('x', wronkw=0)")
+
+ def test_line_write_markup(self, tw):
+ tw.hasmarkup = True
+ tw.line("x", bold=True)
+ tw.write("x\n", red=True)
+ l = tw.getlines()
+ if sys.platform != "win32":
+ assert len(l[0]) >= 2, l
+ assert len(l[1]) >= 2, l
+
+ def test_attr_fullwidth(self, tw):
+ tw.sep("-", "hello", fullwidth=70)
+ tw.fullwidth = 70
+ tw.sep("-", "hello")
+ l = tw.getlines()
+ assert len(l[0]) == len(l[1])
+
+ def test_reline(self, tw):
+ tw.line("hello")
+ tw.hasmarkup = False
+ pytest.raises(ValueError, lambda: tw.reline("x"))
+ tw.hasmarkup = True
+ tw.reline("0 1 2")
+ tw.getlines()
+ l = tw.getvalue().split("\n")
+ assert len(l) == 2
+ tw.reline("0 1 3")
+ l = tw.getvalue().split("\n")
+ assert len(l) == 2
+ assert l[1].endswith("0 1 3\r")
+ tw.line("so")
+ l = tw.getvalue().split("\n")
+ assert len(l) == 3
+ assert l[-1] == ""
+ assert l[1] == ("0 1 2\r0 1 3\rso ")
+ assert l[0] == "hello"
+
+
+def test_terminal_with_callable_write_and_flush():
+ l = set()
+ class fil:
+ flush = lambda self: l.add("1")
+ write = lambda self, x: l.add("1")
+ __call__ = lambda self, x: l.add("2")
+
+ tw = py.io.TerminalWriter(fil())
+ tw.line("hello")
+ assert l == set(["1"])
+ del fil.flush
+ l.clear()
+ tw = py.io.TerminalWriter(fil())
+ tw.line("hello")
+ assert l == set(["2"])
+
+
+@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
+def test_attr_hasmarkup():
+ tw = py.io.TerminalWriter(stringio=True)
+ assert not tw.hasmarkup
+ tw.hasmarkup = True
+ tw.line("hello", bold=True)
+ s = tw.stringio.getvalue()
+ assert len(s) > len("hello\n")
+ assert '\x1b[1m' in s
+ assert '\x1b[0m' in s
+
+@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
+def test_ansi_print():
+ # we have no easy way to construct a file that
+ # represents a terminal
+ f = py.io.TextIO()
+ f.isatty = lambda: True
+ py.io.ansi_print("hello", 0x32, file=f)
+ text2 = f.getvalue()
+ assert text2.find("hello") != -1
+ assert len(text2) >= len("hello\n")
+ assert '\x1b[50m' in text2
+ assert '\x1b[0m' in text2
+
+def test_should_do_markup_PY_COLORS_eq_1(monkeypatch):
+ monkeypatch.setitem(os.environ, 'PY_COLORS', '1')
+ tw = py.io.TerminalWriter(stringio=True)
+ assert tw.hasmarkup
+ tw.line("hello", bold=True)
+ s = tw.stringio.getvalue()
+ assert len(s) > len("hello\n")
+ assert '\x1b[1m' in s
+ assert '\x1b[0m' in s
+
+def test_should_do_markup_PY_COLORS_eq_0(monkeypatch):
+ monkeypatch.setitem(os.environ, 'PY_COLORS', '0')
+ f = py.io.TextIO()
+ f.isatty = lambda: True
+ tw = py.io.TerminalWriter(file=f)
+ assert not tw.hasmarkup
+ tw.line("hello", bold=True)
+ s = f.getvalue()
+ assert s == "hello\n"
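
A minimal usage sketch, not part of the patch above: the two PY_COLORS tests pin down how that environment variable overrides markup detection, and the snippet assumes only the py.io.TerminalWriter API exercised in this file.

import os
import py

os.environ["PY_COLORS"] = "1"               # force ANSI markup on
tw = py.io.TerminalWriter(stringio=True)
assert tw.hasmarkup                         # markup even though a StringIO is not a tty

os.environ["PY_COLORS"] = "0"               # force ANSI markup off
tw = py.io.TerminalWriter(stringio=True)
assert not tw.hasmarkup
del os.environ["PY_COLORS"]                 # restore normal tty/TERM-based detection
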
diff --git a/testing/log/__init__.pyc b/testing/log/__init__.pyc
deleted file mode 100644
index 33cacde..0000000
--- a/testing/log/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/log/__pycache__/__init__.cpython-35.pyc b/testing/log/__pycache__/__init__.cpython-35.pyc
deleted file mode 100644
index 3835dd4..0000000
--- a/testing/log/__pycache__/__init__.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/log/__pycache__/test_log.cpython-27-PYTEST.pyc b/testing/log/__pycache__/test_log.cpython-27-PYTEST.pyc
deleted file mode 100644
index 24a38b7..0000000
--- a/testing/log/__pycache__/test_log.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/log/__pycache__/test_log.cpython-35-PYTEST.pyc b/testing/log/__pycache__/test_log.cpython-35-PYTEST.pyc
deleted file mode 100644
index 4589e4b..0000000
--- a/testing/log/__pycache__/test_log.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/log/__pycache__/test_warning.cpython-27-PYTEST.pyc b/testing/log/__pycache__/test_warning.cpython-27-PYTEST.pyc
deleted file mode 100644
index 2dcb21e..0000000
--- a/testing/log/__pycache__/test_warning.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/log/__pycache__/test_warning.cpython-35-PYTEST.pyc b/testing/log/__pycache__/test_warning.cpython-35-PYTEST.pyc
deleted file mode 100644
index 397b437..0000000
--- a/testing/log/__pycache__/test_warning.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/log/test_log.py b/testing/log/test_log.py
index b41bc3a..e3694a7 100644
--- a/testing/log/test_log.py
+++ b/testing/log/test_log.py
@@ -1,190 +1,190 @@
-import py
-import sys
-
-from py._log.log import default_keywordmapper
-
-callcapture = py.io.StdCapture.call
-
-def setup_module(mod):
- mod._oldstate = default_keywordmapper.getstate()
-
-def teardown_module(mod):
- default_keywordmapper.setstate(mod._oldstate)
-
-class TestLogProducer:
- def setup_method(self, meth):
- default_keywordmapper.setstate(_oldstate)
-
- def test_getstate_setstate(self):
- state = py.log._getstate()
- py.log.setconsumer("hello", [].append)
- state2 = py.log._getstate()
- assert state2 != state
- py.log._setstate(state)
- state3 = py.log._getstate()
- assert state3 == state
-
- def test_producer_repr(self):
- d = py.log.Producer("default")
- assert repr(d).find('default') != -1
-
- def test_produce_one_keyword(self):
- l = []
- py.log.setconsumer('s1', l.append)
- py.log.Producer('s1')("hello world")
- assert len(l) == 1
- msg = l[0]
- assert msg.content().startswith('hello world')
- assert msg.prefix() == '[s1] '
- assert str(msg) == "[s1] hello world"
-
- def test_producer_class(self):
- p = py.log.Producer('x1')
- l = []
- py.log.setconsumer(p._keywords, l.append)
- p("hello")
- assert len(l) == 1
- assert len(l[0].keywords) == 1
- assert 'x1' == l[0].keywords[0]
-
- def test_producer_caching(self):
- p = py.log.Producer('x1')
- x2 = p.x2
- assert x2 is p.x2
-
-class TestLogConsumer:
- def setup_method(self, meth):
- default_keywordmapper.setstate(_oldstate)
- def test_log_none(self):
- log = py.log.Producer("XXX")
- l = []
- py.log.setconsumer('XXX', l.append)
- log("1")
- assert l
- l[:] = []
- py.log.setconsumer('XXX', None)
- log("2")
- assert not l
-
- def test_log_default_stderr(self):
- res, out, err = callcapture(py.log.Producer("default"), "hello")
- assert err.strip() == "[default] hello"
-
- def test_simple_consumer_match(self):
- l = []
- py.log.setconsumer("x1", l.append)
- p = py.log.Producer("x1 x2")
- p("hello")
- assert l
- assert l[0].content() == "hello"
-
- def test_simple_consumer_match_2(self):
- l = []
- p = py.log.Producer("x1 x2")
- py.log.setconsumer(p._keywords, l.append)
- p("42")
- assert l
- assert l[0].content() == "42"
-
- def test_no_auto_producer(self):
- p = py.log.Producer('x')
- py.test.raises(AttributeError, "p._x")
- py.test.raises(AttributeError, "p.x_y")
-
- def test_setconsumer_with_producer(self):
- l = []
- p = py.log.Producer("hello")
- py.log.setconsumer(p, l.append)
- p("world")
- assert str(l[0]) == "[hello] world"
-
- def test_multi_consumer(self):
- l = []
- py.log.setconsumer("x1", l.append)
- py.log.setconsumer("x1 x2", None)
- p = py.log.Producer("x1 x2")
- p("hello")
- assert not l
- py.log.Producer("x1")("hello")
- assert l
- assert l[0].content() == "hello"
-
- def test_log_stderr(self):
- py.log.setconsumer("xyz", py.log.STDOUT)
- res, out, err = callcapture(py.log.Producer("xyz"), "hello")
- assert not err
- assert out.strip() == '[xyz] hello'
-
- def test_log_file(self, tmpdir):
- customlog = tmpdir.join('log.out')
- py.log.setconsumer("default", open(str(customlog), 'w', 1))
- py.log.Producer("default")("hello world #1")
- assert customlog.readlines() == ['[default] hello world #1\n']
-
- py.log.setconsumer("default", py.log.Path(customlog, buffering=False))
- py.log.Producer("default")("hello world #2")
- res = customlog.readlines()
- assert res == ['[default] hello world #2\n'] # no append by default!
-
- def test_log_file_append_mode(self, tmpdir):
- logfilefn = tmpdir.join('log_append.out')
-
- # The append mode is on by default, so we don't need to specify it for File
- py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
- buffering=0))
- assert logfilefn.check()
- py.log.Producer("default")("hello world #1")
- lines = logfilefn.readlines()
- assert lines == ['[default] hello world #1\n']
- py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
- buffering=0))
- py.log.Producer("default")("hello world #1")
- lines = logfilefn.readlines()
- assert lines == ['[default] hello world #1\n',
- '[default] hello world #1\n']
-
- def test_log_file_delayed_create(self, tmpdir):
- logfilefn = tmpdir.join('log_create.out')
-
- py.log.setconsumer("default", py.log.Path(logfilefn,
- delayed_create=True, buffering=0))
- assert not logfilefn.check()
- py.log.Producer("default")("hello world #1")
- lines = logfilefn.readlines()
- assert lines == ['[default] hello world #1\n']
-
- def test_keyword_based_log_files(self, tmpdir):
- logfiles = []
- keywords = 'k1 k2 k3'.split()
- for key in keywords:
- path = tmpdir.join(key)
- py.log.setconsumer(key, py.log.Path(path, buffering=0))
-
- py.log.Producer('k1')('1')
- py.log.Producer('k2')('2')
- py.log.Producer('k3')('3')
-
- for key in keywords:
- path = tmpdir.join(key)
- assert path.read().strip() == '[%s] %s' % (key, key[-1])
-
- # disabled for now; the syslog log file can usually be read only by root
- # I manually inspected /var/log/messages and the entries were there
- def no_test_log_syslog(self):
- py.log.setconsumer("default", py.log.Syslog())
- py.log.default("hello world #1")
-
- # disabled for now until I figure out how to read entries in the
- # Event Logs on Windows
- # I manually inspected the Application Log and the entries were there
- def no_test_log_winevent(self):
- py.log.setconsumer("default", py.log.WinEvent())
- py.log.default("hello world #1")
-
- # disabled for now until I figure out how to properly pass the parameters
- def no_test_log_email(self):
- py.log.setconsumer("default", py.log.Email(mailhost="gheorghiu.net",
- fromaddr="grig",
- toaddrs="grig",
- subject = "py.log email"))
- py.log.default("hello world #1")
+import py
+import sys
+
+from py._log.log import default_keywordmapper
+
+callcapture = py.io.StdCapture.call
+
+def setup_module(mod):
+ mod._oldstate = default_keywordmapper.getstate()
+
+def teardown_module(mod):
+ default_keywordmapper.setstate(mod._oldstate)
+
+class TestLogProducer:
+ def setup_method(self, meth):
+ default_keywordmapper.setstate(_oldstate)
+
+ def test_getstate_setstate(self):
+ state = py.log._getstate()
+ py.log.setconsumer("hello", [].append)
+ state2 = py.log._getstate()
+ assert state2 != state
+ py.log._setstate(state)
+ state3 = py.log._getstate()
+ assert state3 == state
+
+ def test_producer_repr(self):
+ d = py.log.Producer("default")
+ assert repr(d).find('default') != -1
+
+ def test_produce_one_keyword(self):
+ l = []
+ py.log.setconsumer('s1', l.append)
+ py.log.Producer('s1')("hello world")
+ assert len(l) == 1
+ msg = l[0]
+ assert msg.content().startswith('hello world')
+ assert msg.prefix() == '[s1] '
+ assert str(msg) == "[s1] hello world"
+
+ def test_producer_class(self):
+ p = py.log.Producer('x1')
+ l = []
+ py.log.setconsumer(p._keywords, l.append)
+ p("hello")
+ assert len(l) == 1
+ assert len(l[0].keywords) == 1
+ assert 'x1' == l[0].keywords[0]
+
+ def test_producer_caching(self):
+ p = py.log.Producer('x1')
+ x2 = p.x2
+ assert x2 is p.x2
+
+class TestLogConsumer:
+ def setup_method(self, meth):
+ default_keywordmapper.setstate(_oldstate)
+ def test_log_none(self):
+ log = py.log.Producer("XXX")
+ l = []
+ py.log.setconsumer('XXX', l.append)
+ log("1")
+ assert l
+ l[:] = []
+ py.log.setconsumer('XXX', None)
+ log("2")
+ assert not l
+
+ def test_log_default_stderr(self):
+ res, out, err = callcapture(py.log.Producer("default"), "hello")
+ assert err.strip() == "[default] hello"
+
+ def test_simple_consumer_match(self):
+ l = []
+ py.log.setconsumer("x1", l.append)
+ p = py.log.Producer("x1 x2")
+ p("hello")
+ assert l
+ assert l[0].content() == "hello"
+
+ def test_simple_consumer_match_2(self):
+ l = []
+ p = py.log.Producer("x1 x2")
+ py.log.setconsumer(p._keywords, l.append)
+ p("42")
+ assert l
+ assert l[0].content() == "42"
+
+ def test_no_auto_producer(self):
+ p = py.log.Producer('x')
+ py.test.raises(AttributeError, "p._x")
+ py.test.raises(AttributeError, "p.x_y")
+
+ def test_setconsumer_with_producer(self):
+ l = []
+ p = py.log.Producer("hello")
+ py.log.setconsumer(p, l.append)
+ p("world")
+ assert str(l[0]) == "[hello] world"
+
+ def test_multi_consumer(self):
+ l = []
+ py.log.setconsumer("x1", l.append)
+ py.log.setconsumer("x1 x2", None)
+ p = py.log.Producer("x1 x2")
+ p("hello")
+ assert not l
+ py.log.Producer("x1")("hello")
+ assert l
+ assert l[0].content() == "hello"
+
+ def test_log_stderr(self):
+ py.log.setconsumer("xyz", py.log.STDOUT)
+ res, out, err = callcapture(py.log.Producer("xyz"), "hello")
+ assert not err
+ assert out.strip() == '[xyz] hello'
+
+ def test_log_file(self, tmpdir):
+ customlog = tmpdir.join('log.out')
+ py.log.setconsumer("default", open(str(customlog), 'w', 1))
+ py.log.Producer("default")("hello world #1")
+ assert customlog.readlines() == ['[default] hello world #1\n']
+
+ py.log.setconsumer("default", py.log.Path(customlog, buffering=False))
+ py.log.Producer("default")("hello world #2")
+ res = customlog.readlines()
+ assert res == ['[default] hello world #2\n'] # no append by default!
+
+ def test_log_file_append_mode(self, tmpdir):
+ logfilefn = tmpdir.join('log_append.out')
+
+ # The append mode is on by default, so we don't need to specify it for File
+ py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
+ buffering=0))
+ assert logfilefn.check()
+ py.log.Producer("default")("hello world #1")
+ lines = logfilefn.readlines()
+ assert lines == ['[default] hello world #1\n']
+ py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
+ buffering=0))
+ py.log.Producer("default")("hello world #1")
+ lines = logfilefn.readlines()
+ assert lines == ['[default] hello world #1\n',
+ '[default] hello world #1\n']
+
+ def test_log_file_delayed_create(self, tmpdir):
+ logfilefn = tmpdir.join('log_create.out')
+
+ py.log.setconsumer("default", py.log.Path(logfilefn,
+ delayed_create=True, buffering=0))
+ assert not logfilefn.check()
+ py.log.Producer("default")("hello world #1")
+ lines = logfilefn.readlines()
+ assert lines == ['[default] hello world #1\n']
+
+ def test_keyword_based_log_files(self, tmpdir):
+ logfiles = []
+ keywords = 'k1 k2 k3'.split()
+ for key in keywords:
+ path = tmpdir.join(key)
+ py.log.setconsumer(key, py.log.Path(path, buffering=0))
+
+ py.log.Producer('k1')('1')
+ py.log.Producer('k2')('2')
+ py.log.Producer('k3')('3')
+
+ for key in keywords:
+ path = tmpdir.join(key)
+ assert path.read().strip() == '[%s] %s' % (key, key[-1])
+
+ # disabled for now; the syslog log file can usually be read only by root
+ # I manually inspected /var/log/messages and the entries were there
+ def no_test_log_syslog(self):
+ py.log.setconsumer("default", py.log.Syslog())
+ py.log.default("hello world #1")
+
+ # disabled for now until I figure out how to read entries in the
+ # Event Logs on Windows
+ # I manually inspected the Application Log and the entries were there
+ def no_test_log_winevent(self):
+ py.log.setconsumer("default", py.log.WinEvent())
+ py.log.default("hello world #1")
+
+ # disabled for now until I figure out how to properly pass the parameters
+ def no_test_log_email(self):
+ py.log.setconsumer("default", py.log.Email(mailhost="gheorghiu.net",
+ fromaddr="grig",
+ toaddrs="grig",
+ subject = "py.log email"))
+ py.log.default("hello world #1")
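
A minimal sketch, not part of the patch, of the producer/consumer pattern these tests exercise; it assumes only the py.log.setconsumer and py.log.Producer calls shown above.

import py

messages = []
py.log.setconsumer("myapp", messages.append)      # route the 'myapp' keyword to a list
log = py.log.Producer("myapp")
log("hello world")
assert str(messages[0]) == "[myapp] hello world"  # messages are prefixed with their keywords
py.log.setconsumer("myapp", None)                 # silence the keyword again
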
diff --git a/testing/log/test_warning.py b/testing/log/test_warning.py
index 8c89cf8..d5676e9 100644
--- a/testing/log/test_warning.py
+++ b/testing/log/test_warning.py
@@ -1,76 +1,76 @@
-import pytest
-import py
-
-mypath = py.path.local(__file__).new(ext=".py")
-
-@pytest.mark.xfail
-def test_forwarding_to_warnings_module():
- pytest.deprecated_call(py.log._apiwarn, "1.3", "..")
-
-def test_apiwarn_functional(recwarn):
- capture = py.io.StdCapture()
- py.log._apiwarn("x.y.z", "something", stacklevel=1)
- out, err = capture.reset()
- py.builtin.print_("out", out)
- py.builtin.print_("err", err)
- assert err.find("x.y.z") != -1
- lno = py.code.getrawcode(test_apiwarn_functional).co_firstlineno + 2
- exp = "%s:%s" % (mypath, lno)
- assert err.find(exp) != -1
-
-def test_stacklevel(recwarn):
- def f():
- py.log._apiwarn("x", "some", stacklevel=2)
- # 3
- # 4
- capture = py.io.StdCapture()
- f()
- out, err = capture.reset()
- lno = py.code.getrawcode(test_stacklevel).co_firstlineno + 6
- warning = str(err)
- assert warning.find(":%s" % lno) != -1
-
-def test_stacklevel_initpkg_with_resolve(testdir, recwarn):
- testdir.makepyfile(modabc="""
- import py
- def f():
- py.log._apiwarn("x", "some", stacklevel="apipkg123")
- """)
- testdir.makepyfile(apipkg123="""
- def __getattr__():
- import modabc
- modabc.f()
- """)
- p = testdir.makepyfile("""
- import apipkg123
- apipkg123.__getattr__()
- """)
- capture = py.io.StdCapture()
- p.pyimport()
- out, err = capture.reset()
- warning = str(err)
- loc = 'test_stacklevel_initpkg_with_resolve.py:2'
- assert warning.find(loc) != -1
-
-def test_stacklevel_initpkg_no_resolve(recwarn):
- def f():
- py.log._apiwarn("x", "some", stacklevel="apipkg")
- capture = py.io.StdCapture()
- f()
- out, err = capture.reset()
- lno = py.code.getrawcode(test_stacklevel_initpkg_no_resolve).co_firstlineno + 2
- warning = str(err)
- assert warning.find(":%s" % lno) != -1
-
-
-def test_function(recwarn):
- capture = py.io.StdCapture()
- py.log._apiwarn("x.y.z", "something", function=test_function)
- out, err = capture.reset()
- py.builtin.print_("out", out)
- py.builtin.print_("err", err)
- assert err.find("x.y.z") != -1
- lno = py.code.getrawcode(test_function).co_firstlineno
- exp = "%s:%s" % (mypath, lno)
- assert err.find(exp) != -1
-
+import pytest
+import py
+
+mypath = py.path.local(__file__).new(ext=".py")
+
+@pytest.mark.xfail
+def test_forwarding_to_warnings_module():
+ pytest.deprecated_call(py.log._apiwarn, "1.3", "..")
+
+def test_apiwarn_functional(recwarn):
+ capture = py.io.StdCapture()
+ py.log._apiwarn("x.y.z", "something", stacklevel=1)
+ out, err = capture.reset()
+ py.builtin.print_("out", out)
+ py.builtin.print_("err", err)
+ assert err.find("x.y.z") != -1
+ lno = py.code.getrawcode(test_apiwarn_functional).co_firstlineno + 2
+ exp = "%s:%s" % (mypath, lno)
+ assert err.find(exp) != -1
+
+def test_stacklevel(recwarn):
+ def f():
+ py.log._apiwarn("x", "some", stacklevel=2)
+ # 3
+ # 4
+ capture = py.io.StdCapture()
+ f()
+ out, err = capture.reset()
+ lno = py.code.getrawcode(test_stacklevel).co_firstlineno + 6
+ warning = str(err)
+ assert warning.find(":%s" % lno) != -1
+
+def test_stacklevel_initpkg_with_resolve(testdir, recwarn):
+ testdir.makepyfile(modabc="""
+ import py
+ def f():
+ py.log._apiwarn("x", "some", stacklevel="apipkg123")
+ """)
+ testdir.makepyfile(apipkg123="""
+ def __getattr__():
+ import modabc
+ modabc.f()
+ """)
+ p = testdir.makepyfile("""
+ import apipkg123
+ apipkg123.__getattr__()
+ """)
+ capture = py.io.StdCapture()
+ p.pyimport()
+ out, err = capture.reset()
+ warning = str(err)
+ loc = 'test_stacklevel_initpkg_with_resolve.py:2'
+ assert warning.find(loc) != -1
+
+def test_stacklevel_initpkg_no_resolve(recwarn):
+ def f():
+ py.log._apiwarn("x", "some", stacklevel="apipkg")
+ capture = py.io.StdCapture()
+ f()
+ out, err = capture.reset()
+ lno = py.code.getrawcode(test_stacklevel_initpkg_no_resolve).co_firstlineno + 2
+ warning = str(err)
+ assert warning.find(":%s" % lno) != -1
+
+
+def test_function(recwarn):
+ capture = py.io.StdCapture()
+ py.log._apiwarn("x.y.z", "something", function=test_function)
+ out, err = capture.reset()
+ py.builtin.print_("out", out)
+ py.builtin.print_("err", err)
+ assert err.find("x.y.z") != -1
+ lno = py.code.getrawcode(test_function).co_firstlineno
+ exp = "%s:%s" % (mypath, lno)
+ assert err.find(exp) != -1
+
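
A small sketch, not part of the patch, of the internal helper these tests target; py.log._apiwarn is a private API, so the only behaviour assumed here is what the assertions above guarantee: a message on stderr containing the version string and the caller's file:line.

import py

def old_helper():
    # stacklevel=2 attributes the warning to old_helper()'s caller
    py.log._apiwarn("1.4", "old_helper() is deprecated", stacklevel=2)

old_helper()
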
diff --git a/testing/path/__init__.pyc b/testing/path/__init__.pyc
deleted file mode 100644
index 4fdf73d..0000000
--- a/testing/path/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/__init__.cpython-35.pyc b/testing/path/__pycache__/__init__.cpython-35.pyc
deleted file mode 100644
index d1c2d20..0000000
--- a/testing/path/__pycache__/__init__.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/common.cpython-27-PYTEST.pyc b/testing/path/__pycache__/common.cpython-27-PYTEST.pyc
deleted file mode 100644
index e7ff36f..0000000
--- a/testing/path/__pycache__/common.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/common.cpython-35-PYTEST.pyc b/testing/path/__pycache__/common.cpython-35-PYTEST.pyc
deleted file mode 100644
index 6c87bd8..0000000
--- a/testing/path/__pycache__/common.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/common.cpython-35.pyc b/testing/path/__pycache__/common.cpython-35.pyc
deleted file mode 100644
index 5b9c354..0000000
--- a/testing/path/__pycache__/common.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/conftest.cpython-27-PYTEST.pyc b/testing/path/__pycache__/conftest.cpython-27-PYTEST.pyc
deleted file mode 100644
index c132cb0..0000000
--- a/testing/path/__pycache__/conftest.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/conftest.cpython-35-PYTEST.pyc b/testing/path/__pycache__/conftest.cpython-35-PYTEST.pyc
deleted file mode 100644
index fe070ad..0000000
--- a/testing/path/__pycache__/conftest.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/conftest.cpython-35.pyc b/testing/path/__pycache__/conftest.cpython-35.pyc
deleted file mode 100644
index 226e1df..0000000
--- a/testing/path/__pycache__/conftest.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/svntestbase.cpython-27-PYTEST.pyc b/testing/path/__pycache__/svntestbase.cpython-27-PYTEST.pyc
deleted file mode 100644
index e0be9c4..0000000
--- a/testing/path/__pycache__/svntestbase.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/svntestbase.cpython-35-PYTEST.pyc b/testing/path/__pycache__/svntestbase.cpython-35-PYTEST.pyc
deleted file mode 100644
index f3dfd2b..0000000
--- a/testing/path/__pycache__/svntestbase.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/svntestbase.cpython-35.pyc b/testing/path/__pycache__/svntestbase.cpython-35.pyc
deleted file mode 100644
index 0580696..0000000
--- a/testing/path/__pycache__/svntestbase.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_cacheutil.cpython-27-PYTEST.pyc b/testing/path/__pycache__/test_cacheutil.cpython-27-PYTEST.pyc
deleted file mode 100644
index 6c2f060..0000000
--- a/testing/path/__pycache__/test_cacheutil.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_cacheutil.cpython-35-PYTEST.pyc b/testing/path/__pycache__/test_cacheutil.cpython-35-PYTEST.pyc
deleted file mode 100644
index 9ccb280..0000000
--- a/testing/path/__pycache__/test_cacheutil.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_local.cpython-27-PYTEST.pyc b/testing/path/__pycache__/test_local.cpython-27-PYTEST.pyc
deleted file mode 100644
index def5459..0000000
--- a/testing/path/__pycache__/test_local.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_local.cpython-35-PYTEST.pyc b/testing/path/__pycache__/test_local.cpython-35-PYTEST.pyc
deleted file mode 100644
index 7d3d874..0000000
--- a/testing/path/__pycache__/test_local.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_svnauth.cpython-27-PYTEST.pyc b/testing/path/__pycache__/test_svnauth.cpython-27-PYTEST.pyc
deleted file mode 100644
index ecba924..0000000
--- a/testing/path/__pycache__/test_svnauth.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_svnauth.cpython-35-PYTEST.pyc b/testing/path/__pycache__/test_svnauth.cpython-35-PYTEST.pyc
deleted file mode 100644
index fa74aab..0000000
--- a/testing/path/__pycache__/test_svnauth.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_svnurl.cpython-27-PYTEST.pyc b/testing/path/__pycache__/test_svnurl.cpython-27-PYTEST.pyc
deleted file mode 100644
index 0af4cb6..0000000
--- a/testing/path/__pycache__/test_svnurl.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_svnurl.cpython-35-PYTEST.pyc b/testing/path/__pycache__/test_svnurl.cpython-35-PYTEST.pyc
deleted file mode 100644
index 2d69ebf..0000000
--- a/testing/path/__pycache__/test_svnurl.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_svnwc.cpython-27-PYTEST.pyc b/testing/path/__pycache__/test_svnwc.cpython-27-PYTEST.pyc
deleted file mode 100644
index 35e72c4..0000000
--- a/testing/path/__pycache__/test_svnwc.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/__pycache__/test_svnwc.cpython-35-PYTEST.pyc b/testing/path/__pycache__/test_svnwc.cpython-35-PYTEST.pyc
deleted file mode 100644
index 0a0cf6c..0000000
--- a/testing/path/__pycache__/test_svnwc.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/common.py b/testing/path/common.py
index 274ced4..8a5f86b 100644
--- a/testing/path/common.py
+++ b/testing/path/common.py
@@ -1,495 +1,495 @@
-import py
-import sys
-
-import pytest
-
-class CommonFSTests(object):
- def test_constructor_equality(self, path1):
- p = path1.__class__(path1)
- assert p == path1
-
- def test_eq_nonstring(self, path1):
- p1 = path1.join('sampledir')
- p2 = path1.join('sampledir')
- assert p1 == p2
-
- def test_new_identical(self, path1):
- assert path1 == path1.new()
-
- def test_join(self, path1):
- p = path1.join('sampledir')
- strp = str(p)
- assert strp.endswith('sampledir')
- assert strp.startswith(str(path1))
-
- def test_join_normalized(self, path1):
- newpath = path1.join(path1.sep+'sampledir')
- strp = str(newpath)
- assert strp.endswith('sampledir')
- assert strp.startswith(str(path1))
- newpath = path1.join((path1.sep*2) + 'sampledir')
- strp = str(newpath)
- assert strp.endswith('sampledir')
- assert strp.startswith(str(path1))
-
- def test_join_noargs(self, path1):
- newpath = path1.join()
- assert path1 == newpath
-
- def test_add_something(self, path1):
- p = path1.join('sample')
- p = p + 'dir'
- assert p.check()
- assert p.exists()
- assert p.isdir()
- assert not p.isfile()
-
- def test_parts(self, path1):
- newpath = path1.join('sampledir', 'otherfile')
- par = newpath.parts()[-3:]
- assert par == [path1, path1.join('sampledir'), newpath]
-
- revpar = newpath.parts(reverse=True)[:3]
- assert revpar == [newpath, path1.join('sampledir'), path1]
-
- def test_common(self, path1):
- other = path1.join('sampledir')
- x = other.common(path1)
- assert x == path1
-
- #def test_parents_nonexisting_file(self, path1):
- # newpath = path1 / 'dirnoexist' / 'nonexisting file'
- # par = list(newpath.parents())
- # assert par[:2] == [path1 / 'dirnoexist', path1]
-
- def test_basename_checks(self, path1):
- newpath = path1.join('sampledir')
- assert newpath.check(basename='sampledir')
- assert newpath.check(notbasename='xyz')
- assert newpath.basename == 'sampledir'
-
- def test_basename(self, path1):
- newpath = path1.join('sampledir')
- assert newpath.check(basename='sampledir')
-        assert newpath.basename == 'sampledir'
-
- def test_dirname(self, path1):
- newpath = path1.join('sampledir')
- assert newpath.dirname == str(path1)
-
- def test_dirpath(self, path1):
- newpath = path1.join('sampledir')
- assert newpath.dirpath() == path1
-
- def test_dirpath_with_args(self, path1):
- newpath = path1.join('sampledir')
- assert newpath.dirpath('x') == path1.join('x')
-
- def test_newbasename(self, path1):
- newpath = path1.join('samplefile')
- newbase = newpath.new(basename="samplefile2")
- assert newbase.basename == "samplefile2"
- assert newbase.dirpath() == newpath.dirpath()
-
- def test_not_exists(self, path1):
- assert not path1.join('does_not_exist').check()
- assert path1.join('does_not_exist').check(exists=0)
-
- def test_exists(self, path1):
- assert path1.join("samplefile").check()
- assert path1.join("samplefile").check(exists=1)
- assert path1.join("samplefile").exists()
- assert path1.join("samplefile").isfile()
- assert not path1.join("samplefile").isdir()
-
- def test_dir(self, path1):
- #print repr(path1.join("sampledir"))
- assert path1.join("sampledir").check(dir=1)
- assert path1.join('samplefile').check(notdir=1)
- assert not path1.join("samplefile").check(dir=1)
- assert path1.join("samplefile").exists()
- assert not path1.join("samplefile").isdir()
- assert path1.join("samplefile").isfile()
-
- def test_fnmatch_file(self, path1):
- assert path1.join("samplefile").check(fnmatch='s*e')
- assert path1.join("samplefile").fnmatch('s*e')
- assert not path1.join("samplefile").fnmatch('s*x')
- assert not path1.join("samplefile").check(fnmatch='s*x')
-
- #def test_fnmatch_dir(self, path1):
-
- # pattern = path1.sep.join(['s*file'])
- # sfile = path1.join("samplefile")
- # assert sfile.check(fnmatch=pattern)
-
- def test_relto(self, path1):
- l=path1.join("sampledir", "otherfile")
- assert l.relto(path1) == l.sep.join(["sampledir", "otherfile"])
- assert l.check(relto=path1)
- assert path1.check(notrelto=l)
- assert not path1.check(relto=l)
-
- def test_bestrelpath(self, path1):
- curdir = path1
- sep = curdir.sep
- s = curdir.bestrelpath(curdir)
- assert s == "."
- s = curdir.bestrelpath(curdir.join("hello", "world"))
- assert s == "hello" + sep + "world"
-
- s = curdir.bestrelpath(curdir.dirpath().join("sister"))
- assert s == ".." + sep + "sister"
- assert curdir.bestrelpath(curdir.dirpath()) == ".."
-
- assert curdir.bestrelpath("hello") == "hello"
-
- def test_relto_not_relative(self, path1):
- l1=path1.join("bcde")
- l2=path1.join("b")
- assert not l1.relto(l2)
- assert not l2.relto(l1)
-
- @py.test.mark.xfail("sys.platform.startswith('java')")
- def test_listdir(self, path1):
- l = path1.listdir()
- assert path1.join('sampledir') in l
- assert path1.join('samplefile') in l
- py.test.raises(py.error.ENOTDIR,
- "path1.join('samplefile').listdir()")
-
- def test_listdir_fnmatchstring(self, path1):
- l = path1.listdir('s*dir')
- assert len(l)
-        assert l[0] == path1.join('sampledir')
-
- def test_listdir_filter(self, path1):
- l = path1.listdir(lambda x: x.check(dir=1))
- assert path1.join('sampledir') in l
- assert not path1.join('samplefile') in l
-
- def test_listdir_sorted(self, path1):
- l = path1.listdir(lambda x: x.check(basestarts="sample"), sort=True)
- assert path1.join('sampledir') == l[0]
- assert path1.join('samplefile') == l[1]
- assert path1.join('samplepickle') == l[2]
-
- def test_visit_nofilter(self, path1):
- l = []
- for i in path1.visit():
- l.append(i.relto(path1))
- assert "sampledir" in l
- assert path1.sep.join(["sampledir", "otherfile"]) in l
-
- def test_visit_norecurse(self, path1):
- l = []
- for i in path1.visit(None, lambda x: x.basename != "sampledir"):
- l.append(i.relto(path1))
- assert "sampledir" in l
- assert not path1.sep.join(["sampledir", "otherfile"]) in l
-
- @pytest.mark.parametrize('fil', ['*dir', u'*dir',
- pytest.mark.skip("sys.version_info <"
- " (3,6)")(b'*dir')])
- def test_visit_filterfunc_is_string(self, path1, fil):
- l = []
- for i in path1.visit(fil):
- l.append(i.relto(path1))
-        assert len(l) == 2
- assert "sampledir" in l
- assert "otherdir" in l
-
- @py.test.mark.xfail("sys.platform.startswith('java')")
- def test_visit_ignore(self, path1):
- p = path1.join('nonexisting')
- assert list(p.visit(ignore=py.error.ENOENT)) == []
-
- def test_visit_endswith(self, path1):
- l = []
- for i in path1.visit(lambda x: x.check(endswith="file")):
- l.append(i.relto(path1))
- assert path1.sep.join(["sampledir", "otherfile"]) in l
- assert "samplefile" in l
-
- def test_endswith(self, path1):
- assert path1.check(notendswith='.py')
- x = path1.join('samplefile')
- assert x.check(endswith='file')
-
- def test_cmp(self, path1):
- path1 = path1.join('samplefile')
- path2 = path1.join('samplefile2')
- assert (path1 < path2) == ('samplefile' < 'samplefile2')
- assert not (path1 < path1)
-
- def test_simple_read(self, path1):
- x = path1.join('samplefile').read('r')
- assert x == 'samplefile\n'
-
- def test_join_div_operator(self, path1):
- newpath = path1 / '/sampledir' / '/test//'
- newpath2 = path1.join('sampledir', 'test')
- assert newpath == newpath2
-
- def test_ext(self, path1):
- newpath = path1.join('sampledir.ext')
- assert newpath.ext == '.ext'
- newpath = path1.join('sampledir')
- assert not newpath.ext
-
- def test_purebasename(self, path1):
- newpath = path1.join('samplefile.py')
- assert newpath.purebasename == 'samplefile'
-
- def test_multiple_parts(self, path1):
- newpath = path1.join('samplefile.py')
- dirname, purebasename, basename, ext = newpath._getbyspec(
- 'dirname,purebasename,basename,ext')
- assert str(path1).endswith(dirname) # be careful with win32 'drive'
- assert purebasename == 'samplefile'
- assert basename == 'samplefile.py'
- assert ext == '.py'
-
- def test_dotted_name_ext(self, path1):
- newpath = path1.join('a.b.c')
- ext = newpath.ext
- assert ext == '.c'
- assert newpath.ext == '.c'
-
- def test_newext(self, path1):
- newpath = path1.join('samplefile.py')
- newext = newpath.new(ext='.txt')
- assert newext.basename == "samplefile.txt"
- assert newext.purebasename == "samplefile"
-
- def test_readlines(self, path1):
- fn = path1.join('samplefile')
- contents = fn.readlines()
- assert contents == ['samplefile\n']
-
- def test_readlines_nocr(self, path1):
- fn = path1.join('samplefile')
- contents = fn.readlines(cr=0)
- assert contents == ['samplefile', '']
-
- def test_file(self, path1):
- assert path1.join('samplefile').check(file=1)
-
- def test_not_file(self, path1):
- assert not path1.join("sampledir").check(file=1)
- assert path1.join("sampledir").check(file=0)
-
- def test_non_existent(self, path1):
- assert path1.join("sampledir.nothere").check(dir=0)
- assert path1.join("sampledir.nothere").check(file=0)
- assert path1.join("sampledir.nothere").check(notfile=1)
- assert path1.join("sampledir.nothere").check(notdir=1)
- assert path1.join("sampledir.nothere").check(notexists=1)
- assert not path1.join("sampledir.nothere").check(notfile=0)
-
- # pattern = path1.sep.join(['s*file'])
- # sfile = path1.join("samplefile")
- # assert sfile.check(fnmatch=pattern)
-
- def test_size(self, path1):
- url = path1.join("samplefile")
- assert url.size() > len("samplefile")
-
- def test_mtime(self, path1):
- url = path1.join("samplefile")
- assert url.mtime() > 0
-
- def test_relto_wrong_type(self, path1):
- py.test.raises(TypeError, "path1.relto(42)")
-
- def test_load(self, path1):
- p = path1.join('samplepickle')
- obj = p.load()
- assert type(obj) is dict
- assert obj.get('answer',None) == 42
-
- def test_visit_filesonly(self, path1):
- l = []
- for i in path1.visit(lambda x: x.check(file=1)):
- l.append(i.relto(path1))
- assert not "sampledir" in l
- assert path1.sep.join(["sampledir", "otherfile"]) in l
-
- def test_visit_nodotfiles(self, path1):
- l = []
- for i in path1.visit(lambda x: x.check(dotfile=0)):
- l.append(i.relto(path1))
- assert "sampledir" in l
- assert path1.sep.join(["sampledir", "otherfile"]) in l
- assert not ".dotfile" in l
-
- def test_visit_breadthfirst(self, path1):
- l = []
- for i in path1.visit(bf=True):
- l.append(i.relto(path1))
- for i, p in enumerate(l):
- if path1.sep in p:
- for j in range(i, len(l)):
- assert path1.sep in l[j]
- break
- else:
- py.test.fail("huh")
-
- def test_visit_sort(self, path1):
- l = []
- for i in path1.visit(bf=True, sort=True):
- l.append(i.relto(path1))
- for i, p in enumerate(l):
- if path1.sep in p:
- break
- assert l[:i] == sorted(l[:i])
- assert l[i:] == sorted(l[i:])
-
- def test_endswith(self, path1):
- def chk(p):
- return p.check(endswith="pickle")
- assert not chk(path1)
- assert not chk(path1.join('samplefile'))
- assert chk(path1.join('somepickle'))
-
- def test_copy_file(self, path1):
- otherdir = path1.join('otherdir')
- initpy = otherdir.join('__init__.py')
- copied = otherdir.join('copied')
- initpy.copy(copied)
- try:
- assert copied.check()
- s1 = initpy.read()
- s2 = copied.read()
- assert s1 == s2
- finally:
- if copied.check():
- copied.remove()
-
- def test_copy_dir(self, path1):
- otherdir = path1.join('otherdir')
- copied = path1.join('newdir')
- try:
- otherdir.copy(copied)
- assert copied.check(dir=1)
- assert copied.join('__init__.py').check(file=1)
- s1 = otherdir.join('__init__.py').read()
- s2 = copied.join('__init__.py').read()
- assert s1 == s2
- finally:
- if copied.check(dir=1):
- copied.remove(rec=1)
-
- def test_remove_file(self, path1):
- d = path1.ensure('todeleted')
- assert d.check()
- d.remove()
- assert not d.check()
-
- def test_remove_dir_recursive_by_default(self, path1):
- d = path1.ensure('to', 'be', 'deleted')
- assert d.check()
- p = path1.join('to')
- p.remove()
- assert not p.check()
-
- def test_ensure_dir(self, path1):
- b = path1.ensure_dir("001", "002")
- assert b.basename == "002"
- assert b.isdir()
-
- def test_mkdir_and_remove(self, path1):
- tmpdir = path1
- py.test.raises(py.error.EEXIST, tmpdir.mkdir, 'sampledir')
- new = tmpdir.join('mktest1')
- new.mkdir()
- assert new.check(dir=1)
- new.remove()
-
- new = tmpdir.mkdir('mktest')
- assert new.check(dir=1)
- new.remove()
- assert tmpdir.join('mktest') == new
-
- def test_move_file(self, path1):
- p = path1.join('samplefile')
- newp = p.dirpath('moved_samplefile')
- p.move(newp)
- try:
- assert newp.check(file=1)
- assert not p.check()
- finally:
- dp = newp.dirpath()
- if hasattr(dp, 'revert'):
- dp.revert()
- else:
- newp.move(p)
- assert p.check()
-
- def test_move_dir(self, path1):
- source = path1.join('sampledir')
- dest = path1.join('moveddir')
- source.move(dest)
- assert dest.check(dir=1)
- assert dest.join('otherfile').check(file=1)
- assert not source.join('sampledir').check()
-
- def test_fspath_protocol_match_strpath(self, path1):
- assert path1.__fspath__() == path1.strpath
-
- def test_fspath_func_match_strpath(self, path1):
- try:
- from os import fspath
- except ImportError:
- from py._path.common import fspath
- assert fspath(path1) == path1.strpath
-
- @py.test.mark.skip("sys.version_info < (3,6)")
- def test_fspath_open(self, path1):
- f = path1.join('opentestfile')
- open(f)
-
- @py.test.mark.skip("sys.version_info < (3,6)")
- def test_fspath_fsencode(self, path1):
- from os import fsencode
- assert fsencode(path1) == fsencode(path1.strpath)
-
-def setuptestfs(path):
- if path.join('samplefile').check():
- return
- #print "setting up test fs for", repr(path)
- samplefile = path.ensure('samplefile')
- samplefile.write('samplefile\n')
-
- execfile = path.ensure('execfile')
- execfile.write('x=42')
-
- execfilepy = path.ensure('execfile.py')
- execfilepy.write('x=42')
-
- d = {1:2, 'hello': 'world', 'answer': 42}
- path.ensure('samplepickle').dump(d)
-
- sampledir = path.ensure('sampledir', dir=1)
- sampledir.ensure('otherfile')
-
- otherdir = path.ensure('otherdir', dir=1)
- otherdir.ensure('__init__.py')
-
- module_a = otherdir.ensure('a.py')
- if sys.version_info >= (2,6):
- module_a.write('from .b import stuff as result\n')
- else:
- module_a.write('from b import stuff as result\n')
- module_b = otherdir.ensure('b.py')
- module_b.write('stuff="got it"\n')
- module_c = otherdir.ensure('c.py')
- module_c.write('''import py;
-import otherdir.a
-value = otherdir.a.result
-''')
- module_d = otherdir.ensure('d.py')
- module_d.write('''import py;
-from otherdir import a
-value2 = a.result
-''')
+import py
+import sys
+
+import pytest
+
+class CommonFSTests(object):
+ def test_constructor_equality(self, path1):
+ p = path1.__class__(path1)
+ assert p == path1
+
+ def test_eq_nonstring(self, path1):
+ p1 = path1.join('sampledir')
+ p2 = path1.join('sampledir')
+ assert p1 == p2
+
+ def test_new_identical(self, path1):
+ assert path1 == path1.new()
+
+ def test_join(self, path1):
+ p = path1.join('sampledir')
+ strp = str(p)
+ assert strp.endswith('sampledir')
+ assert strp.startswith(str(path1))
+
+ def test_join_normalized(self, path1):
+ newpath = path1.join(path1.sep+'sampledir')
+ strp = str(newpath)
+ assert strp.endswith('sampledir')
+ assert strp.startswith(str(path1))
+ newpath = path1.join((path1.sep*2) + 'sampledir')
+ strp = str(newpath)
+ assert strp.endswith('sampledir')
+ assert strp.startswith(str(path1))
+
+ def test_join_noargs(self, path1):
+ newpath = path1.join()
+ assert path1 == newpath
+
+ def test_add_something(self, path1):
+ p = path1.join('sample')
+ p = p + 'dir'
+ assert p.check()
+ assert p.exists()
+ assert p.isdir()
+ assert not p.isfile()
+
+ def test_parts(self, path1):
+ newpath = path1.join('sampledir', 'otherfile')
+ par = newpath.parts()[-3:]
+ assert par == [path1, path1.join('sampledir'), newpath]
+
+ revpar = newpath.parts(reverse=True)[:3]
+ assert revpar == [newpath, path1.join('sampledir'), path1]
+
+ def test_common(self, path1):
+ other = path1.join('sampledir')
+ x = other.common(path1)
+ assert x == path1
+
+ #def test_parents_nonexisting_file(self, path1):
+ # newpath = path1 / 'dirnoexist' / 'nonexisting file'
+ # par = list(newpath.parents())
+ # assert par[:2] == [path1 / 'dirnoexist', path1]
+
+ def test_basename_checks(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.check(basename='sampledir')
+ assert newpath.check(notbasename='xyz')
+ assert newpath.basename == 'sampledir'
+
+ def test_basename(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.check(basename='sampledir')
+        assert newpath.basename == 'sampledir'
+
+ def test_dirname(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.dirname == str(path1)
+
+ def test_dirpath(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.dirpath() == path1
+
+ def test_dirpath_with_args(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.dirpath('x') == path1.join('x')
+
+ def test_newbasename(self, path1):
+ newpath = path1.join('samplefile')
+ newbase = newpath.new(basename="samplefile2")
+ assert newbase.basename == "samplefile2"
+ assert newbase.dirpath() == newpath.dirpath()
+
+ def test_not_exists(self, path1):
+ assert not path1.join('does_not_exist').check()
+ assert path1.join('does_not_exist').check(exists=0)
+
+ def test_exists(self, path1):
+ assert path1.join("samplefile").check()
+ assert path1.join("samplefile").check(exists=1)
+ assert path1.join("samplefile").exists()
+ assert path1.join("samplefile").isfile()
+ assert not path1.join("samplefile").isdir()
+
+ def test_dir(self, path1):
+ #print repr(path1.join("sampledir"))
+ assert path1.join("sampledir").check(dir=1)
+ assert path1.join('samplefile').check(notdir=1)
+ assert not path1.join("samplefile").check(dir=1)
+ assert path1.join("samplefile").exists()
+ assert not path1.join("samplefile").isdir()
+ assert path1.join("samplefile").isfile()
+
+ def test_fnmatch_file(self, path1):
+ assert path1.join("samplefile").check(fnmatch='s*e')
+ assert path1.join("samplefile").fnmatch('s*e')
+ assert not path1.join("samplefile").fnmatch('s*x')
+ assert not path1.join("samplefile").check(fnmatch='s*x')
+
+ #def test_fnmatch_dir(self, path1):
+
+ # pattern = path1.sep.join(['s*file'])
+ # sfile = path1.join("samplefile")
+ # assert sfile.check(fnmatch=pattern)
+
+ def test_relto(self, path1):
+ l=path1.join("sampledir", "otherfile")
+ assert l.relto(path1) == l.sep.join(["sampledir", "otherfile"])
+ assert l.check(relto=path1)
+ assert path1.check(notrelto=l)
+ assert not path1.check(relto=l)
+
+ def test_bestrelpath(self, path1):
+ curdir = path1
+ sep = curdir.sep
+ s = curdir.bestrelpath(curdir)
+ assert s == "."
+ s = curdir.bestrelpath(curdir.join("hello", "world"))
+ assert s == "hello" + sep + "world"
+
+ s = curdir.bestrelpath(curdir.dirpath().join("sister"))
+ assert s == ".." + sep + "sister"
+ assert curdir.bestrelpath(curdir.dirpath()) == ".."
+
+ assert curdir.bestrelpath("hello") == "hello"
+
+ def test_relto_not_relative(self, path1):
+ l1=path1.join("bcde")
+ l2=path1.join("b")
+ assert not l1.relto(l2)
+ assert not l2.relto(l1)
+
+ @py.test.mark.xfail("sys.platform.startswith('java')")
+ def test_listdir(self, path1):
+ l = path1.listdir()
+ assert path1.join('sampledir') in l
+ assert path1.join('samplefile') in l
+ py.test.raises(py.error.ENOTDIR,
+ "path1.join('samplefile').listdir()")
+
+ def test_listdir_fnmatchstring(self, path1):
+ l = path1.listdir('s*dir')
+ assert len(l)
+        assert l[0] == path1.join('sampledir')
+
+ def test_listdir_filter(self, path1):
+ l = path1.listdir(lambda x: x.check(dir=1))
+ assert path1.join('sampledir') in l
+ assert not path1.join('samplefile') in l
+
+ def test_listdir_sorted(self, path1):
+ l = path1.listdir(lambda x: x.check(basestarts="sample"), sort=True)
+ assert path1.join('sampledir') == l[0]
+ assert path1.join('samplefile') == l[1]
+ assert path1.join('samplepickle') == l[2]
+
+ def test_visit_nofilter(self, path1):
+ l = []
+ for i in path1.visit():
+ l.append(i.relto(path1))
+ assert "sampledir" in l
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+
+ def test_visit_norecurse(self, path1):
+ l = []
+ for i in path1.visit(None, lambda x: x.basename != "sampledir"):
+ l.append(i.relto(path1))
+ assert "sampledir" in l
+ assert not path1.sep.join(["sampledir", "otherfile"]) in l
+
+ @pytest.mark.parametrize('fil', ['*dir', u'*dir',
+ pytest.mark.skip("sys.version_info <"
+ " (3,6)")(b'*dir')])
+ def test_visit_filterfunc_is_string(self, path1, fil):
+ l = []
+ for i in path1.visit(fil):
+ l.append(i.relto(path1))
+        assert len(l) == 2
+ assert "sampledir" in l
+ assert "otherdir" in l
+
+ @py.test.mark.xfail("sys.platform.startswith('java')")
+ def test_visit_ignore(self, path1):
+ p = path1.join('nonexisting')
+ assert list(p.visit(ignore=py.error.ENOENT)) == []
+
+ def test_visit_endswith(self, path1):
+ l = []
+ for i in path1.visit(lambda x: x.check(endswith="file")):
+ l.append(i.relto(path1))
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+ assert "samplefile" in l
+
+ def test_endswith(self, path1):
+ assert path1.check(notendswith='.py')
+ x = path1.join('samplefile')
+ assert x.check(endswith='file')
+
+ def test_cmp(self, path1):
+ path1 = path1.join('samplefile')
+ path2 = path1.join('samplefile2')
+ assert (path1 < path2) == ('samplefile' < 'samplefile2')
+ assert not (path1 < path1)
+
+ def test_simple_read(self, path1):
+ x = path1.join('samplefile').read('r')
+ assert x == 'samplefile\n'
+
+ def test_join_div_operator(self, path1):
+ newpath = path1 / '/sampledir' / '/test//'
+ newpath2 = path1.join('sampledir', 'test')
+ assert newpath == newpath2
+
+ def test_ext(self, path1):
+ newpath = path1.join('sampledir.ext')
+ assert newpath.ext == '.ext'
+ newpath = path1.join('sampledir')
+ assert not newpath.ext
+
+ def test_purebasename(self, path1):
+ newpath = path1.join('samplefile.py')
+ assert newpath.purebasename == 'samplefile'
+
+ def test_multiple_parts(self, path1):
+ newpath = path1.join('samplefile.py')
+ dirname, purebasename, basename, ext = newpath._getbyspec(
+ 'dirname,purebasename,basename,ext')
+ assert str(path1).endswith(dirname) # be careful with win32 'drive'
+ assert purebasename == 'samplefile'
+ assert basename == 'samplefile.py'
+ assert ext == '.py'
+
+ def test_dotted_name_ext(self, path1):
+ newpath = path1.join('a.b.c')
+ ext = newpath.ext
+ assert ext == '.c'
+ assert newpath.ext == '.c'
+
+ def test_newext(self, path1):
+ newpath = path1.join('samplefile.py')
+ newext = newpath.new(ext='.txt')
+ assert newext.basename == "samplefile.txt"
+ assert newext.purebasename == "samplefile"
+
+ def test_readlines(self, path1):
+ fn = path1.join('samplefile')
+ contents = fn.readlines()
+ assert contents == ['samplefile\n']
+
+ def test_readlines_nocr(self, path1):
+ fn = path1.join('samplefile')
+ contents = fn.readlines(cr=0)
+ assert contents == ['samplefile', '']
+
+ def test_file(self, path1):
+ assert path1.join('samplefile').check(file=1)
+
+ def test_not_file(self, path1):
+ assert not path1.join("sampledir").check(file=1)
+ assert path1.join("sampledir").check(file=0)
+
+ def test_non_existent(self, path1):
+ assert path1.join("sampledir.nothere").check(dir=0)
+ assert path1.join("sampledir.nothere").check(file=0)
+ assert path1.join("sampledir.nothere").check(notfile=1)
+ assert path1.join("sampledir.nothere").check(notdir=1)
+ assert path1.join("sampledir.nothere").check(notexists=1)
+ assert not path1.join("sampledir.nothere").check(notfile=0)
+
+ # pattern = path1.sep.join(['s*file'])
+ # sfile = path1.join("samplefile")
+ # assert sfile.check(fnmatch=pattern)
+
+ def test_size(self, path1):
+ url = path1.join("samplefile")
+ assert url.size() > len("samplefile")
+
+ def test_mtime(self, path1):
+ url = path1.join("samplefile")
+ assert url.mtime() > 0
+
+ def test_relto_wrong_type(self, path1):
+ py.test.raises(TypeError, "path1.relto(42)")
+
+ def test_load(self, path1):
+ p = path1.join('samplepickle')
+ obj = p.load()
+ assert type(obj) is dict
+ assert obj.get('answer',None) == 42
+
+ def test_visit_filesonly(self, path1):
+ l = []
+ for i in path1.visit(lambda x: x.check(file=1)):
+ l.append(i.relto(path1))
+ assert not "sampledir" in l
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+
+ def test_visit_nodotfiles(self, path1):
+ l = []
+ for i in path1.visit(lambda x: x.check(dotfile=0)):
+ l.append(i.relto(path1))
+ assert "sampledir" in l
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+ assert not ".dotfile" in l
+
+ def test_visit_breadthfirst(self, path1):
+ l = []
+ for i in path1.visit(bf=True):
+ l.append(i.relto(path1))
+ for i, p in enumerate(l):
+ if path1.sep in p:
+ for j in range(i, len(l)):
+ assert path1.sep in l[j]
+ break
+ else:
+ py.test.fail("huh")
+
+ def test_visit_sort(self, path1):
+ l = []
+ for i in path1.visit(bf=True, sort=True):
+ l.append(i.relto(path1))
+ for i, p in enumerate(l):
+ if path1.sep in p:
+ break
+ assert l[:i] == sorted(l[:i])
+ assert l[i:] == sorted(l[i:])
+
+ def test_endswith(self, path1):
+ def chk(p):
+ return p.check(endswith="pickle")
+ assert not chk(path1)
+ assert not chk(path1.join('samplefile'))
+ assert chk(path1.join('somepickle'))
+
+ def test_copy_file(self, path1):
+ otherdir = path1.join('otherdir')
+ initpy = otherdir.join('__init__.py')
+ copied = otherdir.join('copied')
+ initpy.copy(copied)
+ try:
+ assert copied.check()
+ s1 = initpy.read()
+ s2 = copied.read()
+ assert s1 == s2
+ finally:
+ if copied.check():
+ copied.remove()
+
+ def test_copy_dir(self, path1):
+ otherdir = path1.join('otherdir')
+ copied = path1.join('newdir')
+ try:
+ otherdir.copy(copied)
+ assert copied.check(dir=1)
+ assert copied.join('__init__.py').check(file=1)
+ s1 = otherdir.join('__init__.py').read()
+ s2 = copied.join('__init__.py').read()
+ assert s1 == s2
+ finally:
+ if copied.check(dir=1):
+ copied.remove(rec=1)
+
+ def test_remove_file(self, path1):
+ d = path1.ensure('todeleted')
+ assert d.check()
+ d.remove()
+ assert not d.check()
+
+ def test_remove_dir_recursive_by_default(self, path1):
+ d = path1.ensure('to', 'be', 'deleted')
+ assert d.check()
+ p = path1.join('to')
+ p.remove()
+ assert not p.check()
+
+ def test_ensure_dir(self, path1):
+ b = path1.ensure_dir("001", "002")
+ assert b.basename == "002"
+ assert b.isdir()
+
+ def test_mkdir_and_remove(self, path1):
+ tmpdir = path1
+ py.test.raises(py.error.EEXIST, tmpdir.mkdir, 'sampledir')
+ new = tmpdir.join('mktest1')
+ new.mkdir()
+ assert new.check(dir=1)
+ new.remove()
+
+ new = tmpdir.mkdir('mktest')
+ assert new.check(dir=1)
+ new.remove()
+ assert tmpdir.join('mktest') == new
+
+ def test_move_file(self, path1):
+ p = path1.join('samplefile')
+ newp = p.dirpath('moved_samplefile')
+ p.move(newp)
+ try:
+ assert newp.check(file=1)
+ assert not p.check()
+ finally:
+ dp = newp.dirpath()
+ if hasattr(dp, 'revert'):
+ dp.revert()
+ else:
+ newp.move(p)
+ assert p.check()
+
+ def test_move_dir(self, path1):
+ source = path1.join('sampledir')
+ dest = path1.join('moveddir')
+ source.move(dest)
+ assert dest.check(dir=1)
+ assert dest.join('otherfile').check(file=1)
+ assert not source.join('sampledir').check()
+
+ def test_fspath_protocol_match_strpath(self, path1):
+ assert path1.__fspath__() == path1.strpath
+
+ def test_fspath_func_match_strpath(self, path1):
+ try:
+ from os import fspath
+ except ImportError:
+ from py._path.common import fspath
+ assert fspath(path1) == path1.strpath
+
+ @py.test.mark.skip("sys.version_info < (3,6)")
+ def test_fspath_open(self, path1):
+ f = path1.join('opentestfile')
+ open(f)
+
+ @py.test.mark.skip("sys.version_info < (3,6)")
+ def test_fspath_fsencode(self, path1):
+ from os import fsencode
+ assert fsencode(path1) == fsencode(path1.strpath)
+
+def setuptestfs(path):
+ if path.join('samplefile').check():
+ return
+ #print "setting up test fs for", repr(path)
+ samplefile = path.ensure('samplefile')
+ samplefile.write('samplefile\n')
+
+ execfile = path.ensure('execfile')
+ execfile.write('x=42')
+
+ execfilepy = path.ensure('execfile.py')
+ execfilepy.write('x=42')
+
+ d = {1:2, 'hello': 'world', 'answer': 42}
+ path.ensure('samplepickle').dump(d)
+
+ sampledir = path.ensure('sampledir', dir=1)
+ sampledir.ensure('otherfile')
+
+ otherdir = path.ensure('otherdir', dir=1)
+ otherdir.ensure('__init__.py')
+
+ module_a = otherdir.ensure('a.py')
+ if sys.version_info >= (2,6):
+ module_a.write('from .b import stuff as result\n')
+ else:
+ module_a.write('from b import stuff as result\n')
+ module_b = otherdir.ensure('b.py')
+ module_b.write('stuff="got it"\n')
+ module_c = otherdir.ensure('c.py')
+ module_c.write('''import py;
+import otherdir.a
+value = otherdir.a.result
+''')
+ module_d = otherdir.ensure('d.py')
+ module_d.write('''import py;
+from otherdir import a
+value2 = a.result
+''')
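+
+# Editor's illustrative sketch (not part of the upstream patch): setuptestfs()
+# is consumed by the path1 fixtures of the sibling test modules, which lay the
+# sample tree out on top of pytest's tmpdir, roughly like:
+#
+#   def pytest_funcarg__path1(request):
+#       path1 = request.getfuncargvalue("tmpdir")
+#       setuptestfs(path1)   # creates samplefile, sampledir, otherdir, ...
+#       return path1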
diff --git a/testing/path/common.pyc b/testing/path/common.pyc
deleted file mode 100644
index 25add97..0000000
--- a/testing/path/common.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/conftest.py b/testing/path/conftest.py
index a9711b2..fb608cc 100644
--- a/testing/path/conftest.py
+++ b/testing/path/conftest.py
@@ -1,80 +1,80 @@
-import py
-import sys
-from py._path import svnwc as svncommon
-
-svnbin = py.path.local.sysfind('svn')
-repodump = py.path.local(__file__).dirpath('repotest.dump')
-from py.builtin import print_
-
-def pytest_funcarg__repowc1(request):
- if svnbin is None:
- py.test.skip("svn binary not found")
-
- tmpdir = request.getfuncargvalue("tmpdir")
- repo, repourl, wc = request.cached_setup(
- setup=lambda: getrepowc(tmpdir, "path1repo", "path1wc"),
- scope="module",
- )
- for x in ('test_remove', 'test_move', 'test_status_deleted'):
- if request.function.__name__.startswith(x):
- #print >>sys.stderr, ("saving repo", repo, "for", request.function)
- _savedrepowc = save_repowc(repo, wc)
- request.addfinalizer(lambda: restore_repowc(_savedrepowc))
- return repo, repourl, wc
-
-def pytest_funcarg__repowc2(request):
- tmpdir = request.getfuncargvalue("tmpdir")
- name = request.function.__name__
- repo, url, wc = getrepowc(tmpdir, "%s-repo-2" % name, "%s-wc-2" % name)
- return repo, url, wc
-
-def getsvnbin():
- if svnbin is None:
- py.test.skip("svn binary not found")
- return svnbin
-
-# make a wc directory out of a given root url
-# cache previously obtained wcs!
-#
-def getrepowc(tmpdir, reponame='basetestrepo', wcname='wc'):
- repo = tmpdir.mkdir(reponame)
- wcdir = tmpdir.mkdir(wcname)
- repo.ensure(dir=1)
- py.process.cmdexec('svnadmin create "%s"' %
- svncommon._escape_helper(repo))
- py.process.cmdexec('svnadmin load -q "%s" <"%s"' %
- (svncommon._escape_helper(repo), repodump))
- print_("created svn repository", repo)
- wcdir.ensure(dir=1)
- wc = py.path.svnwc(wcdir)
- if py.std.sys.platform == 'win32':
- repourl = "file://" + '/' + str(repo).replace('\\', '/')
- else:
- repourl = "file://%s" % repo
- wc.checkout(repourl)
- print_("checked out new repo into", wc)
- return (repo, repourl, wc)
-
-
-def save_repowc(repo, wc):
- assert not str(repo).startswith("file://"), repo
- assert repo.check()
- savedrepo = repo.dirpath(repo.basename+".1")
- savedwc = wc.dirpath(wc.basename+".1")
- repo.copy(savedrepo)
- wc.localpath.copy(savedwc.localpath)
- return savedrepo, savedwc
-
-def restore_repowc(obj):
- savedrepo, savedwc = obj
- #print >>sys.stderr, ("restoring", savedrepo)
- repo = savedrepo.new(basename=savedrepo.basename[:-2])
- assert repo.check()
- wc = savedwc.new(basename=savedwc.basename[:-2])
- assert wc.check()
- wc.localpath.remove()
- repo.remove()
- savedrepo.move(repo)
- savedwc.localpath.move(wc.localpath)
- py.path.svnurl._lsnorevcache.clear()
- py.path.svnurl._lsrevcache.clear()
+import py
+import sys
+from py._path import svnwc as svncommon
+
+svnbin = py.path.local.sysfind('svn')
+repodump = py.path.local(__file__).dirpath('repotest.dump')
+from py.builtin import print_
+
+def pytest_funcarg__repowc1(request):
+ if svnbin is None:
+ py.test.skip("svn binary not found")
+
+ tmpdir = request.getfuncargvalue("tmpdir")
+ repo, repourl, wc = request.cached_setup(
+ setup=lambda: getrepowc(tmpdir, "path1repo", "path1wc"),
+ scope="module",
+ )
+ for x in ('test_remove', 'test_move', 'test_status_deleted'):
+ if request.function.__name__.startswith(x):
+ #print >>sys.stderr, ("saving repo", repo, "for", request.function)
+ _savedrepowc = save_repowc(repo, wc)
+ request.addfinalizer(lambda: restore_repowc(_savedrepowc))
+ return repo, repourl, wc
+
+def pytest_funcarg__repowc2(request):
+ tmpdir = request.getfuncargvalue("tmpdir")
+ name = request.function.__name__
+ repo, url, wc = getrepowc(tmpdir, "%s-repo-2" % name, "%s-wc-2" % name)
+ return repo, url, wc
+
+def getsvnbin():
+ if svnbin is None:
+ py.test.skip("svn binary not found")
+ return svnbin
+
+# make a wc directory out of a given root url
+# cache previously obtained wcs!
+#
+def getrepowc(tmpdir, reponame='basetestrepo', wcname='wc'):
+ repo = tmpdir.mkdir(reponame)
+ wcdir = tmpdir.mkdir(wcname)
+ repo.ensure(dir=1)
+ py.process.cmdexec('svnadmin create "%s"' %
+ svncommon._escape_helper(repo))
+ py.process.cmdexec('svnadmin load -q "%s" <"%s"' %
+ (svncommon._escape_helper(repo), repodump))
+ print_("created svn repository", repo)
+ wcdir.ensure(dir=1)
+ wc = py.path.svnwc(wcdir)
+ if py.std.sys.platform == 'win32':
+ repourl = "file://" + '/' + str(repo).replace('\\', '/')
+ else:
+ repourl = "file://%s" % repo
+ wc.checkout(repourl)
+ print_("checked out new repo into", wc)
+ return (repo, repourl, wc)
+
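+# Editor's illustrative sketch (not part of the upstream patch): tests request
+# the repowc1 funcarg and unpack the triple built by getrepowc() above, e.g.:
+#
+#   def test_something(repowc1):
+#       repo, repourl, wc = repowc1   # local repo path, file:// url, svnwc
+#       ...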
+
+def save_repowc(repo, wc):
+ assert not str(repo).startswith("file://"), repo
+ assert repo.check()
+ savedrepo = repo.dirpath(repo.basename+".1")
+ savedwc = wc.dirpath(wc.basename+".1")
+ repo.copy(savedrepo)
+ wc.localpath.copy(savedwc.localpath)
+ return savedrepo, savedwc
+
+def restore_repowc(obj):
+ savedrepo, savedwc = obj
+ #print >>sys.stderr, ("restoring", savedrepo)
+ repo = savedrepo.new(basename=savedrepo.basename[:-2])
+ assert repo.check()
+ wc = savedwc.new(basename=savedwc.basename[:-2])
+ assert wc.check()
+ wc.localpath.remove()
+ repo.remove()
+ savedrepo.move(repo)
+ savedwc.localpath.move(wc.localpath)
+ py.path.svnurl._lsnorevcache.clear()
+ py.path.svnurl._lsrevcache.clear()
diff --git a/testing/path/conftest.pyc b/testing/path/conftest.pyc
deleted file mode 100644
index 46dcd7b..0000000
--- a/testing/path/conftest.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/svntestbase.py b/testing/path/svntestbase.py
index 8d94a9c..5b60788 100644
--- a/testing/path/svntestbase.py
+++ b/testing/path/svntestbase.py
@@ -1,31 +1,31 @@
-import sys
-import py
-from py._path import svnwc as svncommon
-from common import CommonFSTests
-
-class CommonSvnTests(CommonFSTests):
-
- def test_propget(self, path1):
- url = path1.join("samplefile")
- value = url.propget('svn:eol-style')
- assert value == 'native'
-
- def test_proplist(self, path1):
- url = path1.join("samplefile")
- res = url.proplist()
- assert res['svn:eol-style'] == 'native'
-
- def test_info(self, path1):
- url = path1.join("samplefile")
- res = url.info()
- assert res.size > len("samplefile") and res.created_rev >= 0
-
- def test_log_simple(self, path1):
- url = path1.join("samplefile")
- logentries = url.log()
- for logentry in logentries:
- assert logentry.rev == 1
- assert hasattr(logentry, 'author')
- assert hasattr(logentry, 'date')
-
-#cache.repositories.put(svnrepourl, 1200, 0)
+import sys
+import py
+from py._path import svnwc as svncommon
+from common import CommonFSTests
+
+class CommonSvnTests(CommonFSTests):
+
+ def test_propget(self, path1):
+ url = path1.join("samplefile")
+ value = url.propget('svn:eol-style')
+ assert value == 'native'
+
+ def test_proplist(self, path1):
+ url = path1.join("samplefile")
+ res = url.proplist()
+ assert res['svn:eol-style'] == 'native'
+
+ def test_info(self, path1):
+ url = path1.join("samplefile")
+ res = url.info()
+ assert res.size > len("samplefile") and res.created_rev >= 0
+
+ def test_log_simple(self, path1):
+ url = path1.join("samplefile")
+ logentries = url.log()
+ for logentry in logentries:
+ assert logentry.rev == 1
+ assert hasattr(logentry, 'author')
+ assert hasattr(logentry, 'date')
+
+#cache.repositories.put(svnrepourl, 1200, 0)
diff --git a/testing/path/svntestbase.pyc b/testing/path/svntestbase.pyc
deleted file mode 100644
index 27ba08e..0000000
--- a/testing/path/svntestbase.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/path/test_cacheutil.py b/testing/path/test_cacheutil.py
index 0b5cd31..d0493e0 100644
--- a/testing/path/test_cacheutil.py
+++ b/testing/path/test_cacheutil.py
@@ -1,84 +1,84 @@
-import py
-from py._path import cacheutil
-
-class BasicCacheAPITest:
- cache = None
- def test_getorbuild(self):
- val = self.cache.getorbuild(-42, lambda: 42)
- assert val == 42
- val = self.cache.getorbuild(-42, lambda: 23)
- assert val == 42
-
- def test_cache_get_key_error(self):
- py.test.raises(KeyError, "self.cache._getentry(-23)")
-
- def test_delentry_non_raising(self):
- val = self.cache.getorbuild(100, lambda: 100)
- self.cache.delentry(100)
- py.test.raises(KeyError, "self.cache._getentry(100)")
-
- def test_delentry_raising(self):
- val = self.cache.getorbuild(100, lambda: 100)
- self.cache.delentry(100)
- py.test.raises(KeyError, "self.cache.delentry(100, raising=True)")
-
- def test_clear(self):
- self.cache.clear()
-
-class TestBuildcostAccess(BasicCacheAPITest):
- cache = cacheutil.BuildcostAccessCache(maxentries=128)
-
- def test_cache_works_somewhat_simple(self, monkeypatch):
- cache = cacheutil.BuildcostAccessCache()
- # the default gettime
- # BuildcostAccessCache.build can
- # result into time()-time() == 0 which makes the below
- # test fail randomly. Let's rather use incrementing
- # numbers instead.
- l = [0]
- def counter():
- l[0] = l[0] + 1
- return l[0]
- monkeypatch.setattr(cacheutil, 'gettime', counter)
- for x in range(cache.maxentries):
- y = cache.getorbuild(x, lambda: x)
- assert x == y
- for x in range(cache.maxentries):
- assert cache.getorbuild(x, None) == x
- halfentries = int(cache.maxentries / 2)
- for x in range(halfentries):
- assert cache.getorbuild(x, None) == x
- assert cache.getorbuild(x, None) == x
- # evict one entry
- val = cache.getorbuild(-1, lambda: 42)
- assert val == 42
- # check that recently used ones are still there
- # and are not build again
- for x in range(halfentries):
- assert cache.getorbuild(x, None) == x
- assert cache.getorbuild(-1, None) == 42
-
-
-class TestAging(BasicCacheAPITest):
- maxsecs = 0.10
- cache = cacheutil.AgingCache(maxentries=128, maxseconds=maxsecs)
-
- def test_cache_eviction(self):
- self.cache.getorbuild(17, lambda: 17)
- endtime = py.std.time.time() + self.maxsecs * 10
- while py.std.time.time() < endtime:
- try:
- self.cache._getentry(17)
- except KeyError:
- break
- py.std.time.sleep(self.maxsecs*0.3)
- else:
- py.test.fail("waiting for cache eviction failed")
-
-def test_prune_lowestweight():
- maxsecs = 0.05
- cache = cacheutil.AgingCache(maxentries=10, maxseconds=maxsecs)
- for x in range(cache.maxentries):
- cache.getorbuild(x, lambda: x)
- py.std.time.sleep(maxsecs*1.1)
- cache.getorbuild(cache.maxentries+1, lambda: 42)
+import py
+from py._path import cacheutil
+
+class BasicCacheAPITest:
+ cache = None
+ def test_getorbuild(self):
+ val = self.cache.getorbuild(-42, lambda: 42)
+ assert val == 42
+ val = self.cache.getorbuild(-42, lambda: 23)
+ assert val == 42
+
+ def test_cache_get_key_error(self):
+ py.test.raises(KeyError, "self.cache._getentry(-23)")
+
+ def test_delentry_non_raising(self):
+ val = self.cache.getorbuild(100, lambda: 100)
+ self.cache.delentry(100)
+ py.test.raises(KeyError, "self.cache._getentry(100)")
+
+ def test_delentry_raising(self):
+ val = self.cache.getorbuild(100, lambda: 100)
+ self.cache.delentry(100)
+ py.test.raises(KeyError, "self.cache.delentry(100, raising=True)")
+
+ def test_clear(self):
+ self.cache.clear()
+
+class TestBuildcostAccess(BasicCacheAPITest):
+ cache = cacheutil.BuildcostAccessCache(maxentries=128)
+
+ def test_cache_works_somewhat_simple(self, monkeypatch):
+ cache = cacheutil.BuildcostAccessCache()
+ # with the default gettime, BuildcostAccessCache.build can
+ # end up with time()-time() == 0, which makes the test below
+ # fail randomly. Use incrementing numbers instead.
+ l = [0]
+ def counter():
+ l[0] = l[0] + 1
+ return l[0]
+ monkeypatch.setattr(cacheutil, 'gettime', counter)
+ for x in range(cache.maxentries):
+ y = cache.getorbuild(x, lambda: x)
+ assert x == y
+ for x in range(cache.maxentries):
+ assert cache.getorbuild(x, None) == x
+ halfentries = int(cache.maxentries / 2)
+ for x in range(halfentries):
+ assert cache.getorbuild(x, None) == x
+ assert cache.getorbuild(x, None) == x
+ # evict one entry
+ val = cache.getorbuild(-1, lambda: 42)
+ assert val == 42
+ # check that recently used ones are still there
+ # and are not built again
+ for x in range(halfentries):
+ assert cache.getorbuild(x, None) == x
+ assert cache.getorbuild(-1, None) == 42
+
+
+class TestAging(BasicCacheAPITest):
+ maxsecs = 0.10
+ cache = cacheutil.AgingCache(maxentries=128, maxseconds=maxsecs)
+
+ def test_cache_eviction(self):
+ self.cache.getorbuild(17, lambda: 17)
+ endtime = py.std.time.time() + self.maxsecs * 10
+ while py.std.time.time() < endtime:
+ try:
+ self.cache._getentry(17)
+ except KeyError:
+ break
+ py.std.time.sleep(self.maxsecs*0.3)
+ else:
+ py.test.fail("waiting for cache eviction failed")
+
+def test_prune_lowestweight():
+ maxsecs = 0.05
+ cache = cacheutil.AgingCache(maxentries=10, maxseconds=maxsecs)
+ for x in range(cache.maxentries):
+ cache.getorbuild(x, lambda: x)
+ py.std.time.sleep(maxsecs*1.1)
+ cache.getorbuild(cache.maxentries+1, lambda: 42)
diff --git a/testing/path/test_local.py b/testing/path/test_local.py
index 69981d8..1c90a39 100644
--- a/testing/path/test_local.py
+++ b/testing/path/test_local.py
@@ -1,907 +1,922 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import with_statement
-import time
-import py
-import pytest
-import os, sys
-from py.path import local
-import common
-
-failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
-failsonjywin32 = py.test.mark.xfail("sys.platform.startswith('java') "
- "and getattr(os, '_name', None) == 'nt'")
-win32only = py.test.mark.skipif(
- "not (sys.platform == 'win32' or getattr(os, '_name', None) == 'nt')")
-skiponwin32 = py.test.mark.skipif(
- "sys.platform == 'win32' or getattr(os, '_name', None) == 'nt'")
-
-ATIME_RESOLUTION = 0.01
-
-
-def pytest_funcarg__path1(request):
- def setup():
- path1 = request.getfuncargvalue("tmpdir")
- common.setuptestfs(path1)
- return path1
- def teardown(path1):
- # post check
- assert path1.join("samplefile").check()
- return request.cached_setup(setup, teardown, scope="session")
-
-def pytest_funcarg__fake_fspath_obj(request):
- class FakeFSPathClass(object):
- def __init__(self, path):
- self._path = path
- def __fspath__(self):
- return self._path
- return FakeFSPathClass("this/is/a/fake/path")
-
-class TestLocalPath(common.CommonFSTests):
- def test_join_normpath(self, tmpdir):
- assert tmpdir.join(".") == tmpdir
- p = tmpdir.join("../%s" % tmpdir.basename)
- assert p == tmpdir
- p = tmpdir.join("..//%s/" % tmpdir.basename)
- assert p == tmpdir
-
- @skiponwin32
- def test_dirpath_abs_no_abs(self, tmpdir):
- p = tmpdir.join('foo')
- assert p.dirpath('/bar') == tmpdir.join('bar')
- assert tmpdir.dirpath('/bar', abs=True) == py.path.local('/bar')
-
- def test_gethash(self, tmpdir):
- md5 = py.builtin._tryimport('md5', 'hashlib').md5
- lib = py.builtin._tryimport('sha', 'hashlib')
- sha = getattr(lib, 'sha1', getattr(lib, 'sha', None))
- fn = tmpdir.join("testhashfile")
- data = 'hello'.encode('ascii')
- fn.write(data, mode="wb")
- assert fn.computehash("md5") == md5(data).hexdigest()
- assert fn.computehash("sha1") == sha(data).hexdigest()
- py.test.raises(ValueError, fn.computehash, "asdasd")
-
- def test_remove_removes_readonly_file(self, tmpdir):
- readonly_file = tmpdir.join('readonly').ensure()
- readonly_file.chmod(0)
- readonly_file.remove()
- assert not readonly_file.check(exists=1)
-
- def test_remove_removes_readonly_dir(self, tmpdir):
- readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
- readonly_dir.chmod(int("500", 8))
- readonly_dir.remove()
- assert not readonly_dir.check(exists=1)
-
- def test_remove_removes_dir_and_readonly_file(self, tmpdir):
- readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
- readonly_file = readonly_dir.join('readonlyfile').ensure()
- readonly_file.chmod(0)
- readonly_dir.remove()
- assert not readonly_dir.check(exists=1)
-
- def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
- l = []
- monkeypatch.setattr(py.std.shutil, 'rmtree',
- lambda *args, **kwargs: l.append(kwargs))
- tmpdir.remove()
- assert not l[0]['ignore_errors']
- for val in (True, False):
- l[:] = []
- tmpdir.remove(ignore_errors=val)
- assert l[0]['ignore_errors'] == val
-
- def test_initialize_curdir(self):
- assert str(local()) == py.std.os.getcwd()
-
- @skiponwin32
- def test_chdir_gone(self, path1):
- p = path1.ensure("dir_to_be_removed", dir=1)
- p.chdir()
- p.remove()
- pytest.raises(py.error.ENOENT, py.path.local)
- assert path1.chdir() is None
- assert os.getcwd() == str(path1)
-
- def test_as_cwd(self, path1):
- dir = path1.ensure("subdir", dir=1)
- old = py.path.local()
- with dir.as_cwd() as x:
- assert x == old
- assert py.path.local() == dir
- assert os.getcwd() == str(old)
-
- def test_as_cwd_exception(self, path1):
- old = py.path.local()
- dir = path1.ensure("subdir", dir=1)
- with pytest.raises(ValueError):
- with dir.as_cwd():
- raise ValueError()
- assert old == py.path.local()
-
- def test_initialize_reldir(self, path1):
- with path1.as_cwd():
- p = local('samplefile')
- assert p.check()
-
- @pytest.mark.xfail("sys.version_info < (2,6) and sys.platform == 'win32'")
- def test_tilde_expansion(self, monkeypatch, tmpdir):
- monkeypatch.setenv("HOME", str(tmpdir))
- p = py.path.local("~", expanduser=True)
- assert p == os.path.expanduser("~")
-
- def test_eq_with_strings(self, path1):
- path1 = path1.join('sampledir')
- path2 = str(path1)
- assert path1 == path2
- assert path2 == path1
- path3 = path1.join('samplefile')
- assert path3 != path2
- assert path2 != path3
-
- def test_eq_with_none(self, path1):
- assert path1 != None
-
- def test_gt_with_strings(self, path1):
- path2 = path1.join('sampledir')
- path3 = str(path1.join("ttt"))
- assert path3 > path2
- assert path2 < path3
- assert path2 < "ttt"
- assert "ttt" > path2
- path4 = path1.join("aaa")
- l = [path2, path4,path3]
- assert sorted(l) == [path4, path2, path3]
-
- def test_open_and_ensure(self, path1):
- p = path1.join("sub1", "sub2", "file")
- with p.open("w", ensure=1) as f:
- f.write("hello")
- assert p.read() == "hello"
-
- def test_write_and_ensure(self, path1):
- p = path1.join("sub1", "sub2", "file")
- p.write("hello", ensure=1)
- assert p.read() == "hello"
-
- @py.test.mark.multi(bin=(False, True))
- def test_dump(self, tmpdir, bin):
- path = tmpdir.join("dumpfile%s" % int(bin))
- try:
- d = {'answer' : 42}
- path.dump(d, bin=bin)
- f = path.open('rb+')
- dnew = py.std.pickle.load(f)
- assert d == dnew
- finally:
- f.close()
-
- @failsonjywin32
- def test_setmtime(self):
- import tempfile
- import time
- try:
- fd, name = tempfile.mkstemp()
- py.std.os.close(fd)
- except AttributeError:
- name = tempfile.mktemp()
- open(name, 'w').close()
- try:
- mtime = int(time.time())-100
- path = local(name)
- assert path.mtime() != mtime
- path.setmtime(mtime)
- assert path.mtime() == mtime
- path.setmtime()
- assert path.mtime() != mtime
- finally:
- py.std.os.remove(name)
-
- def test_normpath(self, path1):
- new1 = path1.join("/otherdir")
- new2 = path1.join("otherdir")
- assert str(new1) == str(new2)
-
- def test_mkdtemp_creation(self):
- d = local.mkdtemp()
- try:
- assert d.check(dir=1)
- finally:
- d.remove(rec=1)
-
- def test_tmproot(self):
- d = local.mkdtemp()
- tmproot = local.get_temproot()
- try:
- assert d.check(dir=1)
- assert d.dirpath() == tmproot
- finally:
- d.remove(rec=1)
-
- def test_chdir(self, tmpdir):
- old = local()
- try:
- res = tmpdir.chdir()
- assert str(res) == str(old)
- assert py.std.os.getcwd() == str(tmpdir)
- finally:
- old.chdir()
-
- def test_ensure_filepath_withdir(self, tmpdir):
- newfile = tmpdir.join('test1','test')
- newfile.ensure()
- assert newfile.check(file=1)
- newfile.write("42")
- newfile.ensure()
- s = newfile.read()
- assert s == "42"
-
- def test_ensure_filepath_withoutdir(self, tmpdir):
- newfile = tmpdir.join('test1file')
- t = newfile.ensure()
- assert t == newfile
- assert newfile.check(file=1)
-
- def test_ensure_dirpath(self, tmpdir):
- newfile = tmpdir.join('test1','testfile')
- t = newfile.ensure(dir=1)
- assert t == newfile
- assert newfile.check(dir=1)
-
- def test_init_from_path(self, tmpdir):
- l = local()
- l2 = local(l)
- assert l2 == l
-
- wc = py.path.svnwc('.')
- l3 = local(wc)
- assert l3 is not wc
- assert l3.strpath == wc.strpath
- assert not hasattr(l3, 'commit')
-
- @py.test.mark.xfail(run=False, reason="unreliable est for long filenames")
- def test_long_filenames(self, tmpdir):
- if sys.platform == "win32":
- py.test.skip("win32: work around needed for path length limit")
- # see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
-
- # testing paths > 260 chars (which is Windows' limitation, but
- # depending on how the paths are used), but > 4096 (which is the
- # Linux' limitation) - the behaviour of paths with names > 4096 chars
- # is undetermined
- newfilename = '/test' * 60
- l = tmpdir.join(newfilename)
- l.ensure(file=True)
- l.write('foo')
- l2 = tmpdir.join(newfilename)
- assert l2.read() == 'foo'
-
- def test_visit_depth_first(self, tmpdir):
- p1 = tmpdir.ensure("a","1")
- p2 = tmpdir.ensure("b","2")
- p3 = tmpdir.ensure("breadth")
- l = list(tmpdir.visit(lambda x: x.check(file=1)))
- assert len(l) == 3
- # check that breadth comes last
- assert l[2] == p3
-
- def test_visit_rec_fnmatch(self, tmpdir):
- p1 = tmpdir.ensure("a","123")
- p2 = tmpdir.ensure(".b","345")
- l = list(tmpdir.visit("???", rec="[!.]*"))
- assert len(l) == 1
- # check that breadth comes last
- assert l[0] == p1
-
- def test_fnmatch_file_abspath(self, tmpdir):
- b = tmpdir.join("a", "b")
- assert b.fnmatch(os.sep.join("ab"))
- pattern = os.sep.join([str(tmpdir), "*", "b"])
- assert b.fnmatch(pattern)
-
- def test_sysfind(self):
- name = sys.platform == "win32" and "cmd" or "test"
- x = py.path.local.sysfind(name)
- assert x.check(file=1)
- assert py.path.local.sysfind('jaksdkasldqwe') is None
- assert py.path.local.sysfind(name, paths=[]) is None
- x2 = py.path.local.sysfind(name, paths=[x.dirpath()])
- assert x2 == x
-
- def test_fspath_protocol_other_class(self, fake_fspath_obj):
- # py.path is always absolute
- py_path = py.path.local(fake_fspath_obj)
- str_path = fake_fspath_obj.__fspath__()
- assert py_path.check(endswith=str_path)
- assert py_path.join(fake_fspath_obj).strpath == os.path.join(
- py_path.strpath, str_path)
-
-
-class TestExecutionOnWindows:
- pytestmark = win32only
-
- def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
- monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
- tmpdir.ensure("hello")
- h = tmpdir.ensure("hello.bat")
- x = py.path.local.sysfind("hello")
- assert x == h
-
-
-class TestExecution:
- pytestmark = skiponwin32
-
- def test_sysfind_no_permisson_ignored(self, monkeypatch, tmpdir):
- noperm = tmpdir.ensure('noperm', dir=True)
- monkeypatch.setenv("PATH", noperm, prepend=":")
- noperm.chmod(0)
- assert py.path.local.sysfind('jaksdkasldqwe') is None
-
- def test_sysfind_absolute(self):
- x = py.path.local.sysfind('test')
- assert x.check(file=1)
- y = py.path.local.sysfind(str(x))
- assert y.check(file=1)
- assert y == x
-
- def test_sysfind_multiple(self, tmpdir, monkeypatch):
- monkeypatch.setenv('PATH',
- "%s:%s" % (tmpdir.ensure('a'),
- tmpdir.join('b')),
- prepend=":")
- tmpdir.ensure('b', 'a')
- checker = lambda x: x.dirpath().basename == 'b'
- x = py.path.local.sysfind('a', checker=checker)
- assert x.basename == 'a'
- assert x.dirpath().basename == 'b'
- checker = lambda x: None
- assert py.path.local.sysfind('a', checker=checker) is None
-
- def test_sysexec(self):
- x = py.path.local.sysfind('ls')
- out = x.sysexec('-a')
- for x in py.path.local().listdir():
- assert out.find(x.basename) != -1
-
- def test_sysexec_failing(self):
- x = py.path.local.sysfind('false')
- py.test.raises(py.process.cmdexec.Error, """
- x.sysexec('aksjdkasjd')
- """)
-
- def test_make_numbered_dir(self, tmpdir):
- tmpdir.ensure('base.not_an_int', dir=1)
- for i in range(10):
- numdir = local.make_numbered_dir(prefix='base.', rootdir=tmpdir,
- keep=2, lock_timeout=0)
- assert numdir.check()
- assert numdir.basename == 'base.%d' %i
- if i>=1:
- assert numdir.new(ext=str(i-1)).check()
- if i>=2:
- assert numdir.new(ext=str(i-2)).check()
- if i>=3:
- assert not numdir.new(ext=str(i-3)).check()
-
- def test_make_numbered_dir_NotImplemented_Error(self, tmpdir, monkeypatch):
- def notimpl(x, y):
- raise NotImplementedError(42)
- monkeypatch.setattr(py.std.os, 'symlink', notimpl)
- x = tmpdir.make_numbered_dir(rootdir=tmpdir, lock_timeout=0)
- assert x.relto(tmpdir)
- assert x.check()
-
- def test_locked_make_numbered_dir(self, tmpdir):
- for i in range(10):
- numdir = local.make_numbered_dir(prefix='base2.', rootdir=tmpdir,
- keep=2)
- assert numdir.check()
- assert numdir.basename == 'base2.%d' %i
- for j in range(i):
- assert numdir.new(ext=str(j)).check()
-
- def test_error_preservation(self, path1):
- py.test.raises (EnvironmentError, path1.join('qwoeqiwe').mtime)
- py.test.raises (EnvironmentError, path1.join('qwoeqiwe').read)
-
- #def test_parentdirmatch(self):
- # local.parentdirmatch('std', startmodule=__name__)
- #
-
-
-class TestImport:
- def test_pyimport(self, path1):
- obj = path1.join('execfile.py').pyimport()
- assert obj.x == 42
- assert obj.__name__ == 'execfile'
-
- def test_pyimport_renamed_dir_creates_mismatch(self, tmpdir):
- p = tmpdir.ensure("a", "test_x123.py")
- p.pyimport()
- tmpdir.join("a").move(tmpdir.join("b"))
- pytest.raises(tmpdir.ImportMismatchError,
- lambda: tmpdir.join("b", "test_x123.py").pyimport())
-
- def test_pyimport_messy_name(self, tmpdir):
- # http://bitbucket.org/hpk42/py-trunk/issue/129
- path = tmpdir.ensure('foo__init__.py')
- obj = path.pyimport()
-
- def test_pyimport_dir(self, tmpdir):
- p = tmpdir.join("hello_123")
- p_init = p.ensure("__init__.py")
- m = p.pyimport()
- assert m.__name__ == "hello_123"
- m = p_init.pyimport()
- assert m.__name__ == "hello_123"
-
- def test_pyimport_execfile_different_name(self, path1):
- obj = path1.join('execfile.py').pyimport(modname="0x.y.z")
- assert obj.x == 42
- assert obj.__name__ == '0x.y.z'
-
- def test_pyimport_a(self, path1):
- otherdir = path1.join('otherdir')
- mod = otherdir.join('a.py').pyimport()
- assert mod.result == "got it"
- assert mod.__name__ == 'otherdir.a'
-
- def test_pyimport_b(self, path1):
- otherdir = path1.join('otherdir')
- mod = otherdir.join('b.py').pyimport()
- assert mod.stuff == "got it"
- assert mod.__name__ == 'otherdir.b'
-
- def test_pyimport_c(self, path1):
- otherdir = path1.join('otherdir')
- mod = otherdir.join('c.py').pyimport()
- assert mod.value == "got it"
-
- def test_pyimport_d(self, path1):
- otherdir = path1.join('otherdir')
- mod = otherdir.join('d.py').pyimport()
- assert mod.value2 == "got it"
-
- def test_pyimport_and_import(self, tmpdir):
- tmpdir.ensure('xxxpackage', '__init__.py')
- mod1path = tmpdir.ensure('xxxpackage', 'module1.py')
- mod1 = mod1path.pyimport()
- assert mod1.__name__ == 'xxxpackage.module1'
- from xxxpackage import module1
- assert module1 is mod1
-
- def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir):
- name = 'pointsback123'
- ModuleType = type(py.std.os)
- p = tmpdir.ensure(name + '.py')
- for ending in ('.pyc', '$py.class', '.pyo'):
- mod = ModuleType(name)
- pseudopath = tmpdir.ensure(name+ending)
- mod.__file__ = str(pseudopath)
- monkeypatch.setitem(sys.modules, name, mod)
- newmod = p.pyimport()
- assert mod == newmod
- monkeypatch.undo()
- mod = ModuleType(name)
- pseudopath = tmpdir.ensure(name+"123.py")
- mod.__file__ = str(pseudopath)
- monkeypatch.setitem(sys.modules, name, mod)
- excinfo = py.test.raises(pseudopath.ImportMismatchError,
- "p.pyimport()")
- modname, modfile, orig = excinfo.value.args
- assert modname == name
- assert modfile == pseudopath
- assert orig == p
- assert issubclass(pseudopath.ImportMismatchError, ImportError)
-
- def test_issue131_pyimport_on__init__(self, tmpdir):
- # __init__.py files may be namespace packages, and thus the
- # __file__ of an imported module may not be ourselves
- # see issue
- p1 = tmpdir.ensure("proja", "__init__.py")
- p2 = tmpdir.ensure("sub", "proja", "__init__.py")
- m1 = p1.pyimport()
- m2 = p2.pyimport()
- assert m1 == m2
-
- def test_ensuresyspath_append(self, tmpdir):
- root1 = tmpdir.mkdir("root1")
- file1 = root1.ensure("x123.py")
- assert str(root1) not in sys.path
- file1.pyimport(ensuresyspath="append")
- assert str(root1) == sys.path[-1]
- assert str(root1) not in sys.path[:-1]
-
-
-def test_pypkgdir(tmpdir):
- pkg = tmpdir.ensure('pkg1', dir=1)
- pkg.ensure("__init__.py")
- pkg.ensure("subdir/__init__.py")
- assert pkg.pypkgpath() == pkg
- assert pkg.join('subdir', '__init__.py').pypkgpath() == pkg
-
-def test_pypkgdir_unimportable(tmpdir):
- pkg = tmpdir.ensure('pkg1-1', dir=1) # unimportable
- pkg.ensure("__init__.py")
- subdir = pkg.ensure("subdir/__init__.py").dirpath()
- assert subdir.pypkgpath() == subdir
- assert subdir.ensure("xyz.py").pypkgpath() == subdir
- assert not pkg.pypkgpath()
-
-def test_isimportable():
- from py._path.local import isimportable
- assert not isimportable("")
- assert isimportable("x")
- assert isimportable("x1")
- assert isimportable("x_1")
- assert isimportable("_")
- assert isimportable("_1")
- assert not isimportable("x-1")
- assert not isimportable("x:1")
-
-def test_homedir_from_HOME(monkeypatch):
- path = os.getcwd()
- monkeypatch.setenv("HOME", path)
- assert py.path.local._gethomedir() == py.path.local(path)
-
-def test_homedir_not_exists(monkeypatch):
- monkeypatch.delenv("HOME", raising=False)
- monkeypatch.delenv("HOMEDRIVE", raising=False)
- homedir = py.path.local._gethomedir()
- assert homedir is None
-
-def test_samefile(tmpdir):
- assert tmpdir.samefile(tmpdir)
- p = tmpdir.ensure("hello")
- assert p.samefile(p)
- with p.dirpath().as_cwd():
- assert p.samefile(p.basename)
- if sys.platform == "win32":
- p1 = p.__class__(str(p).lower())
- p2 = p.__class__(str(p).upper())
- assert p1.samefile(p2)
-
-def test_listdir_single_arg(tmpdir):
- tmpdir.ensure("hello")
- assert tmpdir.listdir("hello")[0].basename == "hello"
-
-def test_mkdtemp_rootdir(tmpdir):
- dtmp = local.mkdtemp(rootdir=tmpdir)
- assert tmpdir.listdir() == [dtmp]
-
-class TestWINLocalPath:
- pytestmark = win32only
-
- def test_owner_group_not_implemented(self, path1):
- py.test.raises(NotImplementedError, "path1.stat().owner")
- py.test.raises(NotImplementedError, "path1.stat().group")
-
- def test_chmod_simple_int(self, path1):
- py.builtin.print_("path1 is", path1)
- mode = path1.stat().mode
- # Ensure that we actually change the mode to something different.
- path1.chmod(mode == 0 and 1 or 0)
- try:
- print(path1.stat().mode)
- print(mode)
- assert path1.stat().mode != mode
- finally:
- path1.chmod(mode)
- assert path1.stat().mode == mode
-
- def test_path_comparison_lowercase_mixed(self, path1):
- t1 = path1.join("a_path")
- t2 = path1.join("A_path")
- assert t1 == t1
- assert t1 == t2
-
- def test_relto_with_mixed_case(self, path1):
- t1 = path1.join("a_path", "fiLe")
- t2 = path1.join("A_path")
- assert t1.relto(t2) == "fiLe"
-
- def test_allow_unix_style_paths(self, path1):
- t1 = path1.join('a_path')
- assert t1 == str(path1) + '\\a_path'
- t1 = path1.join('a_path/')
- assert t1 == str(path1) + '\\a_path'
- t1 = path1.join('dir/a_path')
- assert t1 == str(path1) + '\\dir\\a_path'
-
- def test_sysfind_in_currentdir(self, path1):
- cmd = py.path.local.sysfind('cmd')
- root = cmd.new(dirname='', basename='') # c:\ in most installations
- with root.as_cwd():
- x = py.path.local.sysfind(cmd.relto(root))
- assert x.check(file=1)
-
- def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
- # path-matching patterns might contain a posix path separator '/'
- # Test that we can match that pattern on windows.
- import posixpath
- b = tmpdir.join("a", "b")
- assert b.fnmatch(posixpath.sep.join("ab"))
- pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
- assert b.fnmatch(pattern)
-
-class TestPOSIXLocalPath:
- pytestmark = skiponwin32
-
- def test_hardlink(self, tmpdir):
- linkpath = tmpdir.join('test')
- filepath = tmpdir.join('file')
- filepath.write("Hello")
- nlink = filepath.stat().nlink
- linkpath.mklinkto(filepath)
- assert filepath.stat().nlink == nlink + 1
-
- def test_symlink_are_identical(self, tmpdir):
- filepath = tmpdir.join('file')
- filepath.write("Hello")
- linkpath = tmpdir.join('test')
- linkpath.mksymlinkto(filepath)
- assert linkpath.readlink() == str(filepath)
-
- def test_symlink_isfile(self, tmpdir):
- linkpath = tmpdir.join('test')
- filepath = tmpdir.join('file')
- filepath.write("")
- linkpath.mksymlinkto(filepath)
- assert linkpath.check(file=1)
- assert not linkpath.check(link=0, file=1)
- assert linkpath.islink()
-
- def test_symlink_relative(self, tmpdir):
- linkpath = tmpdir.join('test')
- filepath = tmpdir.join('file')
- filepath.write("Hello")
- linkpath.mksymlinkto(filepath, absolute=False)
- assert linkpath.readlink() == "file"
- assert filepath.read() == linkpath.read()
-
- def test_symlink_not_existing(self, tmpdir):
- linkpath = tmpdir.join('testnotexisting')
- assert not linkpath.check(link=1)
- assert linkpath.check(link=0)
-
- def test_relto_with_root(self, path1, tmpdir):
- y = path1.join('x').relto(py.path.local('/'))
- assert y[0] == str(path1)[1]
-
- def test_visit_recursive_symlink(self, tmpdir):
- linkpath = tmpdir.join('test')
- linkpath.mksymlinkto(tmpdir)
- visitor = tmpdir.visit(None, lambda x: x.check(link=0))
- assert list(visitor) == [linkpath]
-
- def test_symlink_isdir(self, tmpdir):
- linkpath = tmpdir.join('test')
- linkpath.mksymlinkto(tmpdir)
- assert linkpath.check(dir=1)
- assert not linkpath.check(link=0, dir=1)
-
- def test_symlink_remove(self, tmpdir):
- linkpath = tmpdir.join('test')
- linkpath.mksymlinkto(linkpath) # point to itself
- assert linkpath.check(link=1)
- linkpath.remove()
- assert not linkpath.check()
-
- def test_realpath_file(self, tmpdir):
- linkpath = tmpdir.join('test')
- filepath = tmpdir.join('file')
- filepath.write("")
- linkpath.mksymlinkto(filepath)
- realpath = linkpath.realpath()
- assert realpath.basename == 'file'
-
- def test_owner(self, path1, tmpdir):
- from pwd import getpwuid
- from grp import getgrgid
- stat = path1.stat()
- assert stat.path == path1
-
- uid = stat.uid
- gid = stat.gid
- owner = getpwuid(uid)[0]
- group = getgrgid(gid)[0]
-
- assert uid == stat.uid
- assert owner == stat.owner
- assert gid == stat.gid
- assert group == stat.group
-
- def test_stat_helpers(self, tmpdir, monkeypatch):
- path1 = tmpdir.ensure("file")
- stat1 = path1.stat()
- stat2 = tmpdir.stat()
- assert stat1.isfile()
- assert stat2.isdir()
- assert not stat1.islink()
- assert not stat2.islink()
-
- def test_stat_non_raising(self, tmpdir):
- path1 = tmpdir.join("file")
- pytest.raises(py.error.ENOENT, lambda: path1.stat())
- res = path1.stat(raising=False)
- assert res is None
-
- def test_atime(self, tmpdir):
- import time
- path = tmpdir.ensure('samplefile')
- now = time.time()
- atime1 = path.atime()
- # we could wait here but timer resolution is very
- # system dependent
- path.read()
- time.sleep(ATIME_RESOLUTION)
- atime2 = path.atime()
- time.sleep(ATIME_RESOLUTION)
- duration = time.time() - now
- assert (atime2-atime1) <= duration
-
- def test_commondir(self, path1):
- # XXX This is here in local until we find a way to implement this
- # using the subversion command line api.
- p1 = path1.join('something')
- p2 = path1.join('otherthing')
- assert p1.common(p2) == path1
- assert p2.common(p1) == path1
-
- def test_commondir_nocommon(self, path1):
- # XXX This is here in local until we find a way to implement this
- # using the subversion command line api.
- p1 = path1.join('something')
- p2 = py.path.local(path1.sep+'blabla')
- assert p1.common(p2) == '/'
-
- def test_join_to_root(self, path1):
- root = path1.parts()[0]
- assert len(str(root)) == 1
- assert str(root.join('a')) == '//a' # posix allows two slashes
-
- def test_join_root_to_root_with_no_abs(self, path1):
- nroot = path1.join('/')
- assert str(path1) == str(nroot)
- assert path1 == nroot
-
- def test_chmod_simple_int(self, path1):
- mode = path1.stat().mode
- path1.chmod(int(mode/2))
- try:
- assert path1.stat().mode != mode
- finally:
- path1.chmod(mode)
- assert path1.stat().mode == mode
-
- def test_chmod_rec_int(self, path1):
- # XXX fragile test
- recfilter = lambda x: x.check(dotfile=0, link=0)
- oldmodes = {}
- for x in path1.visit(rec=recfilter):
- oldmodes[x] = x.stat().mode
- path1.chmod(int("772", 8), rec=recfilter)
- try:
- for x in path1.visit(rec=recfilter):
- assert x.stat().mode & int("777", 8) == int("772", 8)
- finally:
- for x,y in oldmodes.items():
- x.chmod(y)
-
- def test_copy_archiving(self, tmpdir):
- unicode_fn = u"something-\342\200\223.txt"
- f = tmpdir.ensure("a", unicode_fn)
- a = f.dirpath()
- oldmode = f.stat().mode
- newmode = oldmode ^ 1
- f.chmod(newmode)
- b = tmpdir.join("b")
- a.copy(b, mode=True)
- assert b.join(f.basename).stat().mode == newmode
-
- def test_copy_stat_file(self, tmpdir):
- src = tmpdir.ensure('src')
- dst = tmpdir.join('dst')
- # a small delay before the copy
- time.sleep(ATIME_RESOLUTION)
- src.copy(dst, stat=True)
- oldstat = src.stat()
- newstat = dst.stat()
- assert oldstat.mode == newstat.mode
- assert (dst.atime() - src.atime()) < ATIME_RESOLUTION
- assert (dst.mtime() - src.mtime()) < ATIME_RESOLUTION
-
- def test_copy_stat_dir(self, tmpdir):
- test_files = ['a', 'b', 'c']
- src = tmpdir.join('src')
- for f in test_files:
- src.join(f).write(f, ensure=True)
- dst = tmpdir.join('dst')
- # a small delay before the copy
- time.sleep(ATIME_RESOLUTION)
- src.copy(dst, stat=True)
- for f in test_files:
- oldstat = src.join(f).stat()
- newstat = dst.join(f).stat()
- assert (newstat.atime - oldstat.atime) < ATIME_RESOLUTION
- assert (newstat.mtime - oldstat.mtime) < ATIME_RESOLUTION
- assert oldstat.mode == newstat.mode
-
- @failsonjython
- def test_chown_identity(self, path1):
- owner = path1.stat().owner
- group = path1.stat().group
- path1.chown(owner, group)
-
- @failsonjython
- def test_chown_dangling_link(self, path1):
- owner = path1.stat().owner
- group = path1.stat().group
- x = path1.join('hello')
- x.mksymlinkto('qlwkejqwlek')
- try:
- path1.chown(owner, group, rec=1)
- finally:
- x.remove(rec=0)
-
- @failsonjython
- def test_chown_identity_rec_mayfail(self, path1):
- owner = path1.stat().owner
- group = path1.stat().group
- path1.chown(owner, group)
-
-
-class TestUnicodePy2Py3:
- def test_join_ensure(self, tmpdir, monkeypatch):
- if sys.version_info >= (3,0) and "LANG" not in os.environ:
- pytest.skip("cannot run test without locale")
- x = py.path.local(tmpdir.strpath)
- part = "hällo"
- y = x.ensure(part)
- assert x.join(part) == y
-
- def test_listdir(self, tmpdir):
- if sys.version_info >= (3,0) and "LANG" not in os.environ:
- pytest.skip("cannot run test without locale")
- x = py.path.local(tmpdir.strpath)
- part = "hällo"
- y = x.ensure(part)
- assert x.listdir(part)[0] == y
-
- @pytest.mark.xfail(reason="changing read/write might break existing usages")
- def test_read_write(self, tmpdir):
- x = tmpdir.join("hello")
- part = py.builtin._totext("hällo", "utf8")
- x.write(part)
- assert x.read() == part
- x.write(part.encode(sys.getdefaultencoding()))
- assert x.read() == part.encode(sys.getdefaultencoding())
-
-class TestBinaryAndTextMethods:
- def test_read_binwrite(self, tmpdir):
- x = tmpdir.join("hello")
- part = py.builtin._totext("hällo", "utf8")
- part_utf8 = part.encode("utf8")
- x.write_binary(part_utf8)
- assert x.read_binary() == part_utf8
- s = x.read_text(encoding="utf8")
- assert s == part
- assert py.builtin._istext(s)
-
- def test_read_textwrite(self, tmpdir):
- x = tmpdir.join("hello")
- part = py.builtin._totext("hällo", "utf8")
- part_utf8 = part.encode("utf8")
- x.write_text(part, encoding="utf8")
- assert x.read_binary() == part_utf8
- assert x.read_text(encoding="utf8") == part
-
- def test_default_encoding(self, tmpdir):
- x = tmpdir.join("hello")
- # Can't use UTF8 as the default encoding (ASCII) doesn't support it
- part = py.builtin._totext("hello", "ascii")
- x.write_text(part, "ascii")
- s = x.read_text("ascii")
- assert s == part
- assert type(s) == type(part)
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+import time
+import py
+import pytest
+import os, sys
+from py.path import local
+import common
+
+failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
+failsonjywin32 = py.test.mark.xfail("sys.platform.startswith('java') "
+ "and getattr(os, '_name', None) == 'nt'")
+win32only = py.test.mark.skipif(
+ "not (sys.platform == 'win32' or getattr(os, '_name', None) == 'nt')")
+skiponwin32 = py.test.mark.skipif(
+ "sys.platform == 'win32' or getattr(os, '_name', None) == 'nt'")
+
+ATIME_RESOLUTION = 0.01
+
+
+def pytest_funcarg__path1(request):
+ def setup():
+ path1 = request.getfuncargvalue("tmpdir")
+ common.setuptestfs(path1)
+ return path1
+ def teardown(path1):
+ # post check
+ assert path1.join("samplefile").check()
+ return request.cached_setup(setup, teardown, scope="session")
+
+def pytest_funcarg__fake_fspath_obj(request):
+ class FakeFSPathClass(object):
+ def __init__(self, path):
+ self._path = path
+ def __fspath__(self):
+ return self._path
+ return FakeFSPathClass(os.path.join("this", "is", "a", "fake", "path"))
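+
+# Editor's illustrative sketch (not part of the upstream patch): FakeFSPathClass
+# mimics the PEP 519 __fspath__ protocol, so any object exposing __fspath__()
+# can be handed to py.path.local, e.g.:
+#
+#   py.path.local(FakeFSPathClass(os.path.join("some", "dir")))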
+
+class TestLocalPath(common.CommonFSTests):
+ def test_join_normpath(self, tmpdir):
+ assert tmpdir.join(".") == tmpdir
+ p = tmpdir.join("../%s" % tmpdir.basename)
+ assert p == tmpdir
+ p = tmpdir.join("..//%s/" % tmpdir.basename)
+ assert p == tmpdir
+
+ @skiponwin32
+ def test_dirpath_abs_no_abs(self, tmpdir):
+ p = tmpdir.join('foo')
+ assert p.dirpath('/bar') == tmpdir.join('bar')
+ assert tmpdir.dirpath('/bar', abs=True) == py.path.local('/bar')
+
+ def test_gethash(self, tmpdir):
+ md5 = py.builtin._tryimport('md5', 'hashlib').md5
+ lib = py.builtin._tryimport('sha', 'hashlib')
+ sha = getattr(lib, 'sha1', getattr(lib, 'sha', None))
+ fn = tmpdir.join("testhashfile")
+ data = 'hello'.encode('ascii')
+ fn.write(data, mode="wb")
+ assert fn.computehash("md5") == md5(data).hexdigest()
+ assert fn.computehash("sha1") == sha(data).hexdigest()
+ py.test.raises(ValueError, fn.computehash, "asdasd")
+
+ def test_remove_removes_readonly_file(self, tmpdir):
+ readonly_file = tmpdir.join('readonly').ensure()
+ readonly_file.chmod(0)
+ readonly_file.remove()
+ assert not readonly_file.check(exists=1)
+
+ def test_remove_removes_readonly_dir(self, tmpdir):
+ readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
+ readonly_dir.chmod(int("500", 8))
+ readonly_dir.remove()
+ assert not readonly_dir.check(exists=1)
+
+ def test_remove_removes_dir_and_readonly_file(self, tmpdir):
+ readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
+ readonly_file = readonly_dir.join('readonlyfile').ensure()
+ readonly_file.chmod(0)
+ readonly_dir.remove()
+ assert not readonly_dir.check(exists=1)
+
+ def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
+ l = []
+ monkeypatch.setattr(py.std.shutil, 'rmtree',
+ lambda *args, **kwargs: l.append(kwargs))
+ tmpdir.remove()
+ assert not l[0]['ignore_errors']
+ for val in (True, False):
+ l[:] = []
+ tmpdir.remove(ignore_errors=val)
+ assert l[0]['ignore_errors'] == val
+
+ def test_initialize_curdir(self):
+ assert str(local()) == py.std.os.getcwd()
+
+ @skiponwin32
+ def test_chdir_gone(self, path1):
+ p = path1.ensure("dir_to_be_removed", dir=1)
+ p.chdir()
+ p.remove()
+ pytest.raises(py.error.ENOENT, py.path.local)
+ assert path1.chdir() is None
+ assert os.getcwd() == str(path1)
+
+ def test_as_cwd(self, path1):
+ dir = path1.ensure("subdir", dir=1)
+ old = py.path.local()
+ with dir.as_cwd() as x:
+ assert x == old
+ assert py.path.local() == dir
+ assert os.getcwd() == str(old)
+
+ def test_as_cwd_exception(self, path1):
+ old = py.path.local()
+ dir = path1.ensure("subdir", dir=1)
+ with pytest.raises(ValueError):
+ with dir.as_cwd():
+ raise ValueError()
+ assert old == py.path.local()
+
+ def test_initialize_reldir(self, path1):
+ with path1.as_cwd():
+ p = local('samplefile')
+ assert p.check()
+
+ @pytest.mark.xfail("sys.version_info < (2,6) and sys.platform == 'win32'")
+ def test_tilde_expansion(self, monkeypatch, tmpdir):
+ monkeypatch.setenv("HOME", str(tmpdir))
+ p = py.path.local("~", expanduser=True)
+ assert p == os.path.expanduser("~")
+
+ def test_eq_with_strings(self, path1):
+ path1 = path1.join('sampledir')
+ path2 = str(path1)
+ assert path1 == path2
+ assert path2 == path1
+ path3 = path1.join('samplefile')
+ assert path3 != path2
+ assert path2 != path3
+
+ def test_eq_with_none(self, path1):
+ assert path1 != None
+
+ def test_eq_non_ascii_unicode(self, path1):
+ path2 = path1.join(u'temp')
+ path3 = path1.join(u'ação')
+ path4 = path1.join(u'ディレクトリ')
+
+ assert path2 != path3
+ assert path2 != path4
+ assert path4 != path3
+
+ def test_gt_with_strings(self, path1):
+ path2 = path1.join('sampledir')
+ path3 = str(path1.join("ttt"))
+ assert path3 > path2
+ assert path2 < path3
+ assert path2 < "ttt"
+ assert "ttt" > path2
+ path4 = path1.join("aaa")
+ l = [path2, path4,path3]
+ assert sorted(l) == [path4, path2, path3]
+
+ def test_open_and_ensure(self, path1):
+ p = path1.join("sub1", "sub2", "file")
+ with p.open("w", ensure=1) as f:
+ f.write("hello")
+ assert p.read() == "hello"
+
+ def test_write_and_ensure(self, path1):
+ p = path1.join("sub1", "sub2", "file")
+ p.write("hello", ensure=1)
+ assert p.read() == "hello"
+
+ @py.test.mark.multi(bin=(False, True))
+ def test_dump(self, tmpdir, bin):
+ path = tmpdir.join("dumpfile%s" % int(bin))
+ try:
+ d = {'answer' : 42}
+ path.dump(d, bin=bin)
+ f = path.open('rb+')
+ dnew = py.std.pickle.load(f)
+ assert d == dnew
+ finally:
+ f.close()
+
+ @failsonjywin32
+ def test_setmtime(self):
+ import tempfile
+ import time
+ try:
+ fd, name = tempfile.mkstemp()
+ py.std.os.close(fd)
+ except AttributeError:
+ name = tempfile.mktemp()
+ open(name, 'w').close()
+ try:
+ mtime = int(time.time())-100
+ path = local(name)
+ assert path.mtime() != mtime
+ path.setmtime(mtime)
+ assert path.mtime() == mtime
+ path.setmtime()
+ assert path.mtime() != mtime
+ finally:
+ py.std.os.remove(name)
+
+ def test_normpath(self, path1):
+ new1 = path1.join("/otherdir")
+ new2 = path1.join("otherdir")
+ assert str(new1) == str(new2)
+
+ def test_mkdtemp_creation(self):
+ d = local.mkdtemp()
+ try:
+ assert d.check(dir=1)
+ finally:
+ d.remove(rec=1)
+
+ def test_tmproot(self):
+ d = local.mkdtemp()
+ tmproot = local.get_temproot()
+ try:
+ assert d.check(dir=1)
+ assert d.dirpath() == tmproot
+ finally:
+ d.remove(rec=1)
+
+ def test_chdir(self, tmpdir):
+ old = local()
+ try:
+ res = tmpdir.chdir()
+ assert str(res) == str(old)
+ assert py.std.os.getcwd() == str(tmpdir)
+ finally:
+ old.chdir()
+
+ def test_ensure_filepath_withdir(self, tmpdir):
+ newfile = tmpdir.join('test1','test')
+ newfile.ensure()
+ assert newfile.check(file=1)
+ newfile.write("42")
+ newfile.ensure()
+ s = newfile.read()
+ assert s == "42"
+
+ def test_ensure_filepath_withoutdir(self, tmpdir):
+ newfile = tmpdir.join('test1file')
+ t = newfile.ensure()
+ assert t == newfile
+ assert newfile.check(file=1)
+
+ def test_ensure_dirpath(self, tmpdir):
+ newfile = tmpdir.join('test1','testfile')
+ t = newfile.ensure(dir=1)
+ assert t == newfile
+ assert newfile.check(dir=1)
+
+ def test_ensure_non_ascii_unicode(self, tmpdir):
+ newfile = tmpdir.join(u'ação',u'ディレクトリ')
+ t = newfile.ensure(dir=1)
+ assert t == newfile
+ assert newfile.check(dir=1)
+
+ def test_init_from_path(self, tmpdir):
+ l = local()
+ l2 = local(l)
+ assert l2 == l
+
+ wc = py.path.svnwc('.')
+ l3 = local(wc)
+ assert l3 is not wc
+ assert l3.strpath == wc.strpath
+ assert not hasattr(l3, 'commit')
+
+ @py.test.mark.xfail(run=False, reason="unreliable test for long filenames")
+ def test_long_filenames(self, tmpdir):
+ if sys.platform == "win32":
+ py.test.skip("win32: work around needed for path length limit")
+ # see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
+
+ # testing paths > 260 chars (which is Windows' limitation, depending
+ # on how the paths are used), but < 4096 chars (which is the
+ # Linux limitation) - the behaviour of paths with names > 4096 chars
+ # is undetermined
+ newfilename = '/test' * 60
+ l = tmpdir.join(newfilename)
+ l.ensure(file=True)
+ l.write('foo')
+ l2 = tmpdir.join(newfilename)
+ assert l2.read() == 'foo'
+
+ def test_visit_depth_first(self, tmpdir):
+ p1 = tmpdir.ensure("a","1")
+ p2 = tmpdir.ensure("b","2")
+ p3 = tmpdir.ensure("breadth")
+ l = list(tmpdir.visit(lambda x: x.check(file=1)))
+ assert len(l) == 3
+ # check that breadth comes last
+ assert l[2] == p3
+
+ def test_visit_rec_fnmatch(self, tmpdir):
+ p1 = tmpdir.ensure("a","123")
+ p2 = tmpdir.ensure(".b","345")
+ l = list(tmpdir.visit("???", rec="[!.]*"))
+ assert len(l) == 1
+ # check that breadth comes last
+ assert l[0] == p1
+
+ def test_fnmatch_file_abspath(self, tmpdir):
+ b = tmpdir.join("a", "b")
+ assert b.fnmatch(os.sep.join("ab"))
+ pattern = os.sep.join([str(tmpdir), "*", "b"])
+ assert b.fnmatch(pattern)
+
+ def test_sysfind(self):
+ name = sys.platform == "win32" and "cmd" or "test"
+ x = py.path.local.sysfind(name)
+ assert x.check(file=1)
+ assert py.path.local.sysfind('jaksdkasldqwe') is None
+ assert py.path.local.sysfind(name, paths=[]) is None
+ x2 = py.path.local.sysfind(name, paths=[x.dirpath()])
+ assert x2 == x
+
+ def test_fspath_protocol_other_class(self, fake_fspath_obj):
+ # py.path is always absolute
+ py_path = py.path.local(fake_fspath_obj)
+ str_path = fake_fspath_obj.__fspath__()
+ assert py_path.check(endswith=str_path)
+ assert py_path.join(fake_fspath_obj).strpath == os.path.join(
+ py_path.strpath, str_path)
+
+
+class TestExecutionOnWindows:
+ pytestmark = win32only
+
+ def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
+ monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
+ tmpdir.ensure("hello")
+ h = tmpdir.ensure("hello.bat")
+ x = py.path.local.sysfind("hello")
+ assert x == h
+
+
+class TestExecution:
+ pytestmark = skiponwin32
+
+ def test_sysfind_no_permisson_ignored(self, monkeypatch, tmpdir):
+ noperm = tmpdir.ensure('noperm', dir=True)
+ monkeypatch.setenv("PATH", noperm, prepend=":")
+ noperm.chmod(0)
+ assert py.path.local.sysfind('jaksdkasldqwe') is None
+
+ def test_sysfind_absolute(self):
+ x = py.path.local.sysfind('test')
+ assert x.check(file=1)
+ y = py.path.local.sysfind(str(x))
+ assert y.check(file=1)
+ assert y == x
+
+ def test_sysfind_multiple(self, tmpdir, monkeypatch):
+ monkeypatch.setenv('PATH',
+ "%s:%s" % (tmpdir.ensure('a'),
+ tmpdir.join('b')),
+ prepend=":")
+ tmpdir.ensure('b', 'a')
+ checker = lambda x: x.dirpath().basename == 'b'
+ x = py.path.local.sysfind('a', checker=checker)
+ assert x.basename == 'a'
+ assert x.dirpath().basename == 'b'
+ checker = lambda x: None
+ assert py.path.local.sysfind('a', checker=checker) is None
+
+ def test_sysexec(self):
+ x = py.path.local.sysfind('ls')
+ out = x.sysexec('-a')
+ for x in py.path.local().listdir():
+ assert out.find(x.basename) != -1
+
+ def test_sysexec_failing(self):
+ x = py.path.local.sysfind('false')
+ py.test.raises(py.process.cmdexec.Error, """
+ x.sysexec('aksjdkasjd')
+ """)
+
+ def test_make_numbered_dir(self, tmpdir):
+ tmpdir.ensure('base.not_an_int', dir=1)
+ for i in range(10):
+ numdir = local.make_numbered_dir(prefix='base.', rootdir=tmpdir,
+ keep=2, lock_timeout=0)
+ assert numdir.check()
+ assert numdir.basename == 'base.%d' %i
+ if i>=1:
+ assert numdir.new(ext=str(i-1)).check()
+ if i>=2:
+ assert numdir.new(ext=str(i-2)).check()
+ if i>=3:
+ assert not numdir.new(ext=str(i-3)).check()
+
+ def test_make_numbered_dir_NotImplemented_Error(self, tmpdir, monkeypatch):
+ def notimpl(x, y):
+ raise NotImplementedError(42)
+ monkeypatch.setattr(py.std.os, 'symlink', notimpl)
+ x = tmpdir.make_numbered_dir(rootdir=tmpdir, lock_timeout=0)
+ assert x.relto(tmpdir)
+ assert x.check()
+
+ def test_locked_make_numbered_dir(self, tmpdir):
+ for i in range(10):
+ numdir = local.make_numbered_dir(prefix='base2.', rootdir=tmpdir,
+ keep=2)
+ assert numdir.check()
+ assert numdir.basename == 'base2.%d' %i
+ for j in range(i):
+ assert numdir.new(ext=str(j)).check()
+
+ def test_error_preservation(self, path1):
+ py.test.raises (EnvironmentError, path1.join('qwoeqiwe').mtime)
+ py.test.raises (EnvironmentError, path1.join('qwoeqiwe').read)
+
+ #def test_parentdirmatch(self):
+ # local.parentdirmatch('std', startmodule=__name__)
+ #
+
+
+class TestImport:
+ def test_pyimport(self, path1):
+ obj = path1.join('execfile.py').pyimport()
+ assert obj.x == 42
+ assert obj.__name__ == 'execfile'
+
+ def test_pyimport_renamed_dir_creates_mismatch(self, tmpdir):
+ p = tmpdir.ensure("a", "test_x123.py")
+ p.pyimport()
+ tmpdir.join("a").move(tmpdir.join("b"))
+ pytest.raises(tmpdir.ImportMismatchError,
+ lambda: tmpdir.join("b", "test_x123.py").pyimport())
+
+ def test_pyimport_messy_name(self, tmpdir):
+ # http://bitbucket.org/hpk42/py-trunk/issue/129
+ path = tmpdir.ensure('foo__init__.py')
+ obj = path.pyimport()
+
+ def test_pyimport_dir(self, tmpdir):
+ p = tmpdir.join("hello_123")
+ p_init = p.ensure("__init__.py")
+ m = p.pyimport()
+ assert m.__name__ == "hello_123"
+ m = p_init.pyimport()
+ assert m.__name__ == "hello_123"
+
+ def test_pyimport_execfile_different_name(self, path1):
+ obj = path1.join('execfile.py').pyimport(modname="0x.y.z")
+ assert obj.x == 42
+ assert obj.__name__ == '0x.y.z'
+
+ def test_pyimport_a(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('a.py').pyimport()
+ assert mod.result == "got it"
+ assert mod.__name__ == 'otherdir.a'
+
+ def test_pyimport_b(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('b.py').pyimport()
+ assert mod.stuff == "got it"
+ assert mod.__name__ == 'otherdir.b'
+
+ def test_pyimport_c(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('c.py').pyimport()
+ assert mod.value == "got it"
+
+ def test_pyimport_d(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('d.py').pyimport()
+ assert mod.value2 == "got it"
+
+ def test_pyimport_and_import(self, tmpdir):
+ tmpdir.ensure('xxxpackage', '__init__.py')
+ mod1path = tmpdir.ensure('xxxpackage', 'module1.py')
+ mod1 = mod1path.pyimport()
+ assert mod1.__name__ == 'xxxpackage.module1'
+ from xxxpackage import module1
+ assert module1 is mod1
+
+ def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir):
+ name = 'pointsback123'
+ ModuleType = type(py.std.os)
+ p = tmpdir.ensure(name + '.py')
+ for ending in ('.pyc', '$py.class', '.pyo'):
+ mod = ModuleType(name)
+ pseudopath = tmpdir.ensure(name+ending)
+ mod.__file__ = str(pseudopath)
+ monkeypatch.setitem(sys.modules, name, mod)
+ newmod = p.pyimport()
+ assert mod == newmod
+ monkeypatch.undo()
+ mod = ModuleType(name)
+ pseudopath = tmpdir.ensure(name+"123.py")
+ mod.__file__ = str(pseudopath)
+ monkeypatch.setitem(sys.modules, name, mod)
+ excinfo = py.test.raises(pseudopath.ImportMismatchError,
+ "p.pyimport()")
+ modname, modfile, orig = excinfo.value.args
+ assert modname == name
+ assert modfile == pseudopath
+ assert orig == p
+ assert issubclass(pseudopath.ImportMismatchError, ImportError)
+
+ def test_issue131_pyimport_on__init__(self, tmpdir):
+ # __init__.py files may be namespace packages, and thus the
+ # __file__ of an imported module may not be ourselves
+ # see issue
+ p1 = tmpdir.ensure("proja", "__init__.py")
+ p2 = tmpdir.ensure("sub", "proja", "__init__.py")
+ m1 = p1.pyimport()
+ m2 = p2.pyimport()
+ assert m1 == m2
+
+ def test_ensuresyspath_append(self, tmpdir):
+ root1 = tmpdir.mkdir("root1")
+ file1 = root1.ensure("x123.py")
+ assert str(root1) not in sys.path
+ file1.pyimport(ensuresyspath="append")
+ assert str(root1) == sys.path[-1]
+ assert str(root1) not in sys.path[:-1]
+
+
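+# A minimal pyimport() sketch (illustrative only, mirroring the TestImport
+# cases above; 'somepkg' is a hypothetical name):
+#
+#     p = tmpdir.join('somepkg')      # package directory
+#     p.ensure('__init__.py')
+#     assert p.pyimport().__name__ == 'somepkg'
+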
+def test_pypkgdir(tmpdir):
+ pkg = tmpdir.ensure('pkg1', dir=1)
+ pkg.ensure("__init__.py")
+ pkg.ensure("subdir/__init__.py")
+ assert pkg.pypkgpath() == pkg
+ assert pkg.join('subdir', '__init__.py').pypkgpath() == pkg
+
+def test_pypkgdir_unimportable(tmpdir):
+ pkg = tmpdir.ensure('pkg1-1', dir=1) # unimportable
+ pkg.ensure("__init__.py")
+ subdir = pkg.ensure("subdir/__init__.py").dirpath()
+ assert subdir.pypkgpath() == subdir
+ assert subdir.ensure("xyz.py").pypkgpath() == subdir
+ assert not pkg.pypkgpath()
+
+def test_isimportable():
+ from py._path.local import isimportable
+ assert not isimportable("")
+ assert isimportable("x")
+ assert isimportable("x1")
+ assert isimportable("x_1")
+ assert isimportable("_")
+ assert isimportable("_1")
+ assert not isimportable("x-1")
+ assert not isimportable("x:1")
+
+def test_homedir_from_HOME(monkeypatch):
+ path = os.getcwd()
+ monkeypatch.setenv("HOME", path)
+ assert py.path.local._gethomedir() == py.path.local(path)
+
+def test_homedir_not_exists(monkeypatch):
+ monkeypatch.delenv("HOME", raising=False)
+ monkeypatch.delenv("HOMEDRIVE", raising=False)
+ homedir = py.path.local._gethomedir()
+ assert homedir is None
+
+def test_samefile(tmpdir):
+ assert tmpdir.samefile(tmpdir)
+ p = tmpdir.ensure("hello")
+ assert p.samefile(p)
+ with p.dirpath().as_cwd():
+ assert p.samefile(p.basename)
+ if sys.platform == "win32":
+ p1 = p.__class__(str(p).lower())
+ p2 = p.__class__(str(p).upper())
+ assert p1.samefile(p2)
+
+def test_listdir_single_arg(tmpdir):
+ tmpdir.ensure("hello")
+ assert tmpdir.listdir("hello")[0].basename == "hello"
+
+def test_mkdtemp_rootdir(tmpdir):
+ dtmp = local.mkdtemp(rootdir=tmpdir)
+ assert tmpdir.listdir() == [dtmp]
+
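+# Temporary-directory helpers in a nutshell (an illustrative sketch based on
+# the tests above; exact cleanup depends on 'keep' and lock timeouts):
+#
+#     d = local.make_numbered_dir(prefix='run.', rootdir=tmpdir, keep=2)
+#     t = local.mkdtemp(rootdir=tmpdir)
+#     assert d.check(dir=1) and t.check(dir=1)
+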
+class TestWINLocalPath:
+ pytestmark = win32only
+
+ def test_owner_group_not_implemented(self, path1):
+ py.test.raises(NotImplementedError, "path1.stat().owner")
+ py.test.raises(NotImplementedError, "path1.stat().group")
+
+ def test_chmod_simple_int(self, path1):
+ py.builtin.print_("path1 is", path1)
+ mode = path1.stat().mode
+ # Ensure that we actually change the mode to something different.
+ path1.chmod(mode == 0 and 1 or 0)
+ try:
+ print(path1.stat().mode)
+ print(mode)
+ assert path1.stat().mode != mode
+ finally:
+ path1.chmod(mode)
+ assert path1.stat().mode == mode
+
+ def test_path_comparison_lowercase_mixed(self, path1):
+ t1 = path1.join("a_path")
+ t2 = path1.join("A_path")
+ assert t1 == t1
+ assert t1 == t2
+
+ def test_relto_with_mixed_case(self, path1):
+ t1 = path1.join("a_path", "fiLe")
+ t2 = path1.join("A_path")
+ assert t1.relto(t2) == "fiLe"
+
+ def test_allow_unix_style_paths(self, path1):
+ t1 = path1.join('a_path')
+ assert t1 == str(path1) + '\\a_path'
+ t1 = path1.join('a_path/')
+ assert t1 == str(path1) + '\\a_path'
+ t1 = path1.join('dir/a_path')
+ assert t1 == str(path1) + '\\dir\\a_path'
+
+ def test_sysfind_in_currentdir(self, path1):
+ cmd = py.path.local.sysfind('cmd')
+ root = cmd.new(dirname='', basename='') # c:\ in most installations
+ with root.as_cwd():
+ x = py.path.local.sysfind(cmd.relto(root))
+ assert x.check(file=1)
+
+ def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
+ # path-matching patterns might contain a posix path separator '/'
+ # Test that we can match that pattern on windows.
+ import posixpath
+ b = tmpdir.join("a", "b")
+ assert b.fnmatch(posixpath.sep.join("ab"))
+ pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
+ assert b.fnmatch(pattern)
+
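+# fnmatch() with posix separators, in short (illustrative; the same call as
+# the win32 test above):
+#
+#     tmpdir.join("a", "b").fnmatch("a/b")   # '/' matches even where os.sep is '\\'
+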
+class TestPOSIXLocalPath:
+ pytestmark = skiponwin32
+
+ def test_hardlink(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("Hello")
+ nlink = filepath.stat().nlink
+ linkpath.mklinkto(filepath)
+ assert filepath.stat().nlink == nlink + 1
+
+ def test_symlink_are_identical(self, tmpdir):
+ filepath = tmpdir.join('file')
+ filepath.write("Hello")
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(filepath)
+ assert linkpath.readlink() == str(filepath)
+
+ def test_symlink_isfile(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("")
+ linkpath.mksymlinkto(filepath)
+ assert linkpath.check(file=1)
+ assert not linkpath.check(link=0, file=1)
+ assert linkpath.islink()
+
+ def test_symlink_relative(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("Hello")
+ linkpath.mksymlinkto(filepath, absolute=False)
+ assert linkpath.readlink() == "file"
+ assert filepath.read() == linkpath.read()
+
+ def test_symlink_not_existing(self, tmpdir):
+ linkpath = tmpdir.join('testnotexisting')
+ assert not linkpath.check(link=1)
+ assert linkpath.check(link=0)
+
+ def test_relto_with_root(self, path1, tmpdir):
+ y = path1.join('x').relto(py.path.local('/'))
+ assert y[0] == str(path1)[1]
+
+ def test_visit_recursive_symlink(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(tmpdir)
+ visitor = tmpdir.visit(None, lambda x: x.check(link=0))
+ assert list(visitor) == [linkpath]
+
+ def test_symlink_isdir(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(tmpdir)
+ assert linkpath.check(dir=1)
+ assert not linkpath.check(link=0, dir=1)
+
+ def test_symlink_remove(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(linkpath) # point to itself
+ assert linkpath.check(link=1)
+ linkpath.remove()
+ assert not linkpath.check()
+
+ def test_realpath_file(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("")
+ linkpath.mksymlinkto(filepath)
+ realpath = linkpath.realpath()
+ assert realpath.basename == 'file'
+
+ def test_owner(self, path1, tmpdir):
+ from pwd import getpwuid
+ from grp import getgrgid
+ stat = path1.stat()
+ assert stat.path == path1
+
+ uid = stat.uid
+ gid = stat.gid
+ owner = getpwuid(uid)[0]
+ group = getgrgid(gid)[0]
+
+ assert uid == stat.uid
+ assert owner == stat.owner
+ assert gid == stat.gid
+ assert group == stat.group
+
+ def test_stat_helpers(self, tmpdir, monkeypatch):
+ path1 = tmpdir.ensure("file")
+ stat1 = path1.stat()
+ stat2 = tmpdir.stat()
+ assert stat1.isfile()
+ assert stat2.isdir()
+ assert not stat1.islink()
+ assert not stat2.islink()
+
+ def test_stat_non_raising(self, tmpdir):
+ path1 = tmpdir.join("file")
+ pytest.raises(py.error.ENOENT, lambda: path1.stat())
+ res = path1.stat(raising=False)
+ assert res is None
+
+ def test_atime(self, tmpdir):
+ import time
+ path = tmpdir.ensure('samplefile')
+ now = time.time()
+ atime1 = path.atime()
+ # we could wait here but timer resolution is very
+ # system dependent
+ path.read()
+ time.sleep(ATIME_RESOLUTION)
+ atime2 = path.atime()
+ time.sleep(ATIME_RESOLUTION)
+ duration = time.time() - now
+ assert (atime2-atime1) <= duration
+
+ def test_commondir(self, path1):
+ # XXX This is here in local until we find a way to implement this
+ # using the subversion command line api.
+ p1 = path1.join('something')
+ p2 = path1.join('otherthing')
+ assert p1.common(p2) == path1
+ assert p2.common(p1) == path1
+
+ def test_commondir_nocommon(self, path1):
+ # XXX This is here in local until we find a way to implement this
+ # using the subversion command line api.
+ p1 = path1.join('something')
+ p2 = py.path.local(path1.sep+'blabla')
+ assert p1.common(p2) == '/'
+
+ def test_join_to_root(self, path1):
+ root = path1.parts()[0]
+ assert len(str(root)) == 1
+ assert str(root.join('a')) == '//a' # posix allows two slashes
+
+ def test_join_root_to_root_with_no_abs(self, path1):
+ nroot = path1.join('/')
+ assert str(path1) == str(nroot)
+ assert path1 == nroot
+
+ def test_chmod_simple_int(self, path1):
+ mode = path1.stat().mode
+ path1.chmod(int(mode/2))
+ try:
+ assert path1.stat().mode != mode
+ finally:
+ path1.chmod(mode)
+ assert path1.stat().mode == mode
+
+ def test_chmod_rec_int(self, path1):
+ # XXX fragile test
+ recfilter = lambda x: x.check(dotfile=0, link=0)
+ oldmodes = {}
+ for x in path1.visit(rec=recfilter):
+ oldmodes[x] = x.stat().mode
+ path1.chmod(int("772", 8), rec=recfilter)
+ try:
+ for x in path1.visit(rec=recfilter):
+ assert x.stat().mode & int("777", 8) == int("772", 8)
+ finally:
+ for x,y in oldmodes.items():
+ x.chmod(y)
+
+ def test_copy_archiving(self, tmpdir):
+ unicode_fn = u"something-\342\200\223.txt"
+ f = tmpdir.ensure("a", unicode_fn)
+ a = f.dirpath()
+ oldmode = f.stat().mode
+ newmode = oldmode ^ 1
+ f.chmod(newmode)
+ b = tmpdir.join("b")
+ a.copy(b, mode=True)
+ assert b.join(f.basename).stat().mode == newmode
+
+ def test_copy_stat_file(self, tmpdir):
+ src = tmpdir.ensure('src')
+ dst = tmpdir.join('dst')
+ # a small delay before the copy
+ time.sleep(ATIME_RESOLUTION)
+ src.copy(dst, stat=True)
+ oldstat = src.stat()
+ newstat = dst.stat()
+ assert oldstat.mode == newstat.mode
+ assert (dst.atime() - src.atime()) < ATIME_RESOLUTION
+ assert (dst.mtime() - src.mtime()) < ATIME_RESOLUTION
+
+ def test_copy_stat_dir(self, tmpdir):
+ test_files = ['a', 'b', 'c']
+ src = tmpdir.join('src')
+ for f in test_files:
+ src.join(f).write(f, ensure=True)
+ dst = tmpdir.join('dst')
+ # a small delay before the copy
+ time.sleep(ATIME_RESOLUTION)
+ src.copy(dst, stat=True)
+ for f in test_files:
+ oldstat = src.join(f).stat()
+ newstat = dst.join(f).stat()
+ assert (newstat.atime - oldstat.atime) < ATIME_RESOLUTION
+ assert (newstat.mtime - oldstat.mtime) < ATIME_RESOLUTION
+ assert oldstat.mode == newstat.mode
+
+ @failsonjython
+ def test_chown_identity(self, path1):
+ owner = path1.stat().owner
+ group = path1.stat().group
+ path1.chown(owner, group)
+
+ @failsonjython
+ def test_chown_dangling_link(self, path1):
+ owner = path1.stat().owner
+ group = path1.stat().group
+ x = path1.join('hello')
+ x.mksymlinkto('qlwkejqwlek')
+ try:
+ path1.chown(owner, group, rec=1)
+ finally:
+ x.remove(rec=0)
+
+ @failsonjython
+ def test_chown_identity_rec_mayfail(self, path1):
+ owner = path1.stat().owner
+ group = path1.stat().group
+ path1.chown(owner, group)
+
+
+class TestUnicodePy2Py3:
+ def test_join_ensure(self, tmpdir, monkeypatch):
+ if sys.version_info >= (3,0) and "LANG" not in os.environ:
+ pytest.skip("cannot run test without locale")
+ x = py.path.local(tmpdir.strpath)
+ part = "hällo"
+ y = x.ensure(part)
+ assert x.join(part) == y
+
+ def test_listdir(self, tmpdir):
+ if sys.version_info >= (3,0) and "LANG" not in os.environ:
+ pytest.skip("cannot run test without locale")
+ x = py.path.local(tmpdir.strpath)
+ part = "hällo"
+ y = x.ensure(part)
+ assert x.listdir(part)[0] == y
+
+ @pytest.mark.xfail(reason="changing read/write might break existing usages")
+ def test_read_write(self, tmpdir):
+ x = tmpdir.join("hello")
+ part = py.builtin._totext("hällo", "utf8")
+ x.write(part)
+ assert x.read() == part
+ x.write(part.encode(sys.getdefaultencoding()))
+ assert x.read() == part.encode(sys.getdefaultencoding())
+
+class TestBinaryAndTextMethods:
+ def test_read_binwrite(self, tmpdir):
+ x = tmpdir.join("hello")
+ part = py.builtin._totext("hällo", "utf8")
+ part_utf8 = part.encode("utf8")
+ x.write_binary(part_utf8)
+ assert x.read_binary() == part_utf8
+ s = x.read_text(encoding="utf8")
+ assert s == part
+ assert py.builtin._istext(s)
+
+ def test_read_textwrite(self, tmpdir):
+ x = tmpdir.join("hello")
+ part = py.builtin._totext("hällo", "utf8")
+ part_utf8 = part.encode("utf8")
+ x.write_text(part, encoding="utf8")
+ assert x.read_binary() == part_utf8
+ assert x.read_text(encoding="utf8") == part
+
+ def test_default_encoding(self, tmpdir):
+ x = tmpdir.join("hello")
+        # Can't use UTF-8 here because the default encoding (ASCII) doesn't support it
+ part = py.builtin._totext("hello", "ascii")
+ x.write_text(part, "ascii")
+ s = x.read_text("ascii")
+ assert s == part
+ assert type(s) == type(part)
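+
+# Binary/text round-trip in a nutshell (an illustrative sketch using the same
+# calls as TestBinaryAndTextMethods above):
+#
+#     p = tmpdir.join("sample")
+#     p.write_text(u"h\xe4llo", encoding="utf8")
+#     assert p.read_binary() == u"h\xe4llo".encode("utf8")
+#     assert p.read_text(encoding="utf8") == u"h\xe4llo"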
diff --git a/testing/path/test_svnauth.py b/testing/path/test_svnauth.py
index b3f3665..ead22c8 100644
--- a/testing/path/test_svnauth.py
+++ b/testing/path/test_svnauth.py
@@ -1,454 +1,454 @@
-import py
-import svntestbase
-from py.path import SvnAuth
-import time
-import sys
-
-svnbin = py.path.local.sysfind('svn')
-
-def make_repo_auth(repo, userdata):
- """ write config to repo
-
- user information in userdata is used for auth
- userdata has user names as keys, and a tuple (password, readwrite) as
- values, where 'readwrite' is either 'r' or 'rw'
- """
- confdir = py.path.local(repo).join('conf')
- confdir.join('svnserve.conf').write('''\
-[general]
-anon-access = none
-password-db = passwd
-authz-db = authz
-realm = TestRepo
-''')
- authzdata = '[/]\n'
- passwddata = '[users]\n'
- for user in userdata:
- authzdata += '%s = %s\n' % (user, userdata[user][1])
- passwddata += '%s = %s\n' % (user, userdata[user][0])
- confdir.join('authz').write(authzdata)
- confdir.join('passwd').write(passwddata)
-
-def serve_bg(repopath):
- pidfile = py.path.local(repopath).join('pid')
- port = 10000
- e = None
- while port < 10010:
- cmd = 'svnserve -d -T --listen-port=%d --pid-file=%s -r %s' % (
- port, pidfile, repopath)
- print(cmd)
- try:
- py.process.cmdexec(cmd)
- except py.process.cmdexec.Error:
- e = sys.exc_info()[1]
- else:
- # XXX we assume here that the pid file gets written somewhere, I
- # guess this should be relatively safe... (I hope, at least?)
- counter = pid = 0
- while counter < 10:
- counter += 1
- try:
- pid = pidfile.read()
- except py.error.ENOENT:
- pass
- if pid:
- break
- time.sleep(0.2)
- return port, int(pid)
- port += 1
- raise IOError('could not start svnserve: %s' % (e,))
-
-class TestSvnAuth(object):
- def test_basic(self):
- auth = SvnAuth('foo', 'bar')
- assert auth.username == 'foo'
- assert auth.password == 'bar'
- assert str(auth)
-
- def test_makecmdoptions_uname_pw_makestr(self):
- auth = SvnAuth('foo', 'bar')
- assert auth.makecmdoptions() == '--username="foo" --password="bar"'
-
- def test_makecmdoptions_quote_escape(self):
- auth = SvnAuth('fo"o', '"ba\'r"')
- assert auth.makecmdoptions() == '--username="fo\\"o" --password="\\"ba\'r\\""'
-
- def test_makecmdoptions_no_cache_auth(self):
- auth = SvnAuth('foo', 'bar', cache_auth=False)
- assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
- '--no-auth-cache')
-
- def test_makecmdoptions_no_interactive(self):
- auth = SvnAuth('foo', 'bar', interactive=False)
- assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
- '--non-interactive')
-
- def test_makecmdoptions_no_interactive_no_cache_auth(self):
- auth = SvnAuth('foo', 'bar', cache_auth=False,
- interactive=False)
- assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
- '--no-auth-cache --non-interactive')
-
-class svnwc_no_svn(py.path.svnwc):
- def __new__(cls, *args, **kwargs):
- self = super(svnwc_no_svn, cls).__new__(cls, *args, **kwargs)
- self.commands = []
- return self
-
- def _svn(self, *args):
- self.commands.append(args)
-
-class TestSvnWCAuth(object):
- def setup_method(self, meth):
- if not svnbin:
- py.test.skip("svn binary required")
- self.auth = SvnAuth('user', 'pass', cache_auth=False)
-
- def test_checkout(self):
- wc = svnwc_no_svn('foo', auth=self.auth)
- wc.checkout('url')
- assert wc.commands[0][-1] == ('--username="user" --password="pass" '
- '--no-auth-cache')
-
- def test_commit(self):
- wc = svnwc_no_svn('foo', auth=self.auth)
- wc.commit('msg')
- assert wc.commands[0][-1] == ('--username="user" --password="pass" '
- '--no-auth-cache')
-
- def test_checkout_no_cache_auth(self):
- wc = svnwc_no_svn('foo', auth=self.auth)
- wc.checkout('url')
- assert wc.commands[0][-1] == ('--username="user" --password="pass" '
- '--no-auth-cache')
-
- def test_checkout_auth_from_constructor(self):
- wc = svnwc_no_svn('foo', auth=self.auth)
- wc.checkout('url')
- assert wc.commands[0][-1] == ('--username="user" --password="pass" '
- '--no-auth-cache')
-
-class svnurl_no_svn(py.path.svnurl):
- cmdexec_output = 'test'
- popen_output = 'test'
- def __new__(cls, *args, **kwargs):
- self = super(svnurl_no_svn, cls).__new__(cls, *args, **kwargs)
- self.commands = []
- return self
-
- def _cmdexec(self, cmd):
- self.commands.append(cmd)
- return self.cmdexec_output
-
- def _popen(self, cmd):
- self.commands.append(cmd)
- return self.popen_output
-
-class TestSvnURLAuth(object):
- def setup_method(self, meth):
- self.auth = SvnAuth('foo', 'bar')
-
- def test_init(self):
- u = svnurl_no_svn('http://foo.bar/svn')
- assert u.auth is None
-
- u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
- assert u.auth is self.auth
-
- def test_new(self):
- u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
- new = u.new(basename='bar')
- assert new.auth is self.auth
- assert new.url == 'http://foo.bar/svn/bar'
-
- def test_join(self):
- u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
- new = u.join('foo')
- assert new.auth is self.auth
- assert new.url == 'http://foo.bar/svn/foo'
-
- def test_listdir(self):
- u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
- u.cmdexec_output = '''\
- 1717 johnny 1529 Nov 04 14:32 LICENSE.txt
- 1716 johnny 5352 Nov 04 14:28 README.txt
-'''
- paths = u.listdir()
- assert paths[0].auth is self.auth
- assert paths[1].auth is self.auth
- assert paths[0].basename == 'LICENSE.txt'
-
- def test_info(self):
- u = svnurl_no_svn('http://foo.bar/svn/LICENSE.txt', auth=self.auth)
- def dirpath(self):
- return self
- u.cmdexec_output = '''\
- 1717 johnny 1529 Nov 04 14:32 LICENSE.txt
- 1716 johnny 5352 Nov 04 14:28 README.txt
-'''
- org_dp = u.__class__.dirpath
- u.__class__.dirpath = dirpath
- try:
- info = u.info()
- finally:
- u.dirpath = org_dp
- assert info.size == 1529
-
- def test_open(self):
- u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
- foo = u.join('foo')
- foo.check = lambda *args, **kwargs: True
- ret = foo.open()
- assert ret == 'test'
- assert '--username="foo" --password="bar"' in foo.commands[0]
-
- def test_dirpath(self):
- u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
- parent = u.dirpath()
- assert parent.auth is self.auth
-
- def test_mkdir(self):
- u = svnurl_no_svn('http://foo.bar/svn/qweqwe', auth=self.auth)
- assert not u.commands
- u.mkdir(msg='created dir foo')
- assert u.commands
- assert '--username="foo" --password="bar"' in u.commands[0]
-
- def test_copy(self):
- u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
- u2 = svnurl_no_svn('http://foo.bar/svn2')
- u.copy(u2, 'copied dir')
- assert '--username="foo" --password="bar"' in u.commands[0]
-
- def test_rename(self):
- u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
- u.rename('http://foo.bar/svn/bar', 'moved foo to bar')
- assert '--username="foo" --password="bar"' in u.commands[0]
-
- def test_remove(self):
- u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
- u.remove(msg='removing foo')
- assert '--username="foo" --password="bar"' in u.commands[0]
-
- def test_export(self):
- u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
- target = py.path.local('/foo')
- u.export(target)
- assert '--username="foo" --password="bar"' in u.commands[0]
-
- def test_log(self):
- u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
- u.popen_output = py.io.TextIO(py.builtin._totext('''\
-<?xml version="1.0"?>
-<log>
-<logentry revision="51381">
-<author>guido</author>
-<date>2008-02-11T12:12:18.476481Z</date>
-<msg>Creating branch to work on auth support for py.path.svn*.
-</msg>
-</logentry>
-</log>
-''', 'ascii'))
- u.check = lambda *args, **kwargs: True
- ret = u.log(10, 20, verbose=True)
- assert '--username="foo" --password="bar"' in u.commands[0]
- assert len(ret) == 1
- assert int(ret[0].rev) == 51381
- assert ret[0].author == 'guido'
-
- def test_propget(self):
- u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
- u.propget('foo')
- assert '--username="foo" --password="bar"' in u.commands[0]
-
-def pytest_funcarg__setup(request):
- return Setup(request)
-
-class Setup:
- def __init__(self, request):
- if not svnbin:
- py.test.skip("svn binary required")
- if not request.config.option.runslowtests:
- py.test.skip('use --runslowtests to run these tests')
-
- tmpdir = request.getfuncargvalue("tmpdir")
- repodir = tmpdir.join("repo")
- py.process.cmdexec('svnadmin create %s' % repodir)
- if sys.platform == 'win32':
- repodir = '/' + str(repodir).replace('\\', '/')
- self.repo = py.path.svnurl("file://%s" % repodir)
- if py.std.sys.platform == 'win32':
- # remove trailing slash...
- repodir = repodir[1:]
- self.repopath = py.path.local(repodir)
- self.temppath = tmpdir.mkdir("temppath")
- self.auth = SvnAuth('johnny', 'foo', cache_auth=False,
- interactive=False)
- make_repo_auth(self.repopath, {'johnny': ('foo', 'rw')})
- self.port, self.pid = serve_bg(self.repopath.dirpath())
- # XXX caching is too global
- py.path.svnurl._lsnorevcache._dict.clear()
- request.addfinalizer(lambda: py.process.kill(self.pid))
-
-class TestSvnWCAuthFunctional:
- def test_checkout_constructor_arg(self, setup):
- wc = py.path.svnwc(setup.temppath, auth=setup.auth)
- wc.checkout(
- 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
- assert wc.join('.svn').check()
-
- def test_checkout_function_arg(self, setup):
- wc = py.path.svnwc(setup.temppath, auth=setup.auth)
- wc.checkout(
- 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
- assert wc.join('.svn').check()
-
- def test_checkout_failing_non_interactive(self, setup):
- auth = SvnAuth('johnny', 'bar', cache_auth=False,
- interactive=False)
- wc = py.path.svnwc(setup.temppath, auth)
- py.test.raises(Exception,
- ("wc.checkout('svn://localhost:%(port)s/%(repopath)s')" %
- setup.__dict__))
-
- def test_log(self, setup):
- wc = py.path.svnwc(setup.temppath, setup.auth)
- wc.checkout(
- 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
- foo = wc.ensure('foo.txt')
- wc.commit('added foo.txt')
- log = foo.log()
- assert len(log) == 1
- assert log[0].msg == 'added foo.txt'
-
- def test_switch(self, setup):
- wc = py.path.svnwc(setup.temppath, auth=setup.auth)
- svnurl = 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename)
- wc.checkout(svnurl)
- wc.ensure('foo', dir=True).ensure('foo.txt').write('foo')
- wc.commit('added foo dir with foo.txt file')
- wc.ensure('bar', dir=True)
- wc.commit('added bar dir')
- bar = wc.join('bar')
- bar.switch(svnurl + '/foo')
- assert bar.join('foo.txt')
-
- def test_update(self, setup):
- wc1 = py.path.svnwc(setup.temppath.ensure('wc1', dir=True),
- auth=setup.auth)
- wc2 = py.path.svnwc(setup.temppath.ensure('wc2', dir=True),
- auth=setup.auth)
- wc1.checkout(
- 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
- wc2.checkout(
- 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
- wc1.ensure('foo', dir=True)
- wc1.commit('added foo dir')
- wc2.update()
- assert wc2.join('foo').check()
-
- auth = SvnAuth('unknown', 'unknown', interactive=False)
- wc2.auth = auth
- py.test.raises(Exception, 'wc2.update()')
-
- def test_lock_unlock_status(self, setup):
- port = setup.port
- wc = py.path.svnwc(setup.temppath, auth=setup.auth)
- wc.checkout(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
- wc.ensure('foo', file=True)
- wc.commit('added foo file')
- foo = wc.join('foo')
- foo.lock()
- status = foo.status()
- assert status.locked
- foo.unlock()
- status = foo.status()
- assert not status.locked
-
- auth = SvnAuth('unknown', 'unknown', interactive=False)
- foo.auth = auth
- py.test.raises(Exception, 'foo.lock()')
- py.test.raises(Exception, 'foo.unlock()')
-
- def test_diff(self, setup):
- port = setup.port
- wc = py.path.svnwc(setup.temppath, auth=setup.auth)
- wc.checkout(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
- wc.ensure('foo', file=True)
- wc.commit('added foo file')
- wc.update()
- rev = int(wc.status().rev)
- foo = wc.join('foo')
- foo.write('bar')
- diff = foo.diff()
- assert '\n+bar\n' in diff
- foo.commit('added some content')
- diff = foo.diff()
- assert not diff
- diff = foo.diff(rev=rev)
- assert '\n+bar\n' in diff
-
- auth = SvnAuth('unknown', 'unknown', interactive=False)
- foo.auth = auth
- py.test.raises(Exception, 'foo.diff(rev=rev)')
-
-class TestSvnURLAuthFunctional:
- def test_listdir(self, setup):
- port = setup.port
- u = py.path.svnurl(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
- auth=setup.auth)
- u.ensure('foo')
- paths = u.listdir()
- assert len(paths) == 1
- assert paths[0].auth is setup.auth
-
- auth = SvnAuth('foo', 'bar', interactive=False)
- u = py.path.svnurl(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
- auth=auth)
- py.test.raises(Exception, 'u.listdir()')
-
- def test_copy(self, setup):
- port = setup.port
- u = py.path.svnurl(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
- auth=setup.auth)
- foo = u.mkdir('foo')
- assert foo.check()
- bar = u.join('bar')
- foo.copy(bar)
- assert bar.check()
- assert bar.auth is setup.auth
-
- auth = SvnAuth('foo', 'bar', interactive=False)
- u = py.path.svnurl(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
- auth=auth)
- foo = u.join('foo')
- bar = u.join('bar')
- py.test.raises(Exception, 'foo.copy(bar)')
-
- def test_write_read(self, setup):
- port = setup.port
- u = py.path.svnurl(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
- auth=setup.auth)
- foo = u.ensure('foo')
- fp = foo.open()
- try:
- data = fp.read()
- finally:
- fp.close()
- assert data == ''
-
- auth = SvnAuth('foo', 'bar', interactive=False)
- u = py.path.svnurl(
- 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
- auth=auth)
- foo = u.join('foo')
- py.test.raises(Exception, 'foo.open()')
-
- # XXX rinse, repeat... :|
+import py
+import svntestbase
+from py.path import SvnAuth
+import time
+import sys
+
+svnbin = py.path.local.sysfind('svn')
+
+def make_repo_auth(repo, userdata):
+ """ write config to repo
+
+ user information in userdata is used for auth
+ userdata has user names as keys, and a tuple (password, readwrite) as
+ values, where 'readwrite' is either 'r' or 'rw'
+ """
+ confdir = py.path.local(repo).join('conf')
+ confdir.join('svnserve.conf').write('''\
+[general]
+anon-access = none
+password-db = passwd
+authz-db = authz
+realm = TestRepo
+''')
+ authzdata = '[/]\n'
+ passwddata = '[users]\n'
+ for user in userdata:
+ authzdata += '%s = %s\n' % (user, userdata[user][1])
+ passwddata += '%s = %s\n' % (user, userdata[user][0])
+ confdir.join('authz').write(authzdata)
+ confdir.join('passwd').write(passwddata)
+
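+# The userdata mapping expected above, for example (values are illustrative):
+#
+#     make_repo_auth(repopath, {'johnny': ('foo', 'rw')})
+#     # i.e. {username: (password, access)} where access is 'r' or 'rw'
+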
+def serve_bg(repopath):
+ pidfile = py.path.local(repopath).join('pid')
+ port = 10000
+ e = None
+ while port < 10010:
+ cmd = 'svnserve -d -T --listen-port=%d --pid-file=%s -r %s' % (
+ port, pidfile, repopath)
+ print(cmd)
+ try:
+ py.process.cmdexec(cmd)
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ else:
+ # XXX we assume here that the pid file gets written somewhere, I
+ # guess this should be relatively safe... (I hope, at least?)
+ counter = pid = 0
+ while counter < 10:
+ counter += 1
+ try:
+ pid = pidfile.read()
+ except py.error.ENOENT:
+ pass
+ if pid:
+ break
+ time.sleep(0.2)
+ return port, int(pid)
+ port += 1
+ raise IOError('could not start svnserve: %s' % (e,))
+
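+# serve_bg() usage sketch (as the Setup fixture below does): start svnserve on
+# the first free port in 10000-10009 and make sure it gets killed again.
+#
+#     port, pid = serve_bg(repopath.dirpath())
+#     try:
+#         ...  # talk to svn://localhost:<port>/<repo>
+#     finally:
+#         py.process.kill(pid)
+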
+class TestSvnAuth(object):
+ def test_basic(self):
+ auth = SvnAuth('foo', 'bar')
+ assert auth.username == 'foo'
+ assert auth.password == 'bar'
+ assert str(auth)
+
+ def test_makecmdoptions_uname_pw_makestr(self):
+ auth = SvnAuth('foo', 'bar')
+ assert auth.makecmdoptions() == '--username="foo" --password="bar"'
+
+ def test_makecmdoptions_quote_escape(self):
+ auth = SvnAuth('fo"o', '"ba\'r"')
+ assert auth.makecmdoptions() == '--username="fo\\"o" --password="\\"ba\'r\\""'
+
+ def test_makecmdoptions_no_cache_auth(self):
+ auth = SvnAuth('foo', 'bar', cache_auth=False)
+ assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
+ '--no-auth-cache')
+
+ def test_makecmdoptions_no_interactive(self):
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
+ '--non-interactive')
+
+ def test_makecmdoptions_no_interactive_no_cache_auth(self):
+ auth = SvnAuth('foo', 'bar', cache_auth=False,
+ interactive=False)
+ assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
+ '--no-auth-cache --non-interactive')
+
+class svnwc_no_svn(py.path.svnwc):
+ def __new__(cls, *args, **kwargs):
+ self = super(svnwc_no_svn, cls).__new__(cls, *args, **kwargs)
+ self.commands = []
+ return self
+
+ def _svn(self, *args):
+ self.commands.append(args)
+
+class TestSvnWCAuth(object):
+ def setup_method(self, meth):
+ if not svnbin:
+ py.test.skip("svn binary required")
+ self.auth = SvnAuth('user', 'pass', cache_auth=False)
+
+ def test_checkout(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.checkout('url')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+ def test_commit(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.commit('msg')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+ def test_checkout_no_cache_auth(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.checkout('url')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+ def test_checkout_auth_from_constructor(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.checkout('url')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+class svnurl_no_svn(py.path.svnurl):
+ cmdexec_output = 'test'
+ popen_output = 'test'
+ def __new__(cls, *args, **kwargs):
+ self = super(svnurl_no_svn, cls).__new__(cls, *args, **kwargs)
+ self.commands = []
+ return self
+
+ def _cmdexec(self, cmd):
+ self.commands.append(cmd)
+ return self.cmdexec_output
+
+ def _popen(self, cmd):
+ self.commands.append(cmd)
+ return self.popen_output
+
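+# svnwc_no_svn and svnurl_no_svn are test doubles: they override the
+# command-execution hooks (_svn, _cmdexec, _popen) to record the generated
+# command line instead of shelling out, so the tests below can assert on the
+# auth options without a running svn server.  A minimal sketch:
+#
+#     u = svnurl_no_svn('http://foo.bar/svn', auth=SvnAuth('foo', 'bar'))
+#     u.mkdir(msg='x')
+#     assert '--username="foo"' in u.commands[0]
+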
+class TestSvnURLAuth(object):
+ def setup_method(self, meth):
+ self.auth = SvnAuth('foo', 'bar')
+
+ def test_init(self):
+ u = svnurl_no_svn('http://foo.bar/svn')
+ assert u.auth is None
+
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ assert u.auth is self.auth
+
+ def test_new(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ new = u.new(basename='bar')
+ assert new.auth is self.auth
+ assert new.url == 'http://foo.bar/svn/bar'
+
+ def test_join(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ new = u.join('foo')
+ assert new.auth is self.auth
+ assert new.url == 'http://foo.bar/svn/foo'
+
+ def test_listdir(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ u.cmdexec_output = '''\
+ 1717 johnny 1529 Nov 04 14:32 LICENSE.txt
+ 1716 johnny 5352 Nov 04 14:28 README.txt
+'''
+ paths = u.listdir()
+ assert paths[0].auth is self.auth
+ assert paths[1].auth is self.auth
+ assert paths[0].basename == 'LICENSE.txt'
+
+ def test_info(self):
+ u = svnurl_no_svn('http://foo.bar/svn/LICENSE.txt', auth=self.auth)
+ def dirpath(self):
+ return self
+ u.cmdexec_output = '''\
+ 1717 johnny 1529 Nov 04 14:32 LICENSE.txt
+ 1716 johnny 5352 Nov 04 14:28 README.txt
+'''
+ org_dp = u.__class__.dirpath
+ u.__class__.dirpath = dirpath
+ try:
+ info = u.info()
+ finally:
+ u.dirpath = org_dp
+ assert info.size == 1529
+
+ def test_open(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ foo = u.join('foo')
+ foo.check = lambda *args, **kwargs: True
+ ret = foo.open()
+ assert ret == 'test'
+ assert '--username="foo" --password="bar"' in foo.commands[0]
+
+ def test_dirpath(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ parent = u.dirpath()
+ assert parent.auth is self.auth
+
+ def test_mkdir(self):
+ u = svnurl_no_svn('http://foo.bar/svn/qweqwe', auth=self.auth)
+ assert not u.commands
+ u.mkdir(msg='created dir foo')
+ assert u.commands
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_copy(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ u2 = svnurl_no_svn('http://foo.bar/svn2')
+ u.copy(u2, 'copied dir')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_rename(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ u.rename('http://foo.bar/svn/bar', 'moved foo to bar')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_remove(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ u.remove(msg='removing foo')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_export(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ target = py.path.local('/foo')
+ u.export(target)
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_log(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ u.popen_output = py.io.TextIO(py.builtin._totext('''\
+<?xml version="1.0"?>
+<log>
+<logentry revision="51381">
+<author>guido</author>
+<date>2008-02-11T12:12:18.476481Z</date>
+<msg>Creating branch to work on auth support for py.path.svn*.
+</msg>
+</logentry>
+</log>
+''', 'ascii'))
+ u.check = lambda *args, **kwargs: True
+ ret = u.log(10, 20, verbose=True)
+ assert '--username="foo" --password="bar"' in u.commands[0]
+ assert len(ret) == 1
+ assert int(ret[0].rev) == 51381
+ assert ret[0].author == 'guido'
+
+ def test_propget(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ u.propget('foo')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+def pytest_funcarg__setup(request):
+ return Setup(request)
+
+class Setup:
+ def __init__(self, request):
+ if not svnbin:
+ py.test.skip("svn binary required")
+ if not request.config.option.runslowtests:
+ py.test.skip('use --runslowtests to run these tests')
+
+ tmpdir = request.getfuncargvalue("tmpdir")
+ repodir = tmpdir.join("repo")
+ py.process.cmdexec('svnadmin create %s' % repodir)
+ if sys.platform == 'win32':
+ repodir = '/' + str(repodir).replace('\\', '/')
+ self.repo = py.path.svnurl("file://%s" % repodir)
+ if py.std.sys.platform == 'win32':
+ # remove trailing slash...
+ repodir = repodir[1:]
+ self.repopath = py.path.local(repodir)
+ self.temppath = tmpdir.mkdir("temppath")
+ self.auth = SvnAuth('johnny', 'foo', cache_auth=False,
+ interactive=False)
+ make_repo_auth(self.repopath, {'johnny': ('foo', 'rw')})
+ self.port, self.pid = serve_bg(self.repopath.dirpath())
+ # XXX caching is too global
+ py.path.svnurl._lsnorevcache._dict.clear()
+ request.addfinalizer(lambda: py.process.kill(self.pid))
+
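+# pytest_funcarg__setup is the old-style funcarg hook: tests below that take a
+# 'setup' argument get a fresh Setup(request), which skips unless an svn
+# binary and --runslowtests are available, creates and serves a repo via
+# serve_bg(), and registers py.process.kill(pid) as a finalizer.
+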
+class TestSvnWCAuthFunctional:
+ def test_checkout_constructor_arg(self, setup):
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ assert wc.join('.svn').check()
+
+ def test_checkout_function_arg(self, setup):
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ assert wc.join('.svn').check()
+
+ def test_checkout_failing_non_interactive(self, setup):
+ auth = SvnAuth('johnny', 'bar', cache_auth=False,
+ interactive=False)
+ wc = py.path.svnwc(setup.temppath, auth)
+ py.test.raises(Exception,
+ ("wc.checkout('svn://localhost:%(port)s/%(repopath)s')" %
+ setup.__dict__))
+
+ def test_log(self, setup):
+ wc = py.path.svnwc(setup.temppath, setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ foo = wc.ensure('foo.txt')
+ wc.commit('added foo.txt')
+ log = foo.log()
+ assert len(log) == 1
+ assert log[0].msg == 'added foo.txt'
+
+ def test_switch(self, setup):
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ svnurl = 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename)
+ wc.checkout(svnurl)
+ wc.ensure('foo', dir=True).ensure('foo.txt').write('foo')
+ wc.commit('added foo dir with foo.txt file')
+ wc.ensure('bar', dir=True)
+ wc.commit('added bar dir')
+ bar = wc.join('bar')
+ bar.switch(svnurl + '/foo')
+ assert bar.join('foo.txt')
+
+ def test_update(self, setup):
+ wc1 = py.path.svnwc(setup.temppath.ensure('wc1', dir=True),
+ auth=setup.auth)
+ wc2 = py.path.svnwc(setup.temppath.ensure('wc2', dir=True),
+ auth=setup.auth)
+ wc1.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ wc2.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ wc1.ensure('foo', dir=True)
+ wc1.commit('added foo dir')
+ wc2.update()
+ assert wc2.join('foo').check()
+
+ auth = SvnAuth('unknown', 'unknown', interactive=False)
+ wc2.auth = auth
+ py.test.raises(Exception, 'wc2.update()')
+
+ def test_lock_unlock_status(self, setup):
+ port = setup.port
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
+ wc.ensure('foo', file=True)
+ wc.commit('added foo file')
+ foo = wc.join('foo')
+ foo.lock()
+ status = foo.status()
+ assert status.locked
+ foo.unlock()
+ status = foo.status()
+ assert not status.locked
+
+ auth = SvnAuth('unknown', 'unknown', interactive=False)
+ foo.auth = auth
+ py.test.raises(Exception, 'foo.lock()')
+ py.test.raises(Exception, 'foo.unlock()')
+
+ def test_diff(self, setup):
+ port = setup.port
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
+ wc.ensure('foo', file=True)
+ wc.commit('added foo file')
+ wc.update()
+ rev = int(wc.status().rev)
+ foo = wc.join('foo')
+ foo.write('bar')
+ diff = foo.diff()
+ assert '\n+bar\n' in diff
+ foo.commit('added some content')
+ diff = foo.diff()
+ assert not diff
+ diff = foo.diff(rev=rev)
+ assert '\n+bar\n' in diff
+
+ auth = SvnAuth('unknown', 'unknown', interactive=False)
+ foo.auth = auth
+ py.test.raises(Exception, 'foo.diff(rev=rev)')
+
+class TestSvnURLAuthFunctional:
+ def test_listdir(self, setup):
+ port = setup.port
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=setup.auth)
+ u.ensure('foo')
+ paths = u.listdir()
+ assert len(paths) == 1
+ assert paths[0].auth is setup.auth
+
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=auth)
+ py.test.raises(Exception, 'u.listdir()')
+
+ def test_copy(self, setup):
+ port = setup.port
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=setup.auth)
+ foo = u.mkdir('foo')
+ assert foo.check()
+ bar = u.join('bar')
+ foo.copy(bar)
+ assert bar.check()
+ assert bar.auth is setup.auth
+
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=auth)
+ foo = u.join('foo')
+ bar = u.join('bar')
+ py.test.raises(Exception, 'foo.copy(bar)')
+
+ def test_write_read(self, setup):
+ port = setup.port
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=setup.auth)
+ foo = u.ensure('foo')
+ fp = foo.open()
+ try:
+ data = fp.read()
+ finally:
+ fp.close()
+ assert data == ''
+
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=auth)
+ foo = u.join('foo')
+ py.test.raises(Exception, 'foo.open()')
+
+ # XXX rinse, repeat... :|
diff --git a/testing/path/test_svnurl.py b/testing/path/test_svnurl.py
index 15fbea5..a4771fa 100644
--- a/testing/path/test_svnurl.py
+++ b/testing/path/test_svnurl.py
@@ -1,95 +1,95 @@
-import py
-from py._path.svnurl import InfoSvnCommand
-import datetime
-import time
-from svntestbase import CommonSvnTests
-
-def pytest_funcarg__path1(request):
- repo, repourl, wc = request.getfuncargvalue("repowc1")
- return py.path.svnurl(repourl)
-
-class TestSvnURLCommandPath(CommonSvnTests):
- @py.test.mark.xfail
- def test_load(self, path1):
- super(TestSvnURLCommandPath, self).test_load(path1)
-
- # the following two work on jython but not in local/svnwc
- def test_listdir(self, path1):
- super(TestSvnURLCommandPath, self).test_listdir(path1)
- def test_visit_ignore(self, path1):
- super(TestSvnURLCommandPath, self).test_visit_ignore(path1)
-
- def test_svnurl_needs_arg(self, path1):
- py.test.raises(TypeError, "py.path.svnurl()")
-
- def test_svnurl_does_not_accept_None_either(self, path1):
- py.test.raises(Exception, "py.path.svnurl(None)")
-
- def test_svnurl_characters_simple(self, path1):
- py.path.svnurl("svn+ssh://hello/world")
-
- def test_svnurl_characters_at_user(self, path1):
- py.path.svnurl("http://user@host.com/some/dir")
-
- def test_svnurl_characters_at_path(self, path1):
- py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo@bar")')
-
- def test_svnurl_characters_colon_port(self, path1):
- py.path.svnurl("http://host.com:8080/some/dir")
-
- def test_svnurl_characters_tilde_end(self, path1):
- py.path.svnurl("http://host.com/some/file~")
-
- @py.test.mark.xfail("sys.platform == 'win32'")
- def test_svnurl_characters_colon_path(self, path1):
- # colons are allowed on win32, because they're part of the drive
- # part of an absolute path... however, they shouldn't be allowed in
- # other parts, I think
- py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo:bar")')
-
- def test_export(self, path1, tmpdir):
- tmpdir = tmpdir.join("empty")
- p = path1.export(tmpdir)
- assert p == tmpdir # XXX should return None
- n1 = [x.basename for x in tmpdir.listdir()]
- n2 = [x.basename for x in path1.listdir()]
- n1.sort()
- n2.sort()
- assert n1 == n2
- assert not p.join('.svn').check()
- rev = path1.mkdir("newdir")
- tmpdir.remove()
- assert not tmpdir.check()
- path1.new(rev=1).export(tmpdir)
- for p in tmpdir.listdir():
- assert p.basename in n2
-
-class TestSvnInfoCommand:
-
- def test_svn_1_2(self):
- line = " 2256 hpk 165 Nov 24 17:55 __init__.py"
- info = InfoSvnCommand(line)
- now = datetime.datetime.now()
- assert info.last_author == 'hpk'
- assert info.created_rev == 2256
- assert info.kind == 'file'
- # we don't check for the year (2006), because that depends
- # on the clock correctly being setup
- assert time.gmtime(info.mtime)[1:6] == (11, 24, 17, 55, 0)
- assert info.size == 165
- assert info.time == info.mtime * 1000000
-
- def test_svn_1_3(self):
- line =" 4784 hpk 2 Jun 01 2004 __init__.py"
- info = InfoSvnCommand(line)
- assert info.last_author == 'hpk'
- assert info.kind == 'file'
-
- def test_svn_1_3_b(self):
- line =" 74 autoadmi Oct 06 23:59 plonesolutions.com/"
- info = InfoSvnCommand(line)
- assert info.last_author == 'autoadmi'
- assert info.kind == 'dir'
-
-def test_badchars():
- py.test.raises(ValueError, "py.path.svnurl('http://host/tmp/@@@:')")
+import py
+from py._path.svnurl import InfoSvnCommand
+import datetime
+import time
+from svntestbase import CommonSvnTests
+
+def pytest_funcarg__path1(request):
+ repo, repourl, wc = request.getfuncargvalue("repowc1")
+ return py.path.svnurl(repourl)
+
+class TestSvnURLCommandPath(CommonSvnTests):
+ @py.test.mark.xfail
+ def test_load(self, path1):
+ super(TestSvnURLCommandPath, self).test_load(path1)
+
+ # the following two work on jython but not in local/svnwc
+ def test_listdir(self, path1):
+ super(TestSvnURLCommandPath, self).test_listdir(path1)
+ def test_visit_ignore(self, path1):
+ super(TestSvnURLCommandPath, self).test_visit_ignore(path1)
+
+ def test_svnurl_needs_arg(self, path1):
+ py.test.raises(TypeError, "py.path.svnurl()")
+
+ def test_svnurl_does_not_accept_None_either(self, path1):
+ py.test.raises(Exception, "py.path.svnurl(None)")
+
+ def test_svnurl_characters_simple(self, path1):
+ py.path.svnurl("svn+ssh://hello/world")
+
+ def test_svnurl_characters_at_user(self, path1):
+ py.path.svnurl("http://user@host.com/some/dir")
+
+ def test_svnurl_characters_at_path(self, path1):
+ py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo@bar")')
+
+ def test_svnurl_characters_colon_port(self, path1):
+ py.path.svnurl("http://host.com:8080/some/dir")
+
+ def test_svnurl_characters_tilde_end(self, path1):
+ py.path.svnurl("http://host.com/some/file~")
+
+ @py.test.mark.xfail("sys.platform == 'win32'")
+ def test_svnurl_characters_colon_path(self, path1):
+ # colons are allowed on win32, because they're part of the drive
+ # part of an absolute path... however, they shouldn't be allowed in
+ # other parts, I think
+ py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo:bar")')
+
+ def test_export(self, path1, tmpdir):
+ tmpdir = tmpdir.join("empty")
+ p = path1.export(tmpdir)
+ assert p == tmpdir # XXX should return None
+ n1 = [x.basename for x in tmpdir.listdir()]
+ n2 = [x.basename for x in path1.listdir()]
+ n1.sort()
+ n2.sort()
+ assert n1 == n2
+ assert not p.join('.svn').check()
+ rev = path1.mkdir("newdir")
+ tmpdir.remove()
+ assert not tmpdir.check()
+ path1.new(rev=1).export(tmpdir)
+ for p in tmpdir.listdir():
+ assert p.basename in n2
+
+class TestSvnInfoCommand:
+
+ def test_svn_1_2(self):
+ line = " 2256 hpk 165 Nov 24 17:55 __init__.py"
+ info = InfoSvnCommand(line)
+ now = datetime.datetime.now()
+ assert info.last_author == 'hpk'
+ assert info.created_rev == 2256
+ assert info.kind == 'file'
+ # we don't check for the year (2006), because that depends
+ # on the clock correctly being setup
+ assert time.gmtime(info.mtime)[1:6] == (11, 24, 17, 55, 0)
+ assert info.size == 165
+ assert info.time == info.mtime * 1000000
+
+ def test_svn_1_3(self):
+ line =" 4784 hpk 2 Jun 01 2004 __init__.py"
+ info = InfoSvnCommand(line)
+ assert info.last_author == 'hpk'
+ assert info.kind == 'file'
+
+ def test_svn_1_3_b(self):
+ line =" 74 autoadmi Oct 06 23:59 plonesolutions.com/"
+ info = InfoSvnCommand(line)
+ assert info.last_author == 'autoadmi'
+ assert info.kind == 'dir'
+
+def test_badchars():
+ py.test.raises(ValueError, "py.path.svnurl('http://host/tmp/@@@:')")
diff --git a/testing/path/test_svnwc.py b/testing/path/test_svnwc.py
index 9e6b524..ad5f055 100644
--- a/testing/path/test_svnwc.py
+++ b/testing/path/test_svnwc.py
@@ -1,549 +1,549 @@
-import py
-import os, sys
-import pytest
-from py._path.svnwc import InfoSvnWCCommand, XMLWCStatus, parse_wcinfotime
-from py._path import svnwc as svncommon
-from svntestbase import CommonSvnTests
-
-def test_make_repo(path1, tmpdir):
- repo = tmpdir.join("repo")
- py.process.cmdexec('svnadmin create %s' % repo)
- if sys.platform == 'win32':
- repo = '/' + str(repo).replace('\\', '/')
- repo = py.path.svnurl("file://%s" % repo)
- wc = py.path.svnwc(tmpdir.join("wc"))
- wc.checkout(repo)
- assert wc.rev == 0
- assert len(wc.listdir()) == 0
- p = wc.join("a_file")
- p.write("test file")
- p.add()
- rev = wc.commit("some test")
- assert p.info().rev == 1
- assert rev == 1
- rev = wc.commit()
- assert rev is None
-
-def pytest_funcarg__path1(request):
- repo, repourl, wc = request.getfuncargvalue("repowc1")
- return wc
-
-class TestWCSvnCommandPath(CommonSvnTests):
- def test_status_attributes_simple(self, path1):
- def assert_nochange(p):
- s = p.status()
- assert not s.modified
- assert not s.prop_modified
- assert not s.added
- assert not s.deleted
- assert not s.replaced
-
- dpath = path1.join('sampledir')
- assert_nochange(path1.join('sampledir'))
- assert_nochange(path1.join('samplefile'))
-
- def test_status_added(self, path1):
- nf = path1.join('newfile')
- nf.write('hello')
- nf.add()
- try:
- s = nf.status()
- assert s.added
- assert not s.modified
- assert not s.prop_modified
- assert not s.replaced
- finally:
- nf.revert()
-
- def test_status_change(self, path1):
- nf = path1.join('samplefile')
- try:
- nf.write(nf.read() + 'change')
- s = nf.status()
- assert not s.added
- assert s.modified
- assert not s.prop_modified
- assert not s.replaced
- finally:
- nf.revert()
-
- def test_status_added_ondirectory(self, path1):
- sampledir = path1.join('sampledir')
- try:
- t2 = sampledir.mkdir('t2')
- t1 = t2.join('t1')
- t1.write('test')
- t1.add()
- s = sampledir.status(rec=1)
- # Comparing just the file names, because paths are unpredictable
- # on Windows. (long vs. 8.3 paths)
- assert t1.basename in [item.basename for item in s.added]
- assert t2.basename in [item.basename for item in s.added]
- finally:
- t2.revert(rec=1)
- t2.localpath.remove(rec=1)
-
- def test_status_unknown(self, path1):
- t1 = path1.join('un1')
- try:
- t1.write('test')
- s = path1.status()
- # Comparing just the file names, because paths are unpredictable
- # on Windows. (long vs. 8.3 paths)
- assert t1.basename in [item.basename for item in s.unknown]
- finally:
- t1.localpath.remove()
-
- def test_status_unchanged(self, path1):
- r = path1
- s = path1.status(rec=1)
- # Comparing just the file names, because paths are unpredictable
- # on Windows. (long vs. 8.3 paths)
- assert r.join('samplefile').basename in [item.basename
- for item in s.unchanged]
- assert r.join('sampledir').basename in [item.basename
- for item in s.unchanged]
- assert r.join('sampledir/otherfile').basename in [item.basename
- for item in s.unchanged]
-
- @pytest.mark.xfail(reason="svn-1.7 has buggy 'status --xml' output")
- def test_status_update(self, path1):
- r = path1
- try:
- r.update(rev=1)
- s = r.status(updates=1, rec=1)
- # Comparing just the file names, because paths are unpredictable
- # on Windows. (long vs. 8.3 paths)
- py.std.pprint.pprint(s.allpath())
- assert r.join('anotherfile').basename in [item.basename for
- item in s.update_available]
- #assert len(s.update_available) == 1
- finally:
- r.update()
-
- def test_status_replaced(self, path1):
- p = path1.join("samplefile")
- p.remove()
- p.ensure(dir=0)
- try:
- s = path1.status()
- assert p.basename in [item.basename for item in s.replaced]
- finally:
- path1.revert(rec=1)
-
- def test_status_ignored(self, path1):
- try:
- d = path1.join('sampledir')
- p = py.path.local(d).join('ignoredfile')
- p.ensure(file=True)
- s = d.status()
- assert [x.basename for x in s.unknown] == ['ignoredfile']
- assert [x.basename for x in s.ignored] == []
- d.propset('svn:ignore', 'ignoredfile')
- s = d.status()
- assert [x.basename for x in s.unknown] == []
- assert [x.basename for x in s.ignored] == ['ignoredfile']
- finally:
- path1.revert(rec=1)
-
- def test_status_conflict(self, path1, tmpdir):
- wc = path1
- wccopy = py.path.svnwc(tmpdir.join("conflict_copy"))
- wccopy.checkout(wc.url)
- p = wc.ensure('conflictsamplefile', file=1)
- p.write('foo')
- wc.commit('added conflictsamplefile')
- wccopy.update()
- assert wccopy.join('conflictsamplefile').check()
- p.write('bar')
- wc.commit('wrote some data')
- wccopy.join('conflictsamplefile').write('baz')
- wccopy.update(interactive=False)
- s = wccopy.status()
- assert [x.basename for x in s.conflict] == ['conflictsamplefile']
-
- def test_status_external(self, path1, repowc2):
- otherrepo, otherrepourl, otherwc = repowc2
- d = path1.ensure('sampledir', dir=1)
- try:
- d.update()
- d.propset('svn:externals', 'otherwc %s' % (otherwc.url,))
- d.update()
- s = d.status()
- assert [x.basename for x in s.external] == ['otherwc']
- assert 'otherwc' not in [x.basename for x in s.unchanged]
- s = d.status(rec=1)
- assert [x.basename for x in s.external] == ['otherwc']
- assert 'otherwc' in [x.basename for x in s.unchanged]
- finally:
- path1.revert(rec=1)
-
- def test_status_deleted(self, path1):
- d = path1.ensure('sampledir', dir=1)
- d.remove()
- d.ensure(dir=1)
- path1.commit()
- d.ensure('deletefile', dir=0)
- d.commit()
- s = d.status()
- assert 'deletefile' in [x.basename for x in s.unchanged]
- assert not s.deleted
- p = d.join('deletefile')
- p.remove()
- s = d.status()
- assert 'deletefile' not in s.unchanged
- assert [x.basename for x in s.deleted] == ['deletefile']
-
- def test_status_noauthor(self, path1):
- # testing for XML without author - this used to raise an exception
- xml = '''\
- <entry path="/tmp/pytest-23/wc">
- <wc-status item="normal" props="none" revision="0">
- <commit revision="0">
- <date>2008-08-19T16:50:53.400198Z</date>
- </commit>
- </wc-status>
- </entry>
- '''
- XMLWCStatus.fromstring(xml, path1)
-
- def test_status_wrong_xml(self, path1):
- # testing for XML without author - this used to raise an exception
- xml = '<entry path="/home/jean/zope/venv/projectdb/parts/development-products/DataGridField">\n<wc-status item="incomplete" props="none" revision="784">\n</wc-status>\n</entry>'
- st = XMLWCStatus.fromstring(xml, path1)
- assert len(st.incomplete) == 1
-
- def test_diff(self, path1):
- p = path1 / 'anotherfile'
- out = p.diff(rev=2)
- assert out.find('hello') != -1
-
- def test_blame(self, path1):
- p = path1.join('samplepickle')
- lines = p.blame()
- assert sum([l[0] for l in lines]) == len(lines)
- for l1, l2 in zip(p.readlines(), [l[2] for l in lines]):
- assert l1 == l2
- assert [l[1] for l in lines] == ['hpk'] * len(lines)
- p = path1.join('samplefile')
- lines = p.blame()
- assert sum([l[0] for l in lines]) == len(lines)
- for l1, l2 in zip(p.readlines(), [l[2] for l in lines]):
- assert l1 == l2
- assert [l[1] for l in lines] == ['hpk'] * len(lines)
-
- def test_join_abs(self, path1):
- s = str(path1.localpath)
- n = path1.join(s, abs=1)
- assert path1 == n
-
- def test_join_abs2(self, path1):
- assert path1.join('samplefile', abs=1) == path1.join('samplefile')
-
- def test_str_gives_localpath(self, path1):
- assert str(path1) == str(path1.localpath)
-
- def test_versioned(self, path1):
- assert path1.check(versioned=1)
- # TODO: Why does my copy of svn think .svn is versioned?
- #assert path1.join('.svn').check(versioned=0)
- assert path1.join('samplefile').check(versioned=1)
- assert not path1.join('notexisting').check(versioned=1)
- notexisting = path1.join('hello').localpath
- try:
- notexisting.write("")
- assert path1.join('hello').check(versioned=0)
- finally:
- notexisting.remove()
-
- def test_listdir_versioned(self, path1):
- assert path1.check(versioned=1)
- p = path1.localpath.ensure("not_a_versioned_file")
- l = [x.localpath
- for x in path1.listdir(lambda x: x.check(versioned=True))]
- assert p not in l
-
- def test_nonversioned_remove(self, path1):
- assert path1.check(versioned=1)
- somefile = path1.join('nonversioned/somefile')
- nonwc = py.path.local(somefile)
- nonwc.ensure()
- assert somefile.check()
- assert not somefile.check(versioned=True)
- somefile.remove() # this used to fail because it tried to 'svn rm'
-
- def test_properties(self, path1):
- try:
- path1.propset('gaga', 'this')
- assert path1.propget('gaga') == 'this'
- # Comparing just the file names, because paths are unpredictable
- # on Windows. (long vs. 8.3 paths)
- assert path1.basename in [item.basename for item in
- path1.status().prop_modified]
- assert 'gaga' in path1.proplist()
- assert path1.proplist()['gaga'] == 'this'
-
- finally:
- path1.propdel('gaga')
-
- def test_proplist_recursive(self, path1):
- s = path1.join('samplefile')
- s.propset('gugu', 'that')
- try:
- p = path1.proplist(rec=1)
- # Comparing just the file names, because paths are unpredictable
- # on Windows. (long vs. 8.3 paths)
- assert (path1 / 'samplefile').basename in [item.basename
- for item in p]
- finally:
- s.propdel('gugu')
-
- def test_long_properties(self, path1):
- value = """
- vadm:posix : root root 0100755
- Properties on 'chroot/dns/var/bind/db.net.xots':
- """
- try:
- path1.propset('gaga', value)
- backvalue = path1.propget('gaga')
- assert backvalue == value
- #assert len(backvalue.split('\n')) == 1
- finally:
- path1.propdel('gaga')
-
-
- def test_ensure(self, path1):
- newpath = path1.ensure('a', 'b', 'c')
- try:
- assert newpath.check(exists=1, versioned=1)
- newpath.write("hello")
- newpath.ensure()
- assert newpath.read() == "hello"
- finally:
- path1.join('a').remove(force=1)
-
- def test_not_versioned(self, path1):
- p = path1.localpath.mkdir('whatever')
- f = path1.localpath.ensure('testcreatedfile')
- try:
- assert path1.join('whatever').check(versioned=0)
- assert path1.join('testcreatedfile').check(versioned=0)
- assert not path1.join('testcreatedfile').check(versioned=1)
- finally:
- p.remove(rec=1)
- f.remove()
-
- def test_lock_unlock(self, path1):
- root = path1
- somefile = root.join('somefile')
- somefile.ensure(file=True)
- # not yet added to repo
- py.test.raises(Exception, 'somefile.lock()')
- somefile.write('foo')
- somefile.commit('test')
- assert somefile.check(versioned=True)
- somefile.lock()
- try:
- locked = root.status().locked
- assert len(locked) == 1
- assert locked[0].basename == somefile.basename
- assert locked[0].dirpath().basename == somefile.dirpath().basename
- #assert somefile.locked()
- py.test.raises(Exception, 'somefile.lock()')
- finally:
- somefile.unlock()
- #assert not somefile.locked()
- locked = root.status().locked
- assert locked == []
-            py.test.raises(Exception, 'somefile.unlock()')
- somefile.remove()
-
- def test_commit_nonrecursive(self, path1):
- somedir = path1.join('sampledir')
- somedir.mkdir("subsubdir")
- somedir.propset('foo', 'bar')
- status = somedir.status()
- assert len(status.prop_modified) == 1
- assert len(status.added) == 1
-
- somedir.commit('non-recursive commit', rec=0)
- status = somedir.status()
- assert len(status.prop_modified) == 0
- assert len(status.added) == 1
-
- somedir.commit('recursive commit')
- status = somedir.status()
- assert len(status.prop_modified) == 0
- assert len(status.added) == 0
-
- def test_commit_return_value(self, path1):
- testfile = path1.join('test.txt').ensure(file=True)
- testfile.write('test')
- rev = path1.commit('testing')
- assert type(rev) == int
-
- anotherfile = path1.join('another.txt').ensure(file=True)
- anotherfile.write('test')
- rev2 = path1.commit('testing more')
- assert type(rev2) == int
- assert rev2 == rev + 1
-
- #def test_log(self, path1):
- # l = path1.log()
- # assert len(l) == 3 # might need to be upped if more tests are added
-
-class XTestWCSvnCommandPathSpecial:
-
- rooturl = 'http://codespeak.net/svn/py.path/trunk/dist/py.path/test/data'
- #def test_update_none_rev(self, path1):
- # path = tmpdir.join('checkouttest')
- # wcpath = newpath(xsvnwc=str(path), url=path1url)
- # try:
- # wcpath.checkout(rev=2100)
- # wcpath.update()
- # assert wcpath.info().rev > 2100
- # finally:
- # wcpath.localpath.remove(rec=1)
-
-def test_parse_wcinfotime():
- assert (parse_wcinfotime('2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)') ==
- 1149021926)
- assert (parse_wcinfotime('2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)') ==
- 1067287394)
-
-class TestInfoSvnWCCommand:
-
- def test_svn_1_2(self, path1):
- output = """
- Path: test_svnwc.py
- Name: test_svnwc.py
- URL: http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py
- Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
- Revision: 28137
- Node Kind: file
- Schedule: normal
- Last Changed Author: jan
- Last Changed Rev: 27939
- Last Changed Date: 2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)
- Text Last Updated: 2006-06-01 00:42:53 +0200 (Thu, 01 Jun 2006)
- Properties Last Updated: 2006-05-23 11:54:59 +0200 (Tue, 23 May 2006)
- Checksum: 357e44880e5d80157cc5fbc3ce9822e3
- """
- path = py.path.local(__file__).dirpath().chdir()
- try:
- info = InfoSvnWCCommand(output)
- finally:
- path.chdir()
- assert info.last_author == 'jan'
- assert info.kind == 'file'
- assert info.mtime == 1149021926.0
- assert info.url == 'http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py'
- assert info.time == 1149021926000000.0
- assert info.rev == 28137
-
-
- def test_svn_1_3(self, path1):
- output = """
- Path: test_svnwc.py
- Name: test_svnwc.py
- URL: http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py
- Repository Root: http://codespeak.net/svn
- Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
- Revision: 28124
- Node Kind: file
- Schedule: normal
- Last Changed Author: jan
- Last Changed Rev: 27939
- Last Changed Date: 2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)
- Text Last Updated: 2006-06-02 23:46:11 +0200 (Fri, 02 Jun 2006)
- Properties Last Updated: 2006-06-02 23:45:28 +0200 (Fri, 02 Jun 2006)
- Checksum: 357e44880e5d80157cc5fbc3ce9822e3
- """
- path = py.path.local(__file__).dirpath().chdir()
- try:
- info = InfoSvnWCCommand(output)
- finally:
- path.chdir()
- assert info.last_author == 'jan'
- assert info.kind == 'file'
- assert info.mtime == 1149021926.0
- assert info.url == 'http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py'
- assert info.rev == 28124
- assert info.time == 1149021926000000.0
-
-
-def test_characters_at():
- py.test.raises(ValueError, "py.path.svnwc('/tmp/@@@:')")
-
-def test_characters_tilde():
- py.path.svnwc('/tmp/test~')
-
-
-class TestRepo:
- def test_trailing_slash_is_stripped(self, path1):
- # XXX we need to test more normalizing properties
- url = path1.join("/")
- assert path1 == url
-
- #def test_different_revs_compare_unequal(self, path1):
- # newpath = path1.new(rev=1199)
- # assert newpath != path1
-
- def test_exists_svn_root(self, path1):
- assert path1.check()
-
- #def test_not_exists_rev(self, path1):
- # url = path1.__class__(path1url, rev=500)
- # assert url.check(exists=0)
-
- #def test_nonexisting_listdir_rev(self, path1):
- # url = path1.__class__(path1url, rev=500)
- # raises(py.error.ENOENT, url.listdir)
-
- #def test_newrev(self, path1):
- # url = path1.new(rev=None)
- # assert url.rev == None
- # assert url.strpath == path1.strpath
- # url = path1.new(rev=10)
- # assert url.rev == 10
-
- #def test_info_rev(self, path1):
- # url = path1.__class__(path1url, rev=1155)
- # url = url.join("samplefile")
- # res = url.info()
- # assert res.size > len("samplefile") and res.created_rev == 1155
-
- # the following tests are easier if we have a path class
- def test_repocache_simple(self, path1):
- repocache = svncommon.RepoCache()
- repocache.put(path1.strpath, 42)
- url, rev = repocache.get(path1.join('test').strpath)
- assert rev == 42
- assert url == path1.strpath
-
- def test_repocache_notimeout(self, path1):
- repocache = svncommon.RepoCache()
- repocache.timeout = 0
- repocache.put(path1.strpath, path1.rev)
- url, rev = repocache.get(path1.strpath)
- assert rev == -1
- assert url == path1.strpath
-
- def test_repocache_outdated(self, path1):
- repocache = svncommon.RepoCache()
- repocache.put(path1.strpath, 42, timestamp=0)
- url, rev = repocache.get(path1.join('test').strpath)
- assert rev == -1
- assert url == path1.strpath
-
- def _test_getreporev(self):
- """ this test runs so slow it's usually disabled """
- old = svncommon.repositories.repos
- try:
- _repocache.clear()
- root = path1.new(rev=-1)
- url, rev = cache.repocache.get(root.strpath)
- assert rev>=0
- assert url == svnrepourl
- finally:
- repositories.repos = old
+import py
+import os, sys
+import pytest
+from py._path.svnwc import InfoSvnWCCommand, XMLWCStatus, parse_wcinfotime
+from py._path import svnwc as svncommon
+from svntestbase import CommonSvnTests
+
+def test_make_repo(path1, tmpdir):
+ repo = tmpdir.join("repo")
+ py.process.cmdexec('svnadmin create %s' % repo)
+ if sys.platform == 'win32':
+ repo = '/' + str(repo).replace('\\', '/')
+ repo = py.path.svnurl("file://%s" % repo)
+ wc = py.path.svnwc(tmpdir.join("wc"))
+ wc.checkout(repo)
+ assert wc.rev == 0
+ assert len(wc.listdir()) == 0
+ p = wc.join("a_file")
+ p.write("test file")
+ p.add()
+ rev = wc.commit("some test")
+ assert p.info().rev == 1
+ assert rev == 1
+ rev = wc.commit()
+ assert rev is None
+
+def pytest_funcarg__path1(request):
+ repo, repourl, wc = request.getfuncargvalue("repowc1")
+ return wc
+
+class TestWCSvnCommandPath(CommonSvnTests):
+ def test_status_attributes_simple(self, path1):
+ def assert_nochange(p):
+ s = p.status()
+ assert not s.modified
+ assert not s.prop_modified
+ assert not s.added
+ assert not s.deleted
+ assert not s.replaced
+
+        dpath = path1.join('sampledir')
+        assert_nochange(dpath)
+ assert_nochange(path1.join('samplefile'))
+
+ def test_status_added(self, path1):
+ nf = path1.join('newfile')
+ nf.write('hello')
+ nf.add()
+ try:
+ s = nf.status()
+ assert s.added
+ assert not s.modified
+ assert not s.prop_modified
+ assert not s.replaced
+ finally:
+ nf.revert()
+
+ def test_status_change(self, path1):
+ nf = path1.join('samplefile')
+ try:
+ nf.write(nf.read() + 'change')
+ s = nf.status()
+ assert not s.added
+ assert s.modified
+ assert not s.prop_modified
+ assert not s.replaced
+ finally:
+ nf.revert()
+
+ def test_status_added_ondirectory(self, path1):
+ sampledir = path1.join('sampledir')
+ try:
+ t2 = sampledir.mkdir('t2')
+ t1 = t2.join('t1')
+ t1.write('test')
+ t1.add()
+ s = sampledir.status(rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert t1.basename in [item.basename for item in s.added]
+ assert t2.basename in [item.basename for item in s.added]
+ finally:
+ t2.revert(rec=1)
+ t2.localpath.remove(rec=1)
+
+ def test_status_unknown(self, path1):
+ t1 = path1.join('un1')
+ try:
+ t1.write('test')
+ s = path1.status()
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert t1.basename in [item.basename for item in s.unknown]
+ finally:
+ t1.localpath.remove()
+
+ def test_status_unchanged(self, path1):
+ r = path1
+ s = path1.status(rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert r.join('samplefile').basename in [item.basename
+ for item in s.unchanged]
+ assert r.join('sampledir').basename in [item.basename
+ for item in s.unchanged]
+ assert r.join('sampledir/otherfile').basename in [item.basename
+ for item in s.unchanged]
+
+ @pytest.mark.xfail(reason="svn-1.7 has buggy 'status --xml' output")
+ def test_status_update(self, path1):
+ r = path1
+ try:
+ r.update(rev=1)
+ s = r.status(updates=1, rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ py.std.pprint.pprint(s.allpath())
+ assert r.join('anotherfile').basename in [item.basename for
+ item in s.update_available]
+ #assert len(s.update_available) == 1
+ finally:
+ r.update()
+
+ def test_status_replaced(self, path1):
+ p = path1.join("samplefile")
+ p.remove()
+ p.ensure(dir=0)
+ try:
+ s = path1.status()
+ assert p.basename in [item.basename for item in s.replaced]
+ finally:
+ path1.revert(rec=1)
+
+ def test_status_ignored(self, path1):
+ try:
+ d = path1.join('sampledir')
+ p = py.path.local(d).join('ignoredfile')
+ p.ensure(file=True)
+ s = d.status()
+ assert [x.basename for x in s.unknown] == ['ignoredfile']
+ assert [x.basename for x in s.ignored] == []
+ d.propset('svn:ignore', 'ignoredfile')
+ s = d.status()
+ assert [x.basename for x in s.unknown] == []
+ assert [x.basename for x in s.ignored] == ['ignoredfile']
+ finally:
+ path1.revert(rec=1)
+
+ def test_status_conflict(self, path1, tmpdir):
+ wc = path1
+ wccopy = py.path.svnwc(tmpdir.join("conflict_copy"))
+ wccopy.checkout(wc.url)
+ p = wc.ensure('conflictsamplefile', file=1)
+ p.write('foo')
+ wc.commit('added conflictsamplefile')
+ wccopy.update()
+ assert wccopy.join('conflictsamplefile').check()
+ p.write('bar')
+ wc.commit('wrote some data')
+ wccopy.join('conflictsamplefile').write('baz')
+ wccopy.update(interactive=False)
+ s = wccopy.status()
+ assert [x.basename for x in s.conflict] == ['conflictsamplefile']
+
+ def test_status_external(self, path1, repowc2):
+ otherrepo, otherrepourl, otherwc = repowc2
+ d = path1.ensure('sampledir', dir=1)
+ try:
+ d.update()
+ d.propset('svn:externals', 'otherwc %s' % (otherwc.url,))
+ d.update()
+ s = d.status()
+ assert [x.basename for x in s.external] == ['otherwc']
+ assert 'otherwc' not in [x.basename for x in s.unchanged]
+ s = d.status(rec=1)
+ assert [x.basename for x in s.external] == ['otherwc']
+ assert 'otherwc' in [x.basename for x in s.unchanged]
+ finally:
+ path1.revert(rec=1)
+
+ def test_status_deleted(self, path1):
+ d = path1.ensure('sampledir', dir=1)
+ d.remove()
+ d.ensure(dir=1)
+ path1.commit()
+ d.ensure('deletefile', dir=0)
+ d.commit()
+ s = d.status()
+ assert 'deletefile' in [x.basename for x in s.unchanged]
+ assert not s.deleted
+ p = d.join('deletefile')
+ p.remove()
+ s = d.status()
+ assert 'deletefile' not in s.unchanged
+ assert [x.basename for x in s.deleted] == ['deletefile']
+
+ def test_status_noauthor(self, path1):
+ # testing for XML without author - this used to raise an exception
+ xml = '''\
+ <entry path="/tmp/pytest-23/wc">
+ <wc-status item="normal" props="none" revision="0">
+ <commit revision="0">
+ <date>2008-08-19T16:50:53.400198Z</date>
+ </commit>
+ </wc-status>
+ </entry>
+ '''
+ XMLWCStatus.fromstring(xml, path1)
+
+ def test_status_wrong_xml(self, path1):
+        # testing for incomplete status XML entries - this used to raise an exception
+ xml = '<entry path="/home/jean/zope/venv/projectdb/parts/development-products/DataGridField">\n<wc-status item="incomplete" props="none" revision="784">\n</wc-status>\n</entry>'
+ st = XMLWCStatus.fromstring(xml, path1)
+ assert len(st.incomplete) == 1
+
+ def test_diff(self, path1):
+ p = path1 / 'anotherfile'
+ out = p.diff(rev=2)
+ assert out.find('hello') != -1
+
+ def test_blame(self, path1):
+ p = path1.join('samplepickle')
+ lines = p.blame()
+ assert sum([l[0] for l in lines]) == len(lines)
+ for l1, l2 in zip(p.readlines(), [l[2] for l in lines]):
+ assert l1 == l2
+ assert [l[1] for l in lines] == ['hpk'] * len(lines)
+ p = path1.join('samplefile')
+ lines = p.blame()
+ assert sum([l[0] for l in lines]) == len(lines)
+ for l1, l2 in zip(p.readlines(), [l[2] for l in lines]):
+ assert l1 == l2
+ assert [l[1] for l in lines] == ['hpk'] * len(lines)
+
+ def test_join_abs(self, path1):
+ s = str(path1.localpath)
+ n = path1.join(s, abs=1)
+ assert path1 == n
+
+ def test_join_abs2(self, path1):
+ assert path1.join('samplefile', abs=1) == path1.join('samplefile')
+
+ def test_str_gives_localpath(self, path1):
+ assert str(path1) == str(path1.localpath)
+
+ def test_versioned(self, path1):
+ assert path1.check(versioned=1)
+ # TODO: Why does my copy of svn think .svn is versioned?
+ #assert path1.join('.svn').check(versioned=0)
+ assert path1.join('samplefile').check(versioned=1)
+ assert not path1.join('notexisting').check(versioned=1)
+ notexisting = path1.join('hello').localpath
+ try:
+ notexisting.write("")
+ assert path1.join('hello').check(versioned=0)
+ finally:
+ notexisting.remove()
+
+ def test_listdir_versioned(self, path1):
+ assert path1.check(versioned=1)
+ p = path1.localpath.ensure("not_a_versioned_file")
+ l = [x.localpath
+ for x in path1.listdir(lambda x: x.check(versioned=True))]
+ assert p not in l
+
+ def test_nonversioned_remove(self, path1):
+ assert path1.check(versioned=1)
+ somefile = path1.join('nonversioned/somefile')
+ nonwc = py.path.local(somefile)
+ nonwc.ensure()
+ assert somefile.check()
+ assert not somefile.check(versioned=True)
+ somefile.remove() # this used to fail because it tried to 'svn rm'
+
+ def test_properties(self, path1):
+ try:
+ path1.propset('gaga', 'this')
+ assert path1.propget('gaga') == 'this'
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert path1.basename in [item.basename for item in
+ path1.status().prop_modified]
+ assert 'gaga' in path1.proplist()
+ assert path1.proplist()['gaga'] == 'this'
+
+ finally:
+ path1.propdel('gaga')
+
+ def test_proplist_recursive(self, path1):
+ s = path1.join('samplefile')
+ s.propset('gugu', 'that')
+ try:
+ p = path1.proplist(rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert (path1 / 'samplefile').basename in [item.basename
+ for item in p]
+ finally:
+ s.propdel('gugu')
+
+ def test_long_properties(self, path1):
+ value = """
+ vadm:posix : root root 0100755
+ Properties on 'chroot/dns/var/bind/db.net.xots':
+ """
+ try:
+ path1.propset('gaga', value)
+ backvalue = path1.propget('gaga')
+ assert backvalue == value
+ #assert len(backvalue.split('\n')) == 1
+ finally:
+ path1.propdel('gaga')
+
+
+ def test_ensure(self, path1):
+ newpath = path1.ensure('a', 'b', 'c')
+ try:
+ assert newpath.check(exists=1, versioned=1)
+ newpath.write("hello")
+ newpath.ensure()
+ assert newpath.read() == "hello"
+ finally:
+ path1.join('a').remove(force=1)
+
+ def test_not_versioned(self, path1):
+ p = path1.localpath.mkdir('whatever')
+ f = path1.localpath.ensure('testcreatedfile')
+ try:
+ assert path1.join('whatever').check(versioned=0)
+ assert path1.join('testcreatedfile').check(versioned=0)
+ assert not path1.join('testcreatedfile').check(versioned=1)
+ finally:
+ p.remove(rec=1)
+ f.remove()
+
+ def test_lock_unlock(self, path1):
+ root = path1
+ somefile = root.join('somefile')
+ somefile.ensure(file=True)
+ # not yet added to repo
+ py.test.raises(Exception, 'somefile.lock()')
+ somefile.write('foo')
+ somefile.commit('test')
+ assert somefile.check(versioned=True)
+ somefile.lock()
+ try:
+ locked = root.status().locked
+ assert len(locked) == 1
+ assert locked[0].basename == somefile.basename
+ assert locked[0].dirpath().basename == somefile.dirpath().basename
+ #assert somefile.locked()
+ py.test.raises(Exception, 'somefile.lock()')
+ finally:
+ somefile.unlock()
+ #assert not somefile.locked()
+ locked = root.status().locked
+ assert locked == []
+            py.test.raises(Exception, 'somefile.unlock()')
+ somefile.remove()
+
+ def test_commit_nonrecursive(self, path1):
+ somedir = path1.join('sampledir')
+ somedir.mkdir("subsubdir")
+ somedir.propset('foo', 'bar')
+ status = somedir.status()
+ assert len(status.prop_modified) == 1
+ assert len(status.added) == 1
+
+ somedir.commit('non-recursive commit', rec=0)
+ status = somedir.status()
+ assert len(status.prop_modified) == 0
+ assert len(status.added) == 1
+
+ somedir.commit('recursive commit')
+ status = somedir.status()
+ assert len(status.prop_modified) == 0
+ assert len(status.added) == 0
+
+ def test_commit_return_value(self, path1):
+ testfile = path1.join('test.txt').ensure(file=True)
+ testfile.write('test')
+ rev = path1.commit('testing')
+ assert type(rev) == int
+
+ anotherfile = path1.join('another.txt').ensure(file=True)
+ anotherfile.write('test')
+ rev2 = path1.commit('testing more')
+ assert type(rev2) == int
+ assert rev2 == rev + 1
+
+ #def test_log(self, path1):
+ # l = path1.log()
+ # assert len(l) == 3 # might need to be upped if more tests are added
+
+class XTestWCSvnCommandPathSpecial:
+
+ rooturl = 'http://codespeak.net/svn/py.path/trunk/dist/py.path/test/data'
+ #def test_update_none_rev(self, path1):
+ # path = tmpdir.join('checkouttest')
+ # wcpath = newpath(xsvnwc=str(path), url=path1url)
+ # try:
+ # wcpath.checkout(rev=2100)
+ # wcpath.update()
+ # assert wcpath.info().rev > 2100
+ # finally:
+ # wcpath.localpath.remove(rec=1)
+
+def test_parse_wcinfotime():
+ assert (parse_wcinfotime('2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)') ==
+ 1149021926)
+ assert (parse_wcinfotime('2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)') ==
+ 1067287394)
+
+class TestInfoSvnWCCommand:
+
+ def test_svn_1_2(self, path1):
+ output = """
+ Path: test_svnwc.py
+ Name: test_svnwc.py
+ URL: http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py
+ Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+ Revision: 28137
+ Node Kind: file
+ Schedule: normal
+ Last Changed Author: jan
+ Last Changed Rev: 27939
+ Last Changed Date: 2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)
+ Text Last Updated: 2006-06-01 00:42:53 +0200 (Thu, 01 Jun 2006)
+ Properties Last Updated: 2006-05-23 11:54:59 +0200 (Tue, 23 May 2006)
+ Checksum: 357e44880e5d80157cc5fbc3ce9822e3
+ """
+ path = py.path.local(__file__).dirpath().chdir()
+ try:
+ info = InfoSvnWCCommand(output)
+ finally:
+ path.chdir()
+ assert info.last_author == 'jan'
+ assert info.kind == 'file'
+ assert info.mtime == 1149021926.0
+ assert info.url == 'http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py'
+ assert info.time == 1149021926000000.0
+ assert info.rev == 28137
+
+
+ def test_svn_1_3(self, path1):
+ output = """
+ Path: test_svnwc.py
+ Name: test_svnwc.py
+ URL: http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py
+ Repository Root: http://codespeak.net/svn
+ Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+ Revision: 28124
+ Node Kind: file
+ Schedule: normal
+ Last Changed Author: jan
+ Last Changed Rev: 27939
+ Last Changed Date: 2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)
+ Text Last Updated: 2006-06-02 23:46:11 +0200 (Fri, 02 Jun 2006)
+ Properties Last Updated: 2006-06-02 23:45:28 +0200 (Fri, 02 Jun 2006)
+ Checksum: 357e44880e5d80157cc5fbc3ce9822e3
+ """
+ path = py.path.local(__file__).dirpath().chdir()
+ try:
+ info = InfoSvnWCCommand(output)
+ finally:
+ path.chdir()
+ assert info.last_author == 'jan'
+ assert info.kind == 'file'
+ assert info.mtime == 1149021926.0
+ assert info.url == 'http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py'
+ assert info.rev == 28124
+ assert info.time == 1149021926000000.0
+
+
+def test_characters_at():
+ py.test.raises(ValueError, "py.path.svnwc('/tmp/@@@:')")
+
+def test_characters_tilde():
+ py.path.svnwc('/tmp/test~')
+
+
+class TestRepo:
+ def test_trailing_slash_is_stripped(self, path1):
+ # XXX we need to test more normalizing properties
+ url = path1.join("/")
+ assert path1 == url
+
+ #def test_different_revs_compare_unequal(self, path1):
+ # newpath = path1.new(rev=1199)
+ # assert newpath != path1
+
+ def test_exists_svn_root(self, path1):
+ assert path1.check()
+
+ #def test_not_exists_rev(self, path1):
+ # url = path1.__class__(path1url, rev=500)
+ # assert url.check(exists=0)
+
+ #def test_nonexisting_listdir_rev(self, path1):
+ # url = path1.__class__(path1url, rev=500)
+ # raises(py.error.ENOENT, url.listdir)
+
+ #def test_newrev(self, path1):
+ # url = path1.new(rev=None)
+ # assert url.rev == None
+ # assert url.strpath == path1.strpath
+ # url = path1.new(rev=10)
+ # assert url.rev == 10
+
+ #def test_info_rev(self, path1):
+ # url = path1.__class__(path1url, rev=1155)
+ # url = url.join("samplefile")
+ # res = url.info()
+ # assert res.size > len("samplefile") and res.created_rev == 1155
+
+ # the following tests are easier if we have a path class
+ def test_repocache_simple(self, path1):
+ repocache = svncommon.RepoCache()
+ repocache.put(path1.strpath, 42)
+ url, rev = repocache.get(path1.join('test').strpath)
+ assert rev == 42
+ assert url == path1.strpath
+
+ def test_repocache_notimeout(self, path1):
+ repocache = svncommon.RepoCache()
+ repocache.timeout = 0
+ repocache.put(path1.strpath, path1.rev)
+ url, rev = repocache.get(path1.strpath)
+ assert rev == -1
+ assert url == path1.strpath
+
+ def test_repocache_outdated(self, path1):
+ repocache = svncommon.RepoCache()
+ repocache.put(path1.strpath, 42, timestamp=0)
+ url, rev = repocache.get(path1.join('test').strpath)
+ assert rev == -1
+ assert url == path1.strpath
+
+ def _test_getreporev(self):
+ """ this test runs so slow it's usually disabled """
+ old = svncommon.repositories.repos
+ try:
+ _repocache.clear()
+ root = path1.new(rev=-1)
+ url, rev = cache.repocache.get(root.strpath)
+ assert rev>=0
+ assert url == svnrepourl
+ finally:
+ repositories.repos = old
diff --git a/testing/process/__init__.py b/testing/process/__init__.py
index 792d600..16e3853 100644
--- a/testing/process/__init__.py
+++ b/testing/process/__init__.py
@@ -1 +1 @@
-#
+#
diff --git a/testing/process/__init__.pyc b/testing/process/__init__.pyc
deleted file mode 100644
index db9c2e8..0000000
--- a/testing/process/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/__pycache__/__init__.cpython-35.pyc b/testing/process/__pycache__/__init__.cpython-35.pyc
deleted file mode 100644
index 20d1840..0000000
--- a/testing/process/__pycache__/__init__.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/__pycache__/test_cmdexec.cpython-27-PYTEST.pyc b/testing/process/__pycache__/test_cmdexec.cpython-27-PYTEST.pyc
deleted file mode 100644
index 6234397..0000000
--- a/testing/process/__pycache__/test_cmdexec.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/__pycache__/test_cmdexec.cpython-35-PYTEST.pyc b/testing/process/__pycache__/test_cmdexec.cpython-35-PYTEST.pyc
deleted file mode 100644
index 111f9b6..0000000
--- a/testing/process/__pycache__/test_cmdexec.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/__pycache__/test_forkedfunc.cpython-27-PYTEST.pyc b/testing/process/__pycache__/test_forkedfunc.cpython-27-PYTEST.pyc
deleted file mode 100644
index 56c6a9a..0000000
--- a/testing/process/__pycache__/test_forkedfunc.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/__pycache__/test_forkedfunc.cpython-35-PYTEST.pyc b/testing/process/__pycache__/test_forkedfunc.cpython-35-PYTEST.pyc
deleted file mode 100644
index 3eac53e..0000000
--- a/testing/process/__pycache__/test_forkedfunc.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/__pycache__/test_killproc.cpython-27-PYTEST.pyc b/testing/process/__pycache__/test_killproc.cpython-27-PYTEST.pyc
deleted file mode 100644
index 365bc7e..0000000
--- a/testing/process/__pycache__/test_killproc.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/__pycache__/test_killproc.cpython-35-PYTEST.pyc b/testing/process/__pycache__/test_killproc.cpython-35-PYTEST.pyc
deleted file mode 100644
index 8c3dfca..0000000
--- a/testing/process/__pycache__/test_killproc.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/process/test_cmdexec.py b/testing/process/test_cmdexec.py
index b539e0a..1dd6c77 100644
--- a/testing/process/test_cmdexec.py
+++ b/testing/process/test_cmdexec.py
@@ -1,39 +1,39 @@
-import py
-from py.process import cmdexec
-
-def exvalue():
- return py.std.sys.exc_info()[1]
-
-class Test_exec_cmd:
- def test_simple(self):
- out = cmdexec('echo hallo')
- assert out.strip() == 'hallo'
- assert py.builtin._istext(out)
-
- def test_simple_newline(self):
- import sys
- out = cmdexec(r"""%s -c "print ('hello')" """ % sys.executable)
- assert out == 'hello\n'
- assert py.builtin._istext(out)
-
- def test_simple_error(self):
- py.test.raises (cmdexec.Error, cmdexec, 'exit 1')
-
- def test_simple_error_exact_status(self):
- try:
- cmdexec('exit 1')
- except cmdexec.Error:
- e = exvalue()
- assert e.status == 1
- assert py.builtin._istext(e.out)
- assert py.builtin._istext(e.err)
-
- def test_err(self):
- try:
- cmdexec('echoqweqwe123 hallo')
- raise AssertionError("command succeeded but shouldn't")
- except cmdexec.Error:
- e = exvalue()
- assert hasattr(e, 'err')
- assert hasattr(e, 'out')
- assert e.err or e.out
+import py
+from py.process import cmdexec
+
+def exvalue():
+ return py.std.sys.exc_info()[1]
+
+class Test_exec_cmd:
+ def test_simple(self):
+ out = cmdexec('echo hallo')
+ assert out.strip() == 'hallo'
+ assert py.builtin._istext(out)
+
+ def test_simple_newline(self):
+ import sys
+ out = cmdexec(r"""%s -c "print ('hello')" """ % sys.executable)
+ assert out == 'hello\n'
+ assert py.builtin._istext(out)
+
+ def test_simple_error(self):
+ py.test.raises (cmdexec.Error, cmdexec, 'exit 1')
+
+ def test_simple_error_exact_status(self):
+ try:
+ cmdexec('exit 1')
+ except cmdexec.Error:
+ e = exvalue()
+ assert e.status == 1
+ assert py.builtin._istext(e.out)
+ assert py.builtin._istext(e.err)
+
+ def test_err(self):
+ try:
+ cmdexec('echoqweqwe123 hallo')
+ raise AssertionError("command succeeded but shouldn't")
+ except cmdexec.Error:
+ e = exvalue()
+ assert hasattr(e, 'err')
+ assert hasattr(e, 'out')
+ assert e.err or e.out
diff --git a/testing/process/test_forkedfunc.py b/testing/process/test_forkedfunc.py
index d4f9f98..0de8b0a 100644
--- a/testing/process/test_forkedfunc.py
+++ b/testing/process/test_forkedfunc.py
@@ -1,177 +1,177 @@
-import pytest
-import py, sys, os
-
-pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
-
-
-def test_waitfinish_removes_tempdir():
- ff = py.process.ForkedFunc(boxf1)
- assert ff.tempdir.check()
- ff.waitfinish()
- assert not ff.tempdir.check()
-
-def test_tempdir_gets_gc_collected(monkeypatch):
- monkeypatch.setattr(os, 'fork', lambda: os.getpid())
- ff = py.process.ForkedFunc(boxf1)
- assert ff.tempdir.check()
- ff.__del__()
- assert not ff.tempdir.check()
-
-def test_basic_forkedfunc():
- result = py.process.ForkedFunc(boxf1).waitfinish()
- assert result.out == "some out\n"
- assert result.err == "some err\n"
- assert result.exitstatus == 0
- assert result.signal == 0
- assert result.retval == 1
-
-def test_exitstatus():
- def func():
- os._exit(4)
- result = py.process.ForkedFunc(func).waitfinish()
- assert result.exitstatus == 4
- assert result.signal == 0
- assert not result.out
- assert not result.err
-
-def test_exception_in_func():
- def fun():
- raise ValueError(42)
- ff = py.process.ForkedFunc(fun)
- result = ff.waitfinish()
- assert result.exitstatus == ff.EXITSTATUS_EXCEPTION
- assert result.err.find("ValueError: 42") != -1
- assert result.signal == 0
- assert not result.retval
-
-def test_forkedfunc_on_fds():
- result = py.process.ForkedFunc(boxf2).waitfinish()
- assert result.out == "someout"
- assert result.err == "someerr"
- assert result.exitstatus == 0
- assert result.signal == 0
- assert result.retval == 2
-
-def test_forkedfunc_on_fds_output():
- result = py.process.ForkedFunc(boxf3).waitfinish()
- assert result.signal == 11
- assert result.out == "s"
-
-
-def test_forkedfunc_on_stdout():
- def boxf3():
- import sys
- sys.stdout.write("hello\n")
- os.kill(os.getpid(), 11)
- result = py.process.ForkedFunc(boxf3).waitfinish()
- assert result.signal == 11
- assert result.out == "hello\n"
-
-def test_forkedfunc_signal():
- result = py.process.ForkedFunc(boxseg).waitfinish()
- assert result.retval is None
- if sys.version_info < (2,4):
- py.test.skip("signal detection does not work with python prior 2.4")
- assert result.signal == 11
-
-def test_forkedfunc_huge_data():
- result = py.process.ForkedFunc(boxhuge).waitfinish()
- assert result.out
- assert result.exitstatus == 0
- assert result.signal == 0
- assert result.retval == 3
-
-def test_box_seq():
- # we run many boxes with huge data, just one after another
- for i in range(50):
- result = py.process.ForkedFunc(boxhuge).waitfinish()
- assert result.out
- assert result.exitstatus == 0
- assert result.signal == 0
- assert result.retval == 3
-
-def test_box_in_a_box():
- def boxfun():
- result = py.process.ForkedFunc(boxf2).waitfinish()
- print (result.out)
- sys.stderr.write(result.err + "\n")
- return result.retval
-
- result = py.process.ForkedFunc(boxfun).waitfinish()
- assert result.out == "someout\n"
- assert result.err == "someerr\n"
- assert result.exitstatus == 0
- assert result.signal == 0
- assert result.retval == 2
-
-def test_kill_func_forked():
- class A:
- pass
- info = A()
- import time
-
- def box_fun():
- time.sleep(10) # we don't want to last forever here
-
- ff = py.process.ForkedFunc(box_fun)
- os.kill(ff.pid, 15)
- result = ff.waitfinish()
- if py.std.sys.version_info < (2,4):
- py.test.skip("signal detection does not work with python prior 2.4")
- assert result.signal == 15
-
-
-def test_hooks(monkeypatch):
- def _boxed():
- return 1
-
- def _on_start():
- sys.stdout.write("some out\n")
- sys.stdout.flush()
-
- def _on_exit():
- sys.stderr.write("some err\n")
- sys.stderr.flush()
-
- result = py.process.ForkedFunc(_boxed, child_on_start=_on_start,
- child_on_exit=_on_exit).waitfinish()
- assert result.out == "some out\n"
- assert result.err == "some err\n"
- assert result.exitstatus == 0
- assert result.signal == 0
- assert result.retval == 1
-
-
-# ======================================================================
-# examples
-# ======================================================================
-#
-
-def boxf1():
- sys.stdout.write("some out\n")
- sys.stderr.write("some err\n")
- return 1
-
-def boxf2():
- os.write(1, "someout".encode('ascii'))
- os.write(2, "someerr".encode('ascii'))
- return 2
-
-def boxf3():
- os.write(1, "s".encode('ascii'))
- os.kill(os.getpid(), 11)
-
-def boxseg():
- os.kill(os.getpid(), 11)
-
-def boxhuge():
- s = " ".encode('ascii')
- os.write(1, s * 10000)
- os.write(2, s * 10000)
- os.write(1, s * 10000)
-
- os.write(1, s * 10000)
- os.write(2, s * 10000)
- os.write(2, s * 10000)
- os.write(1, s * 10000)
- return 3
+import pytest
+import py, sys, os
+
+pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
+
+
+def test_waitfinish_removes_tempdir():
+ ff = py.process.ForkedFunc(boxf1)
+ assert ff.tempdir.check()
+ ff.waitfinish()
+ assert not ff.tempdir.check()
+
+def test_tempdir_gets_gc_collected(monkeypatch):
+ monkeypatch.setattr(os, 'fork', lambda: os.getpid())
+ ff = py.process.ForkedFunc(boxf1)
+ assert ff.tempdir.check()
+ ff.__del__()
+ assert not ff.tempdir.check()
+
+def test_basic_forkedfunc():
+ result = py.process.ForkedFunc(boxf1).waitfinish()
+ assert result.out == "some out\n"
+ assert result.err == "some err\n"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 1
+
+def test_exitstatus():
+ def func():
+ os._exit(4)
+ result = py.process.ForkedFunc(func).waitfinish()
+ assert result.exitstatus == 4
+ assert result.signal == 0
+ assert not result.out
+ assert not result.err
+
+def test_exception_in_func():
+ def fun():
+ raise ValueError(42)
+ ff = py.process.ForkedFunc(fun)
+ result = ff.waitfinish()
+ assert result.exitstatus == ff.EXITSTATUS_EXCEPTION
+ assert result.err.find("ValueError: 42") != -1
+ assert result.signal == 0
+ assert not result.retval
+
+def test_forkedfunc_on_fds():
+ result = py.process.ForkedFunc(boxf2).waitfinish()
+ assert result.out == "someout"
+ assert result.err == "someerr"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 2
+
+def test_forkedfunc_on_fds_output():
+ result = py.process.ForkedFunc(boxf3).waitfinish()
+ assert result.signal == 11
+ assert result.out == "s"
+
+
+def test_forkedfunc_on_stdout():
+ def boxf3():
+ import sys
+ sys.stdout.write("hello\n")
+ os.kill(os.getpid(), 11)
+ result = py.process.ForkedFunc(boxf3).waitfinish()
+ assert result.signal == 11
+ assert result.out == "hello\n"
+
+def test_forkedfunc_signal():
+ result = py.process.ForkedFunc(boxseg).waitfinish()
+ assert result.retval is None
+ if sys.version_info < (2,4):
+ py.test.skip("signal detection does not work with python prior 2.4")
+ assert result.signal == 11
+
+def test_forkedfunc_huge_data():
+ result = py.process.ForkedFunc(boxhuge).waitfinish()
+ assert result.out
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 3
+
+def test_box_seq():
+ # we run many boxes with huge data, just one after another
+ for i in range(50):
+ result = py.process.ForkedFunc(boxhuge).waitfinish()
+ assert result.out
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 3
+
+def test_box_in_a_box():
+ def boxfun():
+ result = py.process.ForkedFunc(boxf2).waitfinish()
+ print (result.out)
+ sys.stderr.write(result.err + "\n")
+ return result.retval
+
+ result = py.process.ForkedFunc(boxfun).waitfinish()
+ assert result.out == "someout\n"
+ assert result.err == "someerr\n"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 2
+
+def test_kill_func_forked():
+ class A:
+ pass
+ info = A()
+ import time
+
+ def box_fun():
+ time.sleep(10) # we don't want to last forever here
+
+ ff = py.process.ForkedFunc(box_fun)
+ os.kill(ff.pid, 15)
+ result = ff.waitfinish()
+ if py.std.sys.version_info < (2,4):
+ py.test.skip("signal detection does not work with python prior 2.4")
+ assert result.signal == 15
+
+
+def test_hooks(monkeypatch):
+ def _boxed():
+ return 1
+
+ def _on_start():
+ sys.stdout.write("some out\n")
+ sys.stdout.flush()
+
+ def _on_exit():
+ sys.stderr.write("some err\n")
+ sys.stderr.flush()
+
+ result = py.process.ForkedFunc(_boxed, child_on_start=_on_start,
+ child_on_exit=_on_exit).waitfinish()
+ assert result.out == "some out\n"
+ assert result.err == "some err\n"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 1
+
+
+# ======================================================================
+# examples
+# ======================================================================
+#
+
+def boxf1():
+ sys.stdout.write("some out\n")
+ sys.stderr.write("some err\n")
+ return 1
+
+def boxf2():
+ os.write(1, "someout".encode('ascii'))
+ os.write(2, "someerr".encode('ascii'))
+ return 2
+
+def boxf3():
+ os.write(1, "s".encode('ascii'))
+ os.kill(os.getpid(), 11)
+
+def boxseg():
+ os.kill(os.getpid(), 11)
+
+def boxhuge():
+ s = " ".encode('ascii')
+ os.write(1, s * 10000)
+ os.write(2, s * 10000)
+ os.write(1, s * 10000)
+
+ os.write(1, s * 10000)
+ os.write(2, s * 10000)
+ os.write(2, s * 10000)
+ os.write(1, s * 10000)
+ return 3
diff --git a/testing/process/test_killproc.py b/testing/process/test_killproc.py
index 57088e1..16279e6 100644
--- a/testing/process/test_killproc.py
+++ b/testing/process/test_killproc.py
@@ -1,16 +1,16 @@
-
-import py, sys
-
-@py.test.mark.skipif("sys.platform.startswith('java')")
-def test_kill(tmpdir):
- subprocess = py.test.importorskip("subprocess")
- t = tmpdir.join("t.py")
- t.write("import time ; time.sleep(100)")
- proc = py.std.subprocess.Popen([sys.executable, str(t)])
- assert proc.poll() is None # no return value yet
- py.process.kill(proc.pid)
- ret = proc.wait()
- if sys.platform == "win32" and ret == 0:
- py.test.skip("XXX on win32, subprocess.Popen().wait() on a killed "
- "process does not yield return value != 0")
- assert ret != 0
+
+import py, sys
+
+@py.test.mark.skipif("sys.platform.startswith('java')")
+def test_kill(tmpdir):
+ subprocess = py.test.importorskip("subprocess")
+ t = tmpdir.join("t.py")
+ t.write("import time ; time.sleep(100)")
+ proc = py.std.subprocess.Popen([sys.executable, str(t)])
+ assert proc.poll() is None # no return value yet
+ py.process.kill(proc.pid)
+ ret = proc.wait()
+ if sys.platform == "win32" and ret == 0:
+ py.test.skip("XXX on win32, subprocess.Popen().wait() on a killed "
+ "process does not yield return value != 0")
+ assert ret != 0
diff --git a/testing/root/__init__.py b/testing/root/__init__.py
index 792d600..16e3853 100644
--- a/testing/root/__init__.py
+++ b/testing/root/__init__.py
@@ -1 +1 @@
-#
+#
diff --git a/testing/root/__init__.pyc b/testing/root/__init__.pyc
deleted file mode 100644
index 3434b3a..0000000
--- a/testing/root/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/__init__.cpython-35.pyc b/testing/root/__pycache__/__init__.cpython-35.pyc
deleted file mode 100644
index 7faca79..0000000
--- a/testing/root/__pycache__/__init__.cpython-35.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_builtin.cpython-27-PYTEST.pyc b/testing/root/__pycache__/test_builtin.cpython-27-PYTEST.pyc
deleted file mode 100644
index 01b3526..0000000
--- a/testing/root/__pycache__/test_builtin.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_builtin.cpython-35-PYTEST.pyc b/testing/root/__pycache__/test_builtin.cpython-35-PYTEST.pyc
deleted file mode 100644
index c1b6a6e..0000000
--- a/testing/root/__pycache__/test_builtin.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_error.cpython-27-PYTEST.pyc b/testing/root/__pycache__/test_error.cpython-27-PYTEST.pyc
deleted file mode 100644
index da13cc1..0000000
--- a/testing/root/__pycache__/test_error.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_error.cpython-35-PYTEST.pyc b/testing/root/__pycache__/test_error.cpython-35-PYTEST.pyc
deleted file mode 100644
index c90e872..0000000
--- a/testing/root/__pycache__/test_error.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_py_imports.cpython-27-PYTEST.pyc b/testing/root/__pycache__/test_py_imports.cpython-27-PYTEST.pyc
deleted file mode 100644
index d3b4732..0000000
--- a/testing/root/__pycache__/test_py_imports.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_py_imports.cpython-35-PYTEST.pyc b/testing/root/__pycache__/test_py_imports.cpython-35-PYTEST.pyc
deleted file mode 100644
index 2eb9547..0000000
--- a/testing/root/__pycache__/test_py_imports.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_std.cpython-27-PYTEST.pyc b/testing/root/__pycache__/test_std.cpython-27-PYTEST.pyc
deleted file mode 100644
index 3cc7697..0000000
--- a/testing/root/__pycache__/test_std.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_std.cpython-35-PYTEST.pyc b/testing/root/__pycache__/test_std.cpython-35-PYTEST.pyc
deleted file mode 100644
index 1e8900d..0000000
--- a/testing/root/__pycache__/test_std.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_xmlgen.cpython-27-PYTEST.pyc b/testing/root/__pycache__/test_xmlgen.cpython-27-PYTEST.pyc
deleted file mode 100644
index a6df95a..0000000
--- a/testing/root/__pycache__/test_xmlgen.cpython-27-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/__pycache__/test_xmlgen.cpython-35-PYTEST.pyc b/testing/root/__pycache__/test_xmlgen.cpython-35-PYTEST.pyc
deleted file mode 100644
index ddde425..0000000
--- a/testing/root/__pycache__/test_xmlgen.cpython-35-PYTEST.pyc
+++ /dev/null
Binary files differ
diff --git a/testing/root/test_builtin.py b/testing/root/test_builtin.py
index a6f1a3c..c0d804e 100644
--- a/testing/root/test_builtin.py
+++ b/testing/root/test_builtin.py
@@ -1,179 +1,179 @@
-import sys
-import types
-import py
-from py.builtin import set, frozenset, reversed, sorted
-
-def test_enumerate():
- l = [0,1,2]
- for i,x in enumerate(l):
- assert i == x
-
-def test_any():
- assert not py.builtin.any([0,False, None])
- assert py.builtin.any([0,False, None,1])
-
-def test_all():
- assert not py.builtin.all([True, 1, False])
- assert py.builtin.all([True, 1, object])
-
-def test_BaseException():
- assert issubclass(IndexError, py.builtin.BaseException)
- assert issubclass(Exception, py.builtin.BaseException)
- assert issubclass(KeyboardInterrupt, py.builtin.BaseException)
-
- class MyRandomClass(object):
- pass
- assert not issubclass(MyRandomClass, py.builtin.BaseException)
-
- assert py.builtin.BaseException.__module__ in ('exceptions', 'builtins')
- assert Exception.__name__ == 'Exception'
-
-
-def test_GeneratorExit():
- assert py.builtin.GeneratorExit.__module__ in ('exceptions', 'builtins')
- assert issubclass(py.builtin.GeneratorExit, py.builtin.BaseException)
-
-def test_reversed():
- reversed = py.builtin.reversed
- r = reversed("hello")
- assert iter(r) is r
- s = "".join(list(r))
- assert s == "olleh"
- assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o']
- py.test.raises(TypeError, reversed, reversed("hello"))
-
-def test_simple():
- s = set([1, 2, 3, 4])
- assert s == set([3, 4, 2, 1])
- s1 = s.union(set([5, 6]))
- assert 5 in s1
- assert 1 in s1
-
-def test_frozenset():
- s = set([frozenset([0, 1]), frozenset([1, 0])])
- assert len(s) == 1
-
-def test_sorted():
- if sorted == py.builtin.sorted:
- return # don't test a real builtin
- for s in [py.builtin.sorted]:
- def test():
- assert s([3, 2, 1]) == [1, 2, 3]
- assert s([1, 2, 3], reverse=True) == [3, 2, 1]
- l = s([1, 2, 3, 4, 5, 6], key=lambda x: x % 2)
- assert l == [2, 4, 6, 1, 3, 5]
- l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y))
- assert l == [4, 3, 2, 1]
- l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y),
- key=lambda x: x % 2)
- assert l == [1, 3, 2, 4]
-
- def compare(x, y):
- assert type(x) == str
- assert type(y) == str
- return cmp(x, y)
- data = 'The quick Brown fox Jumped over The lazy Dog'.split()
- s(data, cmp=compare, key=str.lower)
- yield test
-
-
-def test_print_simple():
- from py.builtin import print_
- py.test.raises(TypeError, "print_(hello=3)")
- f = py.io.TextIO()
- print_("hello", "world", file=f)
- s = f.getvalue()
- assert s == "hello world\n"
-
- f = py.io.TextIO()
- print_("hello", end="", file=f)
- s = f.getvalue()
- assert s == "hello"
-
- f = py.io.TextIO()
- print_("xyz", "abc", sep="", end="", file=f)
- s = f.getvalue()
- assert s == "xyzabc"
-
- class X:
- def __repr__(self): return "rep"
- f = py.io.TextIO()
- print_(X(), file=f)
- assert f.getvalue() == "rep\n"
-
-def test_execfile(tmpdir):
- test_file = tmpdir.join("test.py")
- test_file.write("x = y\ndef f(): pass")
- ns = {"y" : 42}
- py.builtin.execfile(str(test_file), ns)
- assert ns["x"] == 42
- assert py.code.getrawcode(ns["f"]).co_filename == str(test_file)
- class A:
- y = 3
- x = 4
- py.builtin.execfile(str(test_file))
- assert A.x == 3
-
-def test_getfuncdict():
- def f():
- pass
- f.x = 4
- assert py.builtin._getfuncdict(f)["x"] == 4
- assert py.builtin._getfuncdict(2) is None
-
-def test_callable():
- class A: pass
- assert py.builtin.callable(test_callable)
- assert py.builtin.callable(A)
- assert py.builtin.callable(list)
- assert py.builtin.callable(id)
- assert not py.builtin.callable(4)
- assert not py.builtin.callable("hi")
-
-def test_totext():
- py.builtin._totext("hello", "UTF-8")
-
-def test_bytes_text():
- if sys.version_info[0] < 3:
- assert py.builtin.text == unicode
- assert py.builtin.bytes == str
- else:
- assert py.builtin.text == str
- assert py.builtin.bytes == bytes
-
-def test_totext_badutf8():
- # this was in printouts within the pytest testsuite
- # totext would fail
- if sys.version_info >= (3,):
- errors = 'surrogateescape'
- else: # old python has crappy error handlers
- errors = 'replace'
- py.builtin._totext("\xa6", "UTF-8", errors)
-
-def test_reraise():
- from py.builtin import _reraise
- try:
- raise Exception()
- except Exception:
- cls, val, tb = sys.exc_info()
- excinfo = py.test.raises(Exception, "_reraise(cls, val, tb)")
-
-def test_exec():
- l = []
- py.builtin.exec_("l.append(1)")
- assert l == [1]
- d = {}
- py.builtin.exec_("x=4", d)
- assert d['x'] == 4
-
-def test_tryimport():
- py.test.raises(ImportError, py.builtin._tryimport, 'xqwe123')
- x = py.builtin._tryimport('asldkajsdl', 'py')
- assert x == py
- x = py.builtin._tryimport('asldkajsdl', 'py.path')
- assert x == py.path
-
-def test_getcode():
- code = py.builtin._getcode(test_getcode)
- assert isinstance(code, types.CodeType)
- assert py.builtin._getcode(4) is None
+import sys
+import types
+import py
+from py.builtin import set, frozenset, reversed, sorted
+
+def test_enumerate():
+ l = [0,1,2]
+ for i,x in enumerate(l):
+ assert i == x
+
+def test_any():
+ assert not py.builtin.any([0,False, None])
+ assert py.builtin.any([0,False, None,1])
+
+def test_all():
+ assert not py.builtin.all([True, 1, False])
+ assert py.builtin.all([True, 1, object])
+
+def test_BaseException():
+ assert issubclass(IndexError, py.builtin.BaseException)
+ assert issubclass(Exception, py.builtin.BaseException)
+ assert issubclass(KeyboardInterrupt, py.builtin.BaseException)
+
+ class MyRandomClass(object):
+ pass
+ assert not issubclass(MyRandomClass, py.builtin.BaseException)
+
+ assert py.builtin.BaseException.__module__ in ('exceptions', 'builtins')
+ assert Exception.__name__ == 'Exception'
+
+
+def test_GeneratorExit():
+ assert py.builtin.GeneratorExit.__module__ in ('exceptions', 'builtins')
+ assert issubclass(py.builtin.GeneratorExit, py.builtin.BaseException)
+
+def test_reversed():
+ reversed = py.builtin.reversed
+ r = reversed("hello")
+ assert iter(r) is r
+ s = "".join(list(r))
+ assert s == "olleh"
+ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o']
+ py.test.raises(TypeError, reversed, reversed("hello"))
+
+def test_simple():
+ s = set([1, 2, 3, 4])
+ assert s == set([3, 4, 2, 1])
+ s1 = s.union(set([5, 6]))
+ assert 5 in s1
+ assert 1 in s1
+
+def test_frozenset():
+ s = set([frozenset([0, 1]), frozenset([1, 0])])
+ assert len(s) == 1
+
+def test_sorted():
+ if sorted == py.builtin.sorted:
+ return # don't test a real builtin
+ for s in [py.builtin.sorted]:
+ def test():
+ assert s([3, 2, 1]) == [1, 2, 3]
+ assert s([1, 2, 3], reverse=True) == [3, 2, 1]
+ l = s([1, 2, 3, 4, 5, 6], key=lambda x: x % 2)
+ assert l == [2, 4, 6, 1, 3, 5]
+ l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y))
+ assert l == [4, 3, 2, 1]
+ l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y),
+ key=lambda x: x % 2)
+ assert l == [1, 3, 2, 4]
+
+ def compare(x, y):
+ assert type(x) == str
+ assert type(y) == str
+ return cmp(x, y)
+ data = 'The quick Brown fox Jumped over The lazy Dog'.split()
+ s(data, cmp=compare, key=str.lower)
+ yield test
+
+
+def test_print_simple():
+ from py.builtin import print_
+ py.test.raises(TypeError, "print_(hello=3)")
+ f = py.io.TextIO()
+ print_("hello", "world", file=f)
+ s = f.getvalue()
+ assert s == "hello world\n"
+
+ f = py.io.TextIO()
+ print_("hello", end="", file=f)
+ s = f.getvalue()
+ assert s == "hello"
+
+ f = py.io.TextIO()
+ print_("xyz", "abc", sep="", end="", file=f)
+ s = f.getvalue()
+ assert s == "xyzabc"
+
+ class X:
+ def __repr__(self): return "rep"
+ f = py.io.TextIO()
+ print_(X(), file=f)
+ assert f.getvalue() == "rep\n"
+
+def test_execfile(tmpdir):
+ test_file = tmpdir.join("test.py")
+ test_file.write("x = y\ndef f(): pass")
+ ns = {"y" : 42}
+ py.builtin.execfile(str(test_file), ns)
+ assert ns["x"] == 42
+ assert py.code.getrawcode(ns["f"]).co_filename == str(test_file)
+ class A:
+ y = 3
+ x = 4
+ py.builtin.execfile(str(test_file))
+ assert A.x == 3
+
+def test_getfuncdict():
+ def f():
+ pass
+ f.x = 4
+ assert py.builtin._getfuncdict(f)["x"] == 4
+ assert py.builtin._getfuncdict(2) is None
+
+def test_callable():
+ class A: pass
+ assert py.builtin.callable(test_callable)
+ assert py.builtin.callable(A)
+ assert py.builtin.callable(list)
+ assert py.builtin.callable(id)
+ assert not py.builtin.callable(4)
+ assert not py.builtin.callable("hi")
+
+def test_totext():
+ py.builtin._totext("hello", "UTF-8")
+
+def test_bytes_text():
+ if sys.version_info[0] < 3:
+ assert py.builtin.text == unicode
+ assert py.builtin.bytes == str
+ else:
+ assert py.builtin.text == str
+ assert py.builtin.bytes == bytes
+
+def test_totext_badutf8():
+ # this was in printouts within the pytest testsuite
+ # totext would fail
+ if sys.version_info >= (3,):
+ errors = 'surrogateescape'
+ else: # old python has crappy error handlers
+ errors = 'replace'
+ py.builtin._totext("\xa6", "UTF-8", errors)
+
+def test_reraise():
+ from py.builtin import _reraise
+ try:
+ raise Exception()
+ except Exception:
+ cls, val, tb = sys.exc_info()
+ excinfo = py.test.raises(Exception, "_reraise(cls, val, tb)")
+
+def test_exec():
+ l = []
+ py.builtin.exec_("l.append(1)")
+ assert l == [1]
+ d = {}
+ py.builtin.exec_("x=4", d)
+ assert d['x'] == 4
+
+def test_tryimport():
+ py.test.raises(ImportError, py.builtin._tryimport, 'xqwe123')
+ x = py.builtin._tryimport('asldkajsdl', 'py')
+ assert x == py
+ x = py.builtin._tryimport('asldkajsdl', 'py.path')
+ assert x == py.path
+
+def test_getcode():
+ code = py.builtin._getcode(test_getcode)
+ assert isinstance(code, types.CodeType)
+ assert py.builtin._getcode(4) is None
diff --git a/testing/root/test_error.py b/testing/root/test_error.py
index a34e006..99e0798 100644
--- a/testing/root/test_error.py
+++ b/testing/root/test_error.py
@@ -1,37 +1,59 @@
-
-import py
-
-import errno
-
-def test_error_classes():
- for name in errno.errorcode.values():
- x = getattr(py.error, name)
- assert issubclass(x, py.error.Error)
- assert issubclass(x, EnvironmentError)
-
-def test_picklability_issue1():
- e1 = py.error.ENOENT()
- s = py.std.pickle.dumps(e1)
- e2 = py.std.pickle.loads(s)
- assert isinstance(e2, py.error.ENOENT)
-
-def test_unknown_error():
- num = 3999
- cls = py.error._geterrnoclass(num)
- assert cls.__name__ == 'UnknownErrno%d' % (num,)
- assert issubclass(cls, py.error.Error)
- assert issubclass(cls, EnvironmentError)
- cls2 = py.error._geterrnoclass(num)
- assert cls is cls2
-
-def test_error_conversion_ENOTDIR(testdir):
- p = testdir.makepyfile("")
- excinfo = py.test.raises(py.error.Error, py.error.checked_call, p.listdir)
- assert isinstance(excinfo.value, EnvironmentError)
- assert isinstance(excinfo.value, py.error.Error)
- assert "ENOTDIR" in repr(excinfo.value)
-
-
-def test_checked_call_supports_kwargs(tmpdir):
- import tempfile
- py.error.checked_call(tempfile.mkdtemp, dir=str(tmpdir))
+
+import py
+
+import errno
+
+def test_error_classes():
+ for name in errno.errorcode.values():
+ x = getattr(py.error, name)
+ assert issubclass(x, py.error.Error)
+ assert issubclass(x, EnvironmentError)
+
+def test_picklability_issue1():
+ e1 = py.error.ENOENT()
+ s = py.std.pickle.dumps(e1)
+ e2 = py.std.pickle.loads(s)
+ assert isinstance(e2, py.error.ENOENT)
+
+def test_unknown_error():
+ num = 3999
+ cls = py.error._geterrnoclass(num)
+ assert cls.__name__ == 'UnknownErrno%d' % (num,)
+ assert issubclass(cls, py.error.Error)
+ assert issubclass(cls, EnvironmentError)
+ cls2 = py.error._geterrnoclass(num)
+ assert cls is cls2
+
+def test_error_conversion_ENOTDIR(testdir):
+ p = testdir.makepyfile("")
+ excinfo = py.test.raises(py.error.Error, py.error.checked_call, p.listdir)
+ assert isinstance(excinfo.value, EnvironmentError)
+ assert isinstance(excinfo.value, py.error.Error)
+ assert "ENOTDIR" in repr(excinfo.value)
+
+
+def test_checked_call_supports_kwargs(tmpdir):
+ import tempfile
+ py.error.checked_call(tempfile.mkdtemp, dir=str(tmpdir))
+
+
+try:
+ import unittest
+ unittest.TestCase.assertWarns
+except (ImportError, AttributeError):
+ pass # required interface not available
+else:
+ import sys
+ import warnings
+
+ class Case(unittest.TestCase):
+ def test_assertWarns(self):
+ # Clear everything "py.*" from sys.modules and re-import py
+ # as a fresh start
+ for mod in tuple(sys.modules.keys()):
+ if mod and (mod == 'py' or mod.startswith('py.')):
+ del sys.modules[mod]
+ import py
+
+ with self.assertWarns(UserWarning):
+ warnings.warn('this should work')
diff --git a/testing/root/test_py_imports.py b/testing/root/test_py_imports.py
index 5f5954e..71b5005 100644
--- a/testing/root/test_py_imports.py
+++ b/testing/root/test_py_imports.py
@@ -1,68 +1,68 @@
-import py
-import types
-import sys
-
-def checksubpackage(name):
- obj = getattr(py, name)
- if hasattr(obj, '__map__'): # isinstance(obj, Module):
- keys = dir(obj)
- assert len(keys) > 0
- print (obj.__map__)
- for name in list(obj.__map__):
- assert hasattr(obj, name), (obj, name)
-
-def test_dir():
- for name in dir(py):
- if not name.startswith('_'):
- yield checksubpackage, name
-
-def test_virtual_module_identity():
- from py import path as path1
- from py import path as path2
- assert path1 is path2
- from py.path import local as local1
- from py.path import local as local2
- assert local1 is local2
-
-def test_importall():
- base = py._pydir
- nodirs = [
- ]
- if sys.version_info >= (3,0):
- nodirs.append(base.join('_code', '_assertionold.py'))
- else:
- nodirs.append(base.join('_code', '_assertionnew.py'))
-
- def recurse(p):
- return p.check(dotfile=0) and p.basename != "attic"
-
- for p in base.visit('*.py', recurse):
- if p.basename == '__init__.py':
- continue
- relpath = p.new(ext='').relto(base)
- if base.sep in relpath: # not py/*.py itself
- for x in nodirs:
- if p == x or p.relto(x):
- break
- else:
- relpath = relpath.replace(base.sep, '.')
- modpath = 'py.%s' % relpath
- try:
- check_import(modpath)
- except py.test.skip.Exception:
- pass
-
-def check_import(modpath):
- py.builtin.print_("checking import", modpath)
- assert __import__(modpath)
-
-def test_all_resolves():
- seen = py.builtin.set([py])
- lastlength = None
- while len(seen) != lastlength:
- lastlength = len(seen)
- for item in py.builtin.frozenset(seen):
- for value in item.__dict__.values():
- if isinstance(value, type(py.test)):
- seen.add(value)
-
+import py
+import types
+import sys
+
+def checksubpackage(name):
+ obj = getattr(py, name)
+ if hasattr(obj, '__map__'): # isinstance(obj, Module):
+ keys = dir(obj)
+ assert len(keys) > 0
+ print (obj.__map__)
+ for name in list(obj.__map__):
+ assert hasattr(obj, name), (obj, name)
+
+def test_dir():
+ for name in dir(py):
+ if not name.startswith('_'):
+ yield checksubpackage, name
+
+def test_virtual_module_identity():
+ from py import path as path1
+ from py import path as path2
+ assert path1 is path2
+ from py.path import local as local1
+ from py.path import local as local2
+ assert local1 is local2
+
+def test_importall():
+ base = py._pydir
+ nodirs = [
+ ]
+ if sys.version_info >= (3,0):
+ nodirs.append(base.join('_code', '_assertionold.py'))
+ else:
+ nodirs.append(base.join('_code', '_assertionnew.py'))
+
+ def recurse(p):
+ return p.check(dotfile=0) and p.basename != "attic"
+
+ for p in base.visit('*.py', recurse):
+ if p.basename == '__init__.py':
+ continue
+ relpath = p.new(ext='').relto(base)
+ if base.sep in relpath: # not py/*.py itself
+ for x in nodirs:
+ if p == x or p.relto(x):
+ break
+ else:
+ relpath = relpath.replace(base.sep, '.')
+ modpath = 'py.%s' % relpath
+ try:
+ check_import(modpath)
+ except py.test.skip.Exception:
+ pass
+
+def check_import(modpath):
+ py.builtin.print_("checking import", modpath)
+ assert __import__(modpath)
+
+def test_all_resolves():
+ seen = py.builtin.set([py])
+ lastlength = None
+ while len(seen) != lastlength:
+ lastlength = len(seen)
+ for item in py.builtin.frozenset(seen):
+ for value in item.__dict__.values():
+ if isinstance(value, type(py.test)):
+ seen.add(value)
+
diff --git a/testing/root/test_std.py b/testing/root/test_std.py
index 143556a..1039304 100644
--- a/testing/root/test_std.py
+++ b/testing/root/test_std.py
@@ -1,13 +1,13 @@
-
-import py
-
-def test_os():
- import os
- assert py.std.os is os
-
-def test_import_error_converts_to_attributeerror():
- py.test.raises(AttributeError, "py.std.xyzalskdj")
-
-def test_std_gets_it():
- for x in py.std.sys.modules:
- assert x in py.std.__dict__
+
+import py
+
+def test_os():
+ import os
+ assert py.std.os is os
+
+def test_import_error_converts_to_attributeerror():
+ py.test.raises(AttributeError, "py.std.xyzalskdj")
+
+def test_std_gets_it():
+ for x in py.std.sys.modules:
+ assert x in py.std.__dict__
diff --git a/testing/root/test_xmlgen.py b/testing/root/test_xmlgen.py
index 704d149..55a6590 100644
--- a/testing/root/test_xmlgen.py
+++ b/testing/root/test_xmlgen.py
@@ -1,145 +1,145 @@
-
-import py
-from py._xmlgen import unicode, html, raw
-
-class ns(py.xml.Namespace):
- pass
-
-def test_escape():
- uvalue = py.builtin._totext('\xc4\x85\xc4\x87\n\xe2\x82\xac\n', 'utf-8')
- class A:
- def __unicode__(self):
- return uvalue
- def __str__(self):
- x = self.__unicode__()
- if py.std.sys.version_info[0] < 3:
- return x.encode('utf-8')
- return x
- y = py.xml.escape(uvalue)
- assert y == uvalue
- x = py.xml.escape(A())
- assert x == uvalue
- if py.std.sys.version_info[0] < 3:
- assert isinstance(x, unicode)
- assert isinstance(y, unicode)
- y = py.xml.escape(uvalue.encode('utf-8'))
- assert y == uvalue
-
-
-def test_tag_with_text():
- x = ns.hello("world")
- u = unicode(x)
- assert u == "<hello>world</hello>"
-
-def test_class_identity():
- assert ns.hello is ns.hello
-
-def test_tag_with_text_and_attributes():
- x = ns.some(name="hello", value="world")
- assert x.attr.name == 'hello'
- assert x.attr.value == 'world'
- u = unicode(x)
- assert u == '<some name="hello" value="world"/>'
-
-def test_tag_with_subclassed_attr_simple():
- class my(ns.hello):
- class Attr(ns.hello.Attr):
- hello="world"
- x = my()
- assert x.attr.hello == 'world'
- assert unicode(x) == '<my hello="world"/>'
-
-def test_tag_with_raw_attr():
- x = html.object(data=raw('&'))
- assert unicode(x) == '<object data="&"></object>'
-
-def test_tag_nested():
- x = ns.hello(ns.world())
- unicode(x) # triggers parentifying
- assert x[0].parent is x
- u = unicode(x)
- assert u == '<hello><world/></hello>'
-
-def test_list_nested():
- x = ns.hello([ns.world()]) #pass in a list here
- u = unicode(x)
- assert u == '<hello><world/></hello>'
-
-def test_tag_xmlname():
- class my(ns.hello):
- xmlname = 'world'
- u = unicode(my())
- assert u == '<world/>'
-
-def test_tag_with_text_entity():
- x = ns.hello('world & rest')
- u = unicode(x)
- assert u == "<hello>world &amp; rest</hello>"
-
-def test_tag_with_text_and_attributes_entity():
- x = ns.some(name="hello & world")
- assert x.attr.name == "hello & world"
- u = unicode(x)
- assert u == '<some name="hello &amp; world"/>'
-
-def test_raw():
- x = ns.some(py.xml.raw("<p>literal</p>"))
- u = unicode(x)
- assert u == "<some><p>literal</p></some>"
-
-
-def test_html_name_stickyness():
- class my(html.p):
- pass
- x = my("hello")
- assert unicode(x) == '<p>hello</p>'
-
-def test_stylenames():
- class my:
- class body(html.body):
- style = html.Style(font_size = "12pt")
- u = unicode(my.body())
- assert u == '<body style="font-size: 12pt"></body>'
-
-def test_class_None():
- t = html.body(class_=None)
- u = unicode(t)
- assert u == '<body></body>'
-
-def test_alternating_style():
- alternating = (
- html.Style(background="white"),
- html.Style(background="grey"),
- )
- class my(html):
- class li(html.li):
- def style(self):
- i = self.parent.index(self)
- return alternating[i%2]
- style = property(style)
-
- x = my.ul(
- my.li("hello"),
- my.li("world"),
- my.li("42"))
- u = unicode(x)
- assert u == ('<ul><li style="background: white">hello</li>'
- '<li style="background: grey">world</li>'
- '<li style="background: white">42</li>'
- '</ul>')
-
-def test_singleton():
- h = html.head(html.link(href="foo"))
- assert unicode(h) == '<head><link href="foo"/></head>'
-
- h = html.head(html.script(src="foo"))
- assert unicode(h) == '<head><script src="foo"></script></head>'
-
-def test_inline():
- h = html.div(html.span('foo'), html.span('bar'))
- assert (h.unicode(indent=2) ==
- '<div><span>foo</span><span>bar</span></div>')
-
-def test_object_tags():
- o = html.object(html.object())
- assert o.unicode(indent=0) == '<object><object></object></object>'
+
+import py
+from py._xmlgen import unicode, html, raw
+
+class ns(py.xml.Namespace):
+ pass
+
+def test_escape():
+ uvalue = py.builtin._totext('\xc4\x85\xc4\x87\n\xe2\x82\xac\n', 'utf-8')
+ class A:
+ def __unicode__(self):
+ return uvalue
+ def __str__(self):
+ x = self.__unicode__()
+ if py.std.sys.version_info[0] < 3:
+ return x.encode('utf-8')
+ return x
+ y = py.xml.escape(uvalue)
+ assert y == uvalue
+ x = py.xml.escape(A())
+ assert x == uvalue
+ if py.std.sys.version_info[0] < 3:
+ assert isinstance(x, unicode)
+ assert isinstance(y, unicode)
+ y = py.xml.escape(uvalue.encode('utf-8'))
+ assert y == uvalue
+
+
+def test_tag_with_text():
+ x = ns.hello("world")
+ u = unicode(x)
+ assert u == "<hello>world</hello>"
+
+def test_class_identity():
+ assert ns.hello is ns.hello
+
+def test_tag_with_text_and_attributes():
+ x = ns.some(name="hello", value="world")
+ assert x.attr.name == 'hello'
+ assert x.attr.value == 'world'
+ u = unicode(x)
+ assert u == '<some name="hello" value="world"/>'
+
+def test_tag_with_subclassed_attr_simple():
+ class my(ns.hello):
+ class Attr(ns.hello.Attr):
+ hello="world"
+ x = my()
+ assert x.attr.hello == 'world'
+ assert unicode(x) == '<my hello="world"/>'
+
+def test_tag_with_raw_attr():
+ x = html.object(data=raw('&'))
+ assert unicode(x) == '<object data="&"></object>'
+
+def test_tag_nested():
+ x = ns.hello(ns.world())
+ unicode(x) # triggers parentifying
+ assert x[0].parent is x
+ u = unicode(x)
+ assert u == '<hello><world/></hello>'
+
+def test_list_nested():
+ x = ns.hello([ns.world()]) #pass in a list here
+ u = unicode(x)
+ assert u == '<hello><world/></hello>'
+
+def test_tag_xmlname():
+ class my(ns.hello):
+ xmlname = 'world'
+ u = unicode(my())
+ assert u == '<world/>'
+
+def test_tag_with_text_entity():
+ x = ns.hello('world & rest')
+ u = unicode(x)
+ assert u == "<hello>world &amp; rest</hello>"
+
+def test_tag_with_text_and_attributes_entity():
+ x = ns.some(name="hello & world")
+ assert x.attr.name == "hello & world"
+ u = unicode(x)
+ assert u == '<some name="hello &amp; world"/>'
+
+def test_raw():
+ x = ns.some(py.xml.raw("<p>literal</p>"))
+ u = unicode(x)
+ assert u == "<some><p>literal</p></some>"
+
+
+def test_html_name_stickyness():
+ class my(html.p):
+ pass
+ x = my("hello")
+ assert unicode(x) == '<p>hello</p>'
+
+def test_stylenames():
+ class my:
+ class body(html.body):
+ style = html.Style(font_size = "12pt")
+ u = unicode(my.body())
+ assert u == '<body style="font-size: 12pt"></body>'
+
+def test_class_None():
+ t = html.body(class_=None)
+ u = unicode(t)
+ assert u == '<body></body>'
+
+def test_alternating_style():
+ alternating = (
+ html.Style(background="white"),
+ html.Style(background="grey"),
+ )
+ class my(html):
+ class li(html.li):
+ def style(self):
+ i = self.parent.index(self)
+ return alternating[i%2]
+ style = property(style)
+
+ x = my.ul(
+ my.li("hello"),
+ my.li("world"),
+ my.li("42"))
+ u = unicode(x)
+ assert u == ('<ul><li style="background: white">hello</li>'
+ '<li style="background: grey">world</li>'
+ '<li style="background: white">42</li>'
+ '</ul>')
+
+def test_singleton():
+ h = html.head(html.link(href="foo"))
+ assert unicode(h) == '<head><link href="foo"/></head>'
+
+ h = html.head(html.script(src="foo"))
+ assert unicode(h) == '<head><script src="foo"></script></head>'
+
+def test_inline():
+ h = html.div(html.span('foo'), html.span('bar'))
+ assert (h.unicode(indent=2) ==
+ '<div><span>foo</span><span>bar</span></div>')
+
+def test_object_tags():
+ o = html.object(html.object())
+ assert o.unicode(indent=0) == '<object><object></object></object>'
diff --git a/testing/test_iniconfig.py b/testing/test_iniconfig.py
index 9a7f72c..be68c3b 100644
--- a/testing/test_iniconfig.py
+++ b/testing/test_iniconfig.py
@@ -1,299 +1,299 @@
-import py
-import pytest
-from py._iniconfig import IniConfig, ParseError, __all__ as ALL
-from py._iniconfig import iscommentline
-from textwrap import dedent
-
-def pytest_generate_tests(metafunc):
- if 'input' in metafunc.funcargnames:
- for name, (input, expected) in check_tokens.items():
- metafunc.addcall(id=name, funcargs={
- 'input': input,
- 'expected': expected,
- })
- elif hasattr(metafunc.function, 'multi'):
- kwargs = metafunc.function.multi.kwargs
- names, values = zip(*kwargs.items())
- values = cartesian_product(*values)
- for p in values:
- metafunc.addcall(funcargs=dict(zip(names, p)))
-
-def cartesian_product(L,*lists):
- # copied from http://bit.ly/cyIXjn
- if not lists:
- for x in L:
- yield (x,)
- else:
- for x in L:
- for y in cartesian_product(lists[0],*lists[1:]):
- yield (x,)+y
-
-check_tokens = {
- 'section': (
- '[section]',
- [(0, 'section', None, None)]
- ),
- 'value': (
- 'value = 1',
- [(0, None, 'value', '1')]
- ),
- 'value in section': (
- '[section]\nvalue=1',
- [(0, 'section', None, None), (1, 'section', 'value', '1')]
- ),
- 'value with continuation': (
- 'names =\n Alice\n Bob',
- [(0, None, 'names', 'Alice\nBob')]
- ),
- 'value with aligned continuation': (
- 'names = Alice\n'
- ' Bob',
- [(0, None, 'names', 'Alice\nBob')]
- ),
- 'blank line':(
- '[section]\n\nvalue=1',
- [(0, 'section', None, None), (2, 'section', 'value', '1')]
- ),
- 'comment': (
- '# comment',
- []
- ),
- 'comment on value': (
- 'value = 1',
- [(0, None, 'value', '1')]
- ),
-
- 'comment on section': (
- '[section] #comment',
- [(0, 'section', None, None)]
- ),
- 'comment2': (
- '; comment',
- []
- ),
-
- 'comment2 on section': (
- '[section] ;comment',
- [(0, 'section', None, None)]
- ),
- 'pseudo section syntax in value': (
- 'name = value []',
- [(0, None, 'name', 'value []')]
- ),
- 'assignment in value': (
- 'value = x = 3',
- [(0, None, 'value', 'x = 3')]
- ),
- 'use of colon for name-values': (
- 'name: y',
- [(0, None, 'name', 'y')]
- ),
- 'use of colon without space': (
- 'value:y=5',
- [(0, None, 'value', 'y=5')]
- ),
- 'equality gets precedence': (
- 'value=xyz:5',
- [(0, None, 'value', 'xyz:5')]
- ),
-
-}
-
-def parse(input):
- # only for testing purposes - _parse() does not use state except path
- ini = object.__new__(IniConfig)
- ini.path = "sample"
- return ini._parse(input.splitlines(True))
-
-def parse_a_error(input):
- return py.test.raises(ParseError, parse, input)
-
-def test_tokenize(input, expected):
- parsed = parse(input)
- assert parsed == expected
-
-def test_parse_empty():
- parsed = parse("")
- assert not parsed
- ini = IniConfig("sample", "")
- assert not ini.sections
-
-def test_ParseError():
- e = ParseError("filename", 0, "hello")
- assert str(e) == "filename:1: hello"
-
-def test_continuation_needs_perceeding_token():
- excinfo = parse_a_error(' Foo')
- assert excinfo.value.lineno == 0
-
-def test_continuation_cant_be_after_section():
- excinfo = parse_a_error('[section]\n Foo')
- assert excinfo.value.lineno == 1
-
-def test_section_cant_be_empty():
- excinfo = parse_a_error('[]')
-
-@py.test.mark.multi(line=[
- '!!',
- ])
-def test_error_on_weird_lines(line):
- parse_a_error(line)
-
-def test_iniconfig_from_file(tmpdir):
- path = tmpdir/'test.txt'
- path.write('[metadata]\nname=1')
-
- config = IniConfig(path=path)
- assert list(config.sections) == ['metadata']
- config = IniConfig(path, "[diff]")
- assert list(config.sections) == ['diff']
- py.test.raises(TypeError, "IniConfig(data=path.read())")
-
-def test_iniconfig_section_first(tmpdir):
- excinfo = py.test.raises(ParseError, """
- IniConfig("x", data='name=1')
- """)
- assert excinfo.value.msg == "no section header defined"
-
-def test_iniconig_section_duplicate_fails():
- excinfo = py.test.raises(ParseError, r"""
- IniConfig("x", data='[section]\n[section]')
- """)
- assert 'duplicate section' in str(excinfo.value)
-
-def test_iniconfig_duplicate_key_fails():
- excinfo = py.test.raises(ParseError, r"""
- IniConfig("x", data='[section]\nname = Alice\nname = bob')
- """)
-
- assert 'duplicate name' in str(excinfo.value)
-
-def test_iniconfig_lineof():
- config = IniConfig("x.ini", data=
- '[section]\n'
- 'value = 1\n'
- '[section2]\n'
- '# comment\n'
- 'value =2'
- )
-
- assert config.lineof('missing') is None
- assert config.lineof('section') == 1
- assert config.lineof('section2') == 3
- assert config.lineof('section', 'value') == 2
- assert config.lineof('section2','value') == 5
-
- assert config['section'].lineof('value') == 2
- assert config['section2'].lineof('value') == 5
-
-def test_iniconfig_get_convert():
- config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
- assert config.get('section', 'int') == '1'
- assert config.get('section', 'int', convert=int) == 1
-
-def test_iniconfig_get_missing():
- config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
- assert config.get('section', 'missing', default=1) == 1
- assert config.get('section', 'missing') is None
-
-def test_section_get():
- config = IniConfig("x", data='[section]\nvalue=1')
- section = config['section']
- assert section.get('value', convert=int) == 1
- assert section.get('value', 1) == "1"
- assert section.get('missing', 2) == 2
-
-def test_missing_section():
- config = IniConfig("x", data='[section]\nvalue=1')
- py.test.raises(KeyError,'config["other"]')
-
-def test_section_getitem():
- config = IniConfig("x", data='[section]\nvalue=1')
- assert config['section']['value'] == '1'
- assert config['section']['value'] == '1'
-
-def test_section_iter():
- config = IniConfig("x", data='[section]\nvalue=1')
- names = list(config['section'])
- assert names == ['value']
- items = list(config['section'].items())
- assert items==[('value', '1')]
-
-def test_config_iter():
- config = IniConfig("x.ini", data=dedent('''
- [section1]
- value=1
- [section2]
- value=2
- '''))
- l = list(config)
- assert len(l) == 2
- assert l[0].name == 'section1'
- assert l[0]['value'] == '1'
- assert l[1].name == 'section2'
- assert l[1]['value'] == '2'
-
-def test_config_contains():
- config = IniConfig("x.ini", data=dedent('''
- [section1]
- value=1
- [section2]
- value=2
- '''))
- assert 'xyz' not in config
- assert 'section1' in config
- assert 'section2' in config
-
-def test_iter_file_order():
- config = IniConfig("x.ini", data="""
-[section2] #cpython dict ordered before section
-value = 1
-value2 = 2 # dict ordered before value
-[section]
-a = 1
-b = 2
-""")
- l = list(config)
- secnames = [x.name for x in l]
- assert secnames == ['section2', 'section']
- assert list(config['section2']) == ['value', 'value2']
- assert list(config['section']) == ['a', 'b']
-
-def test_example_pypirc():
- config = IniConfig("pypirc", data=dedent('''
- [distutils]
- index-servers =
- pypi
- other
-
- [pypi]
- repository: <repository-url>
- username: <username>
- password: <password>
-
- [other]
- repository: http://example.com/pypi
- username: <username>
- password: <password>
- '''))
- distutils, pypi, other = list(config)
- assert distutils["index-servers"] == "pypi\nother"
- assert pypi['repository'] == '<repository-url>'
- assert pypi['username'] == '<username>'
- assert pypi['password'] == '<password>'
- assert ['repository', 'username', 'password'] == list(other)
-
-
-def test_api_import():
- assert ALL == ['IniConfig', 'ParseError']
-
-@pytest.mark.parametrize("line", [
- "#qwe",
- " #qwe",
- ";qwe",
- " ;qwe",
-])
-def test_iscommentline_true(line):
- assert iscommentline(line)
-
-
+import py
+import pytest
+from py._iniconfig import IniConfig, ParseError, __all__ as ALL
+from py._iniconfig import iscommentline
+from textwrap import dedent
+
+def pytest_generate_tests(metafunc):
+ if 'input' in metafunc.funcargnames:
+ for name, (input, expected) in check_tokens.items():
+ metafunc.addcall(id=name, funcargs={
+ 'input': input,
+ 'expected': expected,
+ })
+ elif hasattr(metafunc.function, 'multi'):
+ kwargs = metafunc.function.multi.kwargs
+ names, values = zip(*kwargs.items())
+ values = cartesian_product(*values)
+ for p in values:
+ metafunc.addcall(funcargs=dict(zip(names, p)))
+
+def cartesian_product(L,*lists):
+ # copied from http://bit.ly/cyIXjn
+ if not lists:
+ for x in L:
+ yield (x,)
+ else:
+ for x in L:
+ for y in cartesian_product(lists[0],*lists[1:]):
+ yield (x,)+y
+
+check_tokens = {
+ 'section': (
+ '[section]',
+ [(0, 'section', None, None)]
+ ),
+ 'value': (
+ 'value = 1',
+ [(0, None, 'value', '1')]
+ ),
+ 'value in section': (
+ '[section]\nvalue=1',
+ [(0, 'section', None, None), (1, 'section', 'value', '1')]
+ ),
+ 'value with continuation': (
+ 'names =\n Alice\n Bob',
+ [(0, None, 'names', 'Alice\nBob')]
+ ),
+ 'value with aligned continuation': (
+ 'names = Alice\n'
+ ' Bob',
+ [(0, None, 'names', 'Alice\nBob')]
+ ),
+ 'blank line':(
+ '[section]\n\nvalue=1',
+ [(0, 'section', None, None), (2, 'section', 'value', '1')]
+ ),
+ 'comment': (
+ '# comment',
+ []
+ ),
+ 'comment on value': (
+ 'value = 1',
+ [(0, None, 'value', '1')]
+ ),
+
+ 'comment on section': (
+ '[section] #comment',
+ [(0, 'section', None, None)]
+ ),
+ 'comment2': (
+ '; comment',
+ []
+ ),
+
+ 'comment2 on section': (
+ '[section] ;comment',
+ [(0, 'section', None, None)]
+ ),
+ 'pseudo section syntax in value': (
+ 'name = value []',
+ [(0, None, 'name', 'value []')]
+ ),
+ 'assignment in value': (
+ 'value = x = 3',
+ [(0, None, 'value', 'x = 3')]
+ ),
+ 'use of colon for name-values': (
+ 'name: y',
+ [(0, None, 'name', 'y')]
+ ),
+ 'use of colon without space': (
+ 'value:y=5',
+ [(0, None, 'value', 'y=5')]
+ ),
+ 'equality gets precedence': (
+ 'value=xyz:5',
+ [(0, None, 'value', 'xyz:5')]
+ ),
+
+}
+
+def parse(input):
+ # only for testing purposes - _parse() does not use state except path
+ ini = object.__new__(IniConfig)
+ ini.path = "sample"
+ return ini._parse(input.splitlines(True))
+
+def parse_a_error(input):
+ return py.test.raises(ParseError, parse, input)
+
+def test_tokenize(input, expected):
+ parsed = parse(input)
+ assert parsed == expected
+
+def test_parse_empty():
+ parsed = parse("")
+ assert not parsed
+ ini = IniConfig("sample", "")
+ assert not ini.sections
+
+def test_ParseError():
+ e = ParseError("filename", 0, "hello")
+ assert str(e) == "filename:1: hello"
+
+def test_continuation_needs_perceeding_token():
+ excinfo = parse_a_error(' Foo')
+ assert excinfo.value.lineno == 0
+
+def test_continuation_cant_be_after_section():
+ excinfo = parse_a_error('[section]\n Foo')
+ assert excinfo.value.lineno == 1
+
+def test_section_cant_be_empty():
+ excinfo = parse_a_error('[]')
+
+@py.test.mark.multi(line=[
+ '!!',
+ ])
+def test_error_on_weird_lines(line):
+ parse_a_error(line)
+
+def test_iniconfig_from_file(tmpdir):
+ path = tmpdir/'test.txt'
+ path.write('[metadata]\nname=1')
+
+ config = IniConfig(path=path)
+ assert list(config.sections) == ['metadata']
+ config = IniConfig(path, "[diff]")
+ assert list(config.sections) == ['diff']
+ py.test.raises(TypeError, "IniConfig(data=path.read())")
+
+def test_iniconfig_section_first(tmpdir):
+ excinfo = py.test.raises(ParseError, """
+ IniConfig("x", data='name=1')
+ """)
+ assert "no section header defined" in str(excinfo.value)
+
+def test_iniconig_section_duplicate_fails():
+ excinfo = py.test.raises(ParseError, r"""
+ IniConfig("x", data='[section]\n[section]')
+ """)
+ assert 'duplicate section' in str(excinfo.value)
+
+def test_iniconfig_duplicate_key_fails():
+ excinfo = py.test.raises(ParseError, r"""
+ IniConfig("x", data='[section]\nname = Alice\nname = bob')
+ """)
+
+ assert 'duplicate name' in str(excinfo.value)
+
+def test_iniconfig_lineof():
+ config = IniConfig("x.ini", data=
+ '[section]\n'
+ 'value = 1\n'
+ '[section2]\n'
+ '# comment\n'
+ 'value =2'
+ )
+
+ assert config.lineof('missing') is None
+ assert config.lineof('section') == 1
+ assert config.lineof('section2') == 3
+ assert config.lineof('section', 'value') == 2
+ assert config.lineof('section2','value') == 5
+
+ assert config['section'].lineof('value') == 2
+ assert config['section2'].lineof('value') == 5
+
+def test_iniconfig_get_convert():
+ config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
+ assert config.get('section', 'int') == '1'
+ assert config.get('section', 'int', convert=int) == 1
+
+def test_iniconfig_get_missing():
+ config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
+ assert config.get('section', 'missing', default=1) == 1
+ assert config.get('section', 'missing') is None
+
+def test_section_get():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ section = config['section']
+ assert section.get('value', convert=int) == 1
+ assert section.get('value', 1) == "1"
+ assert section.get('missing', 2) == 2
+
+def test_missing_section():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ py.test.raises(KeyError,'config["other"]')
+
+def test_section_getitem():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ assert config['section']['value'] == '1'
+ assert config['section']['value'] == '1'
+
+def test_section_iter():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ names = list(config['section'])
+ assert names == ['value']
+ items = list(config['section'].items())
+ assert items==[('value', '1')]
+
+def test_config_iter():
+ config = IniConfig("x.ini", data=dedent('''
+ [section1]
+ value=1
+ [section2]
+ value=2
+ '''))
+ l = list(config)
+ assert len(l) == 2
+ assert l[0].name == 'section1'
+ assert l[0]['value'] == '1'
+ assert l[1].name == 'section2'
+ assert l[1]['value'] == '2'
+
+def test_config_contains():
+ config = IniConfig("x.ini", data=dedent('''
+ [section1]
+ value=1
+ [section2]
+ value=2
+ '''))
+ assert 'xyz' not in config
+ assert 'section1' in config
+ assert 'section2' in config
+
+def test_iter_file_order():
+ config = IniConfig("x.ini", data="""
+[section2] #cpython dict ordered before section
+value = 1
+value2 = 2 # dict ordered before value
+[section]
+a = 1
+b = 2
+""")
+ l = list(config)
+ secnames = [x.name for x in l]
+ assert secnames == ['section2', 'section']
+ assert list(config['section2']) == ['value', 'value2']
+ assert list(config['section']) == ['a', 'b']
+
+def test_example_pypirc():
+ config = IniConfig("pypirc", data=dedent('''
+ [distutils]
+ index-servers =
+ pypi
+ other
+
+ [pypi]
+ repository: <repository-url>
+ username: <username>
+ password: <password>
+
+ [other]
+ repository: http://example.com/pypi
+ username: <username>
+ password: <password>
+ '''))
+ distutils, pypi, other = list(config)
+ assert distutils["index-servers"] == "pypi\nother"
+ assert pypi['repository'] == '<repository-url>'
+ assert pypi['username'] == '<username>'
+ assert pypi['password'] == '<password>'
+ assert ['repository', 'username', 'password'] == list(other)
+
+
+def test_api_import():
+ assert ALL == ['IniConfig', 'ParseError']
+
+@pytest.mark.parametrize("line", [
+ "#qwe",
+ " #qwe",
+ ";qwe",
+ " ;qwe",
+])
+def test_iscommentline_true(line):
+ assert iscommentline(line)
+
+
diff --git a/tox.ini b/tox.ini
index f1feb66..a50329d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,39 +1,25 @@
-[tox]
-envlist=py26,py27,py33,py34,py35,external
-# py27-xdist causes problems with svn, py25 requires virtualenv==1.9.1
-#indexserver=
-# default=http://pypi.testrun.org
-
-[testenv]
-changedir=testing
-commands=py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
-deps=pytest~=2.9.0
-
-[testenv:py27-xdist]
-basepython=python2.7
-deps=
- pytest~=2.9.0
- pytest-xdist
-commands=
- py.test -n3 -rfsxX --confcutdir=.. --runslowtests \
- --junitxml={envlogdir}/junit-{envname}.xml []
-
-[testenv:jython]
-changedir=testing
-commands=
- {envpython} -m pytest --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}0.xml {posargs:io_ code}
-
-[testenv:py25]
-setenv = PIP_INSECURE=1
-
-[testenv:external]
-deps=
- pytest
- jinja2
- decorator
-commands=
- py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml {posargs:code}
-
-[pytest]
-rsyncdirs = conftest.py py doc testing
-addopts = -rxXf
+[tox]
+envlist=py26,py27,py33,py34,py35
+
+[testenv]
+changedir=testing
+commands=py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
+deps=pytest~=2.9.0
+
+[testenv:py27-xdist]
+basepython=python2.7
+deps=
+ pytest~=2.9.0
+ pytest-xdist
+commands=
+ py.test -n3 -rfsxX --confcutdir=.. --runslowtests \
+ --junitxml={envlogdir}/junit-{envname}.xml []
+
+[testenv:jython]
+changedir=testing
+commands=
+ {envpython} -m pytest --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}0.xml {posargs:io_ code}
+
+[pytest]
+rsyncdirs = conftest.py py doc testing
+addopts = -ra