Bug 1382362 - Update pytest to v3.1.3 and py to v1.4.34, r?davehunt draft
authorAndrew Halberstadt <ahalberstadt@mozilla.com>
Wed, 19 Jul 2017 16:50:57 -0400
changeset 612458 82bfed4f5f556c3eb6e1f34fb8d739075b9f3cd7
parent 612171 68046a58f82913eb7804e4796ec981f6f8ea490e
child 638400 88f1380e793a5ef1d161762e3fde901d73326acd
push id69489
push userahalberstadt@mozilla.com
push dateThu, 20 Jul 2017 17:43:28 +0000
reviewersdavehunt
bugs1382362
milestone56.0a1
Bug 1382362 - Update pytest to v3.1.3 and py to v1.4.34, r?davehunt This patch was generated by something similar to: $ cd third_party/python $ hg rm pytest/* py/* $ pip wheel pytest $ unzip pytest.whl $ unzip py.whl $ hg add pytest/* py/* MozReview-Commit-ID: 3LKVrbKfMgK
third_party/python/py/AUTHORS
third_party/python/py/LICENSE
third_party/python/py/MANIFEST.in
third_party/python/py/PKG-INFO
third_party/python/py/README.txt
third_party/python/py/py/__init__.py
third_party/python/py/py/__metainfo.py
third_party/python/py/py/_apipkg.py
third_party/python/py/py/_builtin.py
third_party/python/py/py/_code/__init__.py
third_party/python/py/py/_code/_assertionnew.py
third_party/python/py/py/_code/_assertionold.py
third_party/python/py/py/_code/_py2traceback.py
third_party/python/py/py/_code/assertion.py
third_party/python/py/py/_code/code.py
third_party/python/py/py/_code/source.py
third_party/python/py/py/_error.py
third_party/python/py/py/_iniconfig.py
third_party/python/py/py/_io/__init__.py
third_party/python/py/py/_io/capture.py
third_party/python/py/py/_io/saferepr.py
third_party/python/py/py/_io/terminalwriter.py
third_party/python/py/py/_log/__init__.py
third_party/python/py/py/_log/log.py
third_party/python/py/py/_log/warning.py
third_party/python/py/py/_path/__init__.py
third_party/python/py/py/_path/cacheutil.py
third_party/python/py/py/_path/common.py
third_party/python/py/py/_path/local.py
third_party/python/py/py/_path/svnurl.py
third_party/python/py/py/_path/svnwc.py
third_party/python/py/py/_process/__init__.py
third_party/python/py/py/_process/cmdexec.py
third_party/python/py/py/_process/forkedfunc.py
third_party/python/py/py/_process/killproc.py
third_party/python/py/py/_std.py
third_party/python/py/py/_xmlgen.py
third_party/python/py/py/test.py
third_party/python/py/setup.cfg
third_party/python/py/setup.py
third_party/python/pytest/.coveragerc
third_party/python/pytest/AUTHORS
third_party/python/pytest/LICENSE
third_party/python/pytest/MANIFEST.in
third_party/python/pytest/PKG-INFO
third_party/python/pytest/README.rst
third_party/python/pytest/_pytest/__init__.py
third_party/python/pytest/_pytest/_argcomplete.py
third_party/python/pytest/_pytest/_code/__init__.py
third_party/python/pytest/_pytest/_code/_py2traceback.py
third_party/python/pytest/_pytest/_code/code.py
third_party/python/pytest/_pytest/_code/source.py
third_party/python/pytest/_pytest/_pluggy.py
third_party/python/pytest/_pytest/_version.py
third_party/python/pytest/_pytest/assertion/__init__.py
third_party/python/pytest/_pytest/assertion/reinterpret.py
third_party/python/pytest/_pytest/assertion/rewrite.py
third_party/python/pytest/_pytest/assertion/truncate.py
third_party/python/pytest/_pytest/assertion/util.py
third_party/python/pytest/_pytest/cacheprovider.py
third_party/python/pytest/_pytest/capture.py
third_party/python/pytest/_pytest/compat.py
third_party/python/pytest/_pytest/config.py
third_party/python/pytest/_pytest/debugging.py
third_party/python/pytest/_pytest/deprecated.py
third_party/python/pytest/_pytest/doctest.py
third_party/python/pytest/_pytest/fixtures.py
third_party/python/pytest/_pytest/freeze_support.py
third_party/python/pytest/_pytest/genscript.py
third_party/python/pytest/_pytest/helpconfig.py
third_party/python/pytest/_pytest/hookspec.py
third_party/python/pytest/_pytest/junitxml.py
third_party/python/pytest/_pytest/main.py
third_party/python/pytest/_pytest/mark.py
third_party/python/pytest/_pytest/monkeypatch.py
third_party/python/pytest/_pytest/nose.py
third_party/python/pytest/_pytest/pastebin.py
third_party/python/pytest/_pytest/pdb.py
third_party/python/pytest/_pytest/pytester.py
third_party/python/pytest/_pytest/python.py
third_party/python/pytest/_pytest/recwarn.py
third_party/python/pytest/_pytest/resultlog.py
third_party/python/pytest/_pytest/runner.py
third_party/python/pytest/_pytest/setuponly.py
third_party/python/pytest/_pytest/setupplan.py
third_party/python/pytest/_pytest/skipping.py
third_party/python/pytest/_pytest/standalonetemplate.py
third_party/python/pytest/_pytest/terminal.py
third_party/python/pytest/_pytest/tmpdir.py
third_party/python/pytest/_pytest/unittest.py
third_party/python/pytest/_pytest/vendored_packages/README.md
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
third_party/python/pytest/_pytest/vendored_packages/pluggy.py
third_party/python/pytest/_pytest/warnings.py
third_party/python/pytest/pytest.py
third_party/python/pytest/setup.cfg
third_party/python/pytest/setup.py
deleted file mode 100644
--- a/third_party/python/py/AUTHORS
+++ /dev/null
@@ -1,24 +0,0 @@
-Holger Krekel, holger at merlinux eu
-Benjamin Peterson, benjamin at python org
-Ronny Pfannschmidt, Ronny.Pfannschmidt at gmx de
-Guido Wesdorp, johnny at johnnydebris net 
-Samuele Pedroni, pedronis at openend se 
-Carl Friedrich Bolz, cfbolz at gmx de
-Armin Rigo, arigo at tunes org 
-Maciek Fijalkowski, fijal at genesilico pl
-Brian Dorsey, briandorsey at gmail com 
-Floris Bruynooghe, flub at devork be
-merlinux GmbH, Germany, office at merlinux eu
-
-Contributors include:: 
-
-Ross Lawley
-Ralf Schmitt
-Chris Lamb 
-Harald Armin Massa
-Martijn Faassen
-Ian Bicking 
-Jan Balster
-Grig Gheorghiu
-Bob Ippolito
-Christian Tismer
deleted file mode 100644
--- a/third_party/python/py/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-     
-  The above copyright notice and this permission notice shall be included in all
-  copies or substantial portions of the Software.
- 
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-  SOFTWARE.
-
deleted file mode 100644
--- a/third_party/python/py/MANIFEST.in
+++ /dev/null
@@ -1,9 +0,0 @@
-include CHANGELOG
-include AUTHORS
-include README.txt
-include setup.py
-include LICENSE
-include conftest.py
-include tox.ini
-graft doc
-graft testing
deleted file mode 100644
--- a/third_party/python/py/PKG-INFO
+++ /dev/null
@@ -1,46 +0,0 @@
-Metadata-Version: 1.1
-Name: py
-Version: 1.4.31
-Summary: library with cross-python path, ini-parsing, io, code, log facilities
-Home-page: http://pylib.readthedocs.org/
-Author: holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others
-Author-email: pytest-dev@python.org
-License: MIT license
-Description: .. image:: https://drone.io/bitbucket.org/pytest-dev/py/status.png
-           :target: https://drone.io/bitbucket.org/pytest-dev/py/latest
-        .. image:: https://pypip.in/v/py/badge.png
-           :target: https://pypi.python.org/pypi/py
-        
-        The py lib is a Python development support library featuring
-        the following tools and modules:
-        
-        * py.path:  uniform local and svn path objects
-        * py.apipkg:  explicit API control and lazy-importing
-        * py.iniconfig:  easy parsing of .ini files
-        * py.code: dynamic code generation and introspection
-        
-        NOTE: prior to the 1.4 release this distribution used to
-        contain py.test which is now its own package, see http://pytest.org
-        
-        For questions and more information please visit http://pylib.readthedocs.org
-        
-        Bugs and issues: http://bitbucket.org/pytest-dev/py/issues/
-        
-        Authors: Holger Krekel and others, 2004-2015
-        
-Platform: unix
-Platform: linux
-Platform: osx
-Platform: cygwin
-Platform: win32
-Classifier: Development Status :: 6 - Mature
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Topic :: Software Development :: Testing
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
deleted file mode 100644
--- a/third_party/python/py/README.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-.. image:: https://drone.io/bitbucket.org/pytest-dev/py/status.png
-   :target: https://drone.io/bitbucket.org/pytest-dev/py/latest
-.. image:: https://pypip.in/v/py/badge.png
-   :target: https://pypi.python.org/pypi/py
-
-The py lib is a Python development support library featuring
-the following tools and modules:
-
-* py.path:  uniform local and svn path objects
-* py.apipkg:  explicit API control and lazy-importing
-* py.iniconfig:  easy parsing of .ini files
-* py.code: dynamic code generation and introspection
-
-NOTE: prior to the 1.4 release this distribution used to
-contain py.test which is now its own package, see http://pytest.org
-
-For questions and more information please visit http://pylib.readthedocs.org
-
-Bugs and issues: http://bitbucket.org/pytest-dev/py/issues/
-
-Authors: Holger Krekel and others, 2004-2015
--- a/third_party/python/py/py/__init__.py
+++ b/third_party/python/py/py/__init__.py
@@ -1,150 +1,152 @@
-"""
-py.test and pylib: rapid testing and development utils
-
-this module uses apipkg.py for lazy-loading sub modules
-and classes.  The initpkg-dictionary  below specifies
-name->value mappings where value can be another namespace
-dictionary or an import path.
-
-(c) Holger Krekel and others, 2004-2014
-"""
-__version__ = '1.4.31'
-
-from py import _apipkg
-
-# so that py.error.* instances are picklable
-import sys
-sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error')
-
-_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={
-    # access to all standard lib modules
-    'std': '._std:std',
-    # access to all posix errno's as classes
-    'error': '._error:error',
-
-    '_pydir' : '.__metainfo:pydir',
-    'version': 'py:__version__', # backward compatibility
-
-    # pytest-2.0 has a flat namespace, we use alias modules
-    # to keep old references compatible
-    'test' : 'pytest',
-    'test.collect' : 'pytest',
-    'test.cmdline' : 'pytest',
-
-    # hook into the top-level standard library
-    'process' : {
-        '__doc__'        : '._process:__doc__',
-        'cmdexec'        : '._process.cmdexec:cmdexec',
-        'kill'           : '._process.killproc:kill',
-        'ForkedFunc'     : '._process.forkedfunc:ForkedFunc',
-    },
-
-    'apipkg' : {
-        'initpkg'   : '._apipkg:initpkg',
-        'ApiModule' : '._apipkg:ApiModule',
-    },
-
-    'iniconfig' : {
-        'IniConfig'      : '._iniconfig:IniConfig',
-        'ParseError'     : '._iniconfig:ParseError',
-    },
-
-    'path' : {
-        '__doc__'        : '._path:__doc__',
-        'svnwc'          : '._path.svnwc:SvnWCCommandPath',
-        'svnurl'         : '._path.svnurl:SvnCommandPath',
-        'local'          : '._path.local:LocalPath',
-        'SvnAuth'        : '._path.svnwc:SvnAuth',
-    },
-
-    # python inspection/code-generation API
-    'code' : {
-        '__doc__'           : '._code:__doc__',
-        'compile'           : '._code.source:compile_',
-        'Source'            : '._code.source:Source',
-        'Code'              : '._code.code:Code',
-        'Frame'             : '._code.code:Frame',
-        'ExceptionInfo'     : '._code.code:ExceptionInfo',
-        'Traceback'         : '._code.code:Traceback',
-        'getfslineno'       : '._code.source:getfslineno',
-        'getrawcode'        : '._code.code:getrawcode',
-        'patch_builtins'    : '._code.code:patch_builtins',
-        'unpatch_builtins'  : '._code.code:unpatch_builtins',
-        '_AssertionError'   : '._code.assertion:AssertionError',
-        '_reinterpret_old'  : '._code.assertion:reinterpret_old',
-        '_reinterpret'      : '._code.assertion:reinterpret',
-        '_reprcompare'      : '._code.assertion:_reprcompare',
-        '_format_explanation' : '._code.assertion:_format_explanation',
-    },
-
-    # backports and additions of builtins
-    'builtin' : {
-        '__doc__'        : '._builtin:__doc__',
-        'enumerate'      : '._builtin:enumerate',
-        'reversed'       : '._builtin:reversed',
-        'sorted'         : '._builtin:sorted',
-        'any'            : '._builtin:any',
-        'all'            : '._builtin:all',
-        'set'            : '._builtin:set',
-        'frozenset'      : '._builtin:frozenset',
-        'BaseException'  : '._builtin:BaseException',
-        'GeneratorExit'  : '._builtin:GeneratorExit',
-        '_sysex'         : '._builtin:_sysex',
-        'print_'         : '._builtin:print_',
-        '_reraise'       : '._builtin:_reraise',
-        '_tryimport'     : '._builtin:_tryimport',
-        'exec_'          : '._builtin:exec_',
-        '_basestring'    : '._builtin:_basestring',
-        '_totext'        : '._builtin:_totext',
-        '_isbytes'       : '._builtin:_isbytes',
-        '_istext'        : '._builtin:_istext',
-        '_getimself'     : '._builtin:_getimself',
-        '_getfuncdict'   : '._builtin:_getfuncdict',
-        '_getcode'       : '._builtin:_getcode',
-        'builtins'       : '._builtin:builtins',
-        'execfile'       : '._builtin:execfile',
-        'callable'       : '._builtin:callable',
-        'bytes'       : '._builtin:bytes',
-        'text'       : '._builtin:text',
-    },
-
-    # input-output helping
-    'io' : {
-        '__doc__'             : '._io:__doc__',
-        'dupfile'             : '._io.capture:dupfile',
-        'TextIO'              : '._io.capture:TextIO',
-        'BytesIO'             : '._io.capture:BytesIO',
-        'FDCapture'           : '._io.capture:FDCapture',
-        'StdCapture'          : '._io.capture:StdCapture',
-        'StdCaptureFD'        : '._io.capture:StdCaptureFD',
-        'TerminalWriter'      : '._io.terminalwriter:TerminalWriter',
-        'ansi_print'          : '._io.terminalwriter:ansi_print',
-        'get_terminal_width'  : '._io.terminalwriter:get_terminal_width',
-        'saferepr'            : '._io.saferepr:saferepr',
-    },
-
-    # small and mean xml/html generation
-    'xml' : {
-        '__doc__'            : '._xmlgen:__doc__',
-        'html'               : '._xmlgen:html',
-        'Tag'                : '._xmlgen:Tag',
-        'raw'                : '._xmlgen:raw',
-        'Namespace'          : '._xmlgen:Namespace',
-        'escape'             : '._xmlgen:escape',
-    },
-
-    'log' : {
-        # logging API ('producers' and 'consumers' connected via keywords)
-        '__doc__'            : '._log:__doc__',
-        '_apiwarn'           : '._log.warning:_apiwarn',
-        'Producer'           : '._log.log:Producer',
-        'setconsumer'        : '._log.log:setconsumer',
-        '_setstate'          : '._log.log:setstate',
-        '_getstate'          : '._log.log:getstate',
-        'Path'               : '._log.log:Path',
-        'STDOUT'             : '._log.log:STDOUT',
-        'STDERR'             : '._log.log:STDERR',
-        'Syslog'             : '._log.log:Syslog',
-    },
-
-})
+"""
+pylib: rapid testing and development utils
+
+this module uses apipkg.py for lazy-loading sub modules
+and classes.  The initpkg-dictionary  below specifies
+name->value mappings where value can be another namespace
+dictionary or an import path.
+
+(c) Holger Krekel and others, 2004-2014
+"""
+__version__ = '1.4.34'
+
+from py import _apipkg
+
+# so that py.error.* instances are picklable
+import sys
+sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error')
+import py.error  # "Dereference" it now just to be safe (issue110)
+
+
+_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={
+    # access to all standard lib modules
+    'std': '._std:std',
+    # access to all posix errno's as classes
+    'error': '._error:error',
+
+    '_pydir' : '.__metainfo:pydir',
+    'version': 'py:__version__', # backward compatibility
+
+    # pytest-2.0 has a flat namespace, we use alias modules
+    # to keep old references compatible
+    'test' : 'pytest',
+    'test.collect' : 'pytest',
+    'test.cmdline' : 'pytest',
+
+    # hook into the top-level standard library
+    'process' : {
+        '__doc__'        : '._process:__doc__',
+        'cmdexec'        : '._process.cmdexec:cmdexec',
+        'kill'           : '._process.killproc:kill',
+        'ForkedFunc'     : '._process.forkedfunc:ForkedFunc',
+    },
+
+    'apipkg' : {
+        'initpkg'   : '._apipkg:initpkg',
+        'ApiModule' : '._apipkg:ApiModule',
+    },
+
+    'iniconfig' : {
+        'IniConfig'      : '._iniconfig:IniConfig',
+        'ParseError'     : '._iniconfig:ParseError',
+    },
+
+    'path' : {
+        '__doc__'        : '._path:__doc__',
+        'svnwc'          : '._path.svnwc:SvnWCCommandPath',
+        'svnurl'         : '._path.svnurl:SvnCommandPath',
+        'local'          : '._path.local:LocalPath',
+        'SvnAuth'        : '._path.svnwc:SvnAuth',
+    },
+
+    # python inspection/code-generation API
+    'code' : {
+        '__doc__'           : '._code:__doc__',
+        'compile'           : '._code.source:compile_',
+        'Source'            : '._code.source:Source',
+        'Code'              : '._code.code:Code',
+        'Frame'             : '._code.code:Frame',
+        'ExceptionInfo'     : '._code.code:ExceptionInfo',
+        'Traceback'         : '._code.code:Traceback',
+        'getfslineno'       : '._code.source:getfslineno',
+        'getrawcode'        : '._code.code:getrawcode',
+        'patch_builtins'    : '._code.code:patch_builtins',
+        'unpatch_builtins'  : '._code.code:unpatch_builtins',
+        '_AssertionError'   : '._code.assertion:AssertionError',
+        '_reinterpret_old'  : '._code.assertion:reinterpret_old',
+        '_reinterpret'      : '._code.assertion:reinterpret',
+        '_reprcompare'      : '._code.assertion:_reprcompare',
+        '_format_explanation' : '._code.assertion:_format_explanation',
+    },
+
+    # backports and additions of builtins
+    'builtin' : {
+        '__doc__'        : '._builtin:__doc__',
+        'enumerate'      : '._builtin:enumerate',
+        'reversed'       : '._builtin:reversed',
+        'sorted'         : '._builtin:sorted',
+        'any'            : '._builtin:any',
+        'all'            : '._builtin:all',
+        'set'            : '._builtin:set',
+        'frozenset'      : '._builtin:frozenset',
+        'BaseException'  : '._builtin:BaseException',
+        'GeneratorExit'  : '._builtin:GeneratorExit',
+        '_sysex'         : '._builtin:_sysex',
+        'print_'         : '._builtin:print_',
+        '_reraise'       : '._builtin:_reraise',
+        '_tryimport'     : '._builtin:_tryimport',
+        'exec_'          : '._builtin:exec_',
+        '_basestring'    : '._builtin:_basestring',
+        '_totext'        : '._builtin:_totext',
+        '_isbytes'       : '._builtin:_isbytes',
+        '_istext'        : '._builtin:_istext',
+        '_getimself'     : '._builtin:_getimself',
+        '_getfuncdict'   : '._builtin:_getfuncdict',
+        '_getcode'       : '._builtin:_getcode',
+        'builtins'       : '._builtin:builtins',
+        'execfile'       : '._builtin:execfile',
+        'callable'       : '._builtin:callable',
+        'bytes'       : '._builtin:bytes',
+        'text'       : '._builtin:text',
+    },
+
+    # input-output helping
+    'io' : {
+        '__doc__'             : '._io:__doc__',
+        'dupfile'             : '._io.capture:dupfile',
+        'TextIO'              : '._io.capture:TextIO',
+        'BytesIO'             : '._io.capture:BytesIO',
+        'FDCapture'           : '._io.capture:FDCapture',
+        'StdCapture'          : '._io.capture:StdCapture',
+        'StdCaptureFD'        : '._io.capture:StdCaptureFD',
+        'TerminalWriter'      : '._io.terminalwriter:TerminalWriter',
+        'ansi_print'          : '._io.terminalwriter:ansi_print',
+        'get_terminal_width'  : '._io.terminalwriter:get_terminal_width',
+        'saferepr'            : '._io.saferepr:saferepr',
+    },
+
+    # small and mean xml/html generation
+    'xml' : {
+        '__doc__'            : '._xmlgen:__doc__',
+        'html'               : '._xmlgen:html',
+        'Tag'                : '._xmlgen:Tag',
+        'raw'                : '._xmlgen:raw',
+        'Namespace'          : '._xmlgen:Namespace',
+        'escape'             : '._xmlgen:escape',
+    },
+
+    'log' : {
+        # logging API ('producers' and 'consumers' connected via keywords)
+        '__doc__'            : '._log:__doc__',
+        '_apiwarn'           : '._log.warning:_apiwarn',
+        'Producer'           : '._log.log:Producer',
+        'setconsumer'        : '._log.log:setconsumer',
+        '_setstate'          : '._log.log:setstate',
+        '_getstate'          : '._log.log:getstate',
+        'Path'               : '._log.log:Path',
+        'STDOUT'             : '._log.log:STDOUT',
+        'STDERR'             : '._log.log:STDERR',
+        'Syslog'             : '._log.log:Syslog',
+    },
+
+})
--- a/third_party/python/py/py/__metainfo.py
+++ b/third_party/python/py/py/__metainfo.py
@@ -1,2 +1,2 @@
-import py
-pydir = py.path.local(py.__file__).dirpath()
+import py
+pydir = py.path.local(py.__file__).dirpath()
--- a/third_party/python/py/py/_apipkg.py
+++ b/third_party/python/py/py/_apipkg.py
@@ -1,181 +1,181 @@
-"""
-apipkg: control the exported namespace of a python package.
-
-see http://pypi.python.org/pypi/apipkg
-
-(c) holger krekel, 2009 - MIT license
-"""
-import os
-import sys
-from types import ModuleType
-
-__version__ = '1.3.dev'
-
-def _py_abspath(path):
-    """
-    special version of abspath
-    that will leave paths from jython jars alone
-    """
-    if path.startswith('__pyclasspath__'):
-
-        return path
-    else:
-        return os.path.abspath(path)
-
-def initpkg(pkgname, exportdefs, attr=dict()):
-    """ initialize given package from the export definitions. """
-    oldmod = sys.modules.get(pkgname)
-    d = {}
-    f = getattr(oldmod, '__file__', None)
-    if f:
-        f = _py_abspath(f)
-    d['__file__'] = f
-    if hasattr(oldmod, '__version__'):
-        d['__version__'] = oldmod.__version__
-    if hasattr(oldmod, '__loader__'):
-        d['__loader__'] = oldmod.__loader__
-    if hasattr(oldmod, '__path__'):
-        d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
-    if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
-        d['__doc__'] = oldmod.__doc__
-    d.update(attr)
-    if hasattr(oldmod, "__dict__"):
-        oldmod.__dict__.update(d)
-    mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
-    sys.modules[pkgname] = mod
-
-def importobj(modpath, attrname):
-    module = __import__(modpath, None, None, ['__doc__'])
-    if not attrname:
-        return module
-
-    retval = module
-    names = attrname.split(".")
-    for x in names:
-        retval = getattr(retval, x)
-    return retval
-
-class ApiModule(ModuleType):
-    def __docget(self):
-        try:
-            return self.__doc
-        except AttributeError:
-            if '__doc__' in self.__map__:
-                return self.__makeattr('__doc__')
-    def __docset(self, value):
-        self.__doc = value
-    __doc__ = property(__docget, __docset)
-
-    def __init__(self, name, importspec, implprefix=None, attr=None):
-        self.__name__ = name
-        self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
-        self.__map__ = {}
-        self.__implprefix__ = implprefix or name
-        if attr:
-            for name, val in attr.items():
-                # print "setting", self.__name__, name, val
-                setattr(self, name, val)
-        for name, importspec in importspec.items():
-            if isinstance(importspec, dict):
-                subname = '%s.%s' % (self.__name__, name)
-                apimod = ApiModule(subname, importspec, implprefix)
-                sys.modules[subname] = apimod
-                setattr(self, name, apimod)
-            else:
-                parts = importspec.split(':')
-                modpath = parts.pop(0)
-                attrname = parts and parts[0] or ""
-                if modpath[0] == '.':
-                    modpath = implprefix + modpath
-
-                if not attrname:
-                    subname = '%s.%s' % (self.__name__, name)
-                    apimod = AliasModule(subname, modpath)
-                    sys.modules[subname] = apimod
-                    if '.' not in name:
-                        setattr(self, name, apimod)
-                else:
-                    self.__map__[name] = (modpath, attrname)
-
-    def __repr__(self):
-        l = []
-        if hasattr(self, '__version__'):
-            l.append("version=" + repr(self.__version__))
-        if hasattr(self, '__file__'):
-            l.append('from ' + repr(self.__file__))
-        if l:
-            return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
-        return '<ApiModule %r>' % (self.__name__,)
-
-    def __makeattr(self, name):
-        """lazily compute value for name or raise AttributeError if unknown."""
-        # print "makeattr", self.__name__, name
-        target = None
-        if '__onfirstaccess__' in self.__map__:
-            target = self.__map__.pop('__onfirstaccess__')
-            importobj(*target)()
-        try:
-            modpath, attrname = self.__map__[name]
-        except KeyError:
-            if target is not None and name != '__onfirstaccess__':
-                # retry, onfirstaccess might have set attrs
-                return getattr(self, name)
-            raise AttributeError(name)
-        else:
-            result = importobj(modpath, attrname)
-            setattr(self, name, result)
-            try:
-                del self.__map__[name]
-            except KeyError:
-                pass  # in a recursive-import situation a double-del can happen
-            return result
-
-    __getattr__ = __makeattr
-
-    def __dict__(self):
-        # force all the content of the module to be loaded when __dict__ is read
-        dictdescr = ModuleType.__dict__['__dict__']
-        dict = dictdescr.__get__(self)
-        if dict is not None:
-            hasattr(self, 'some')
-            for name in self.__all__:
-                try:
-                    self.__makeattr(name)
-                except AttributeError:
-                    pass
-        return dict
-    __dict__ = property(__dict__)
-
-
-def AliasModule(modname, modpath, attrname=None):
-    mod = []
-
-    def getmod():
-        if not mod:
-            x = importobj(modpath, None)
-            if attrname is not None:
-                x = getattr(x, attrname)
-            mod.append(x)
-        return mod[0]
-
-    class AliasModule(ModuleType):
-
-        def __repr__(self):
-            x = modpath
-            if attrname:
-                x += "." + attrname
-            return '<AliasModule %r for %r>' % (modname, x)
-
-        def __getattribute__(self, name):
-            try:
-                return getattr(getmod(), name)
-            except ImportError:
-                return None
-
-        def __setattr__(self, name, value):
-            setattr(getmod(), name, value)
-
-        def __delattr__(self, name):
-            delattr(getmod(), name)
-
-    return AliasModule(str(modname))
+"""
+apipkg: control the exported namespace of a python package.
+
+see http://pypi.python.org/pypi/apipkg
+
+(c) holger krekel, 2009 - MIT license
+"""
+import os
+import sys
+from types import ModuleType
+
+__version__ = '1.3.dev'
+
+def _py_abspath(path):
+    """
+    special version of abspath
+    that will leave paths from jython jars alone
+    """
+    if path.startswith('__pyclasspath__'):
+
+        return path
+    else:
+        return os.path.abspath(path)
+
+def initpkg(pkgname, exportdefs, attr=dict()):
+    """ initialize given package from the export definitions. """
+    oldmod = sys.modules.get(pkgname)
+    d = {}
+    f = getattr(oldmod, '__file__', None)
+    if f:
+        f = _py_abspath(f)
+    d['__file__'] = f
+    if hasattr(oldmod, '__version__'):
+        d['__version__'] = oldmod.__version__
+    if hasattr(oldmod, '__loader__'):
+        d['__loader__'] = oldmod.__loader__
+    if hasattr(oldmod, '__path__'):
+        d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
+    if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
+        d['__doc__'] = oldmod.__doc__
+    d.update(attr)
+    if hasattr(oldmod, "__dict__"):
+        oldmod.__dict__.update(d)
+    mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
+    sys.modules[pkgname] = mod
+
+def importobj(modpath, attrname):
+    module = __import__(modpath, None, None, ['__doc__'])
+    if not attrname:
+        return module
+
+    retval = module
+    names = attrname.split(".")
+    for x in names:
+        retval = getattr(retval, x)
+    return retval
+
+class ApiModule(ModuleType):
+    def __docget(self):
+        try:
+            return self.__doc
+        except AttributeError:
+            if '__doc__' in self.__map__:
+                return self.__makeattr('__doc__')
+    def __docset(self, value):
+        self.__doc = value
+    __doc__ = property(__docget, __docset)
+
+    def __init__(self, name, importspec, implprefix=None, attr=None):
+        self.__name__ = name
+        self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
+        self.__map__ = {}
+        self.__implprefix__ = implprefix or name
+        if attr:
+            for name, val in attr.items():
+                # print "setting", self.__name__, name, val
+                setattr(self, name, val)
+        for name, importspec in importspec.items():
+            if isinstance(importspec, dict):
+                subname = '%s.%s' % (self.__name__, name)
+                apimod = ApiModule(subname, importspec, implprefix)
+                sys.modules[subname] = apimod
+                setattr(self, name, apimod)
+            else:
+                parts = importspec.split(':')
+                modpath = parts.pop(0)
+                attrname = parts and parts[0] or ""
+                if modpath[0] == '.':
+                    modpath = implprefix + modpath
+
+                if not attrname:
+                    subname = '%s.%s' % (self.__name__, name)
+                    apimod = AliasModule(subname, modpath)
+                    sys.modules[subname] = apimod
+                    if '.' not in name:
+                        setattr(self, name, apimod)
+                else:
+                    self.__map__[name] = (modpath, attrname)
+
+    def __repr__(self):
+        l = []
+        if hasattr(self, '__version__'):
+            l.append("version=" + repr(self.__version__))
+        if hasattr(self, '__file__'):
+            l.append('from ' + repr(self.__file__))
+        if l:
+            return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
+        return '<ApiModule %r>' % (self.__name__,)
+
+    def __makeattr(self, name):
+        """lazily compute value for name or raise AttributeError if unknown."""
+        # print "makeattr", self.__name__, name
+        target = None
+        if '__onfirstaccess__' in self.__map__:
+            target = self.__map__.pop('__onfirstaccess__')
+            importobj(*target)()
+        try:
+            modpath, attrname = self.__map__[name]
+        except KeyError:
+            if target is not None and name != '__onfirstaccess__':
+                # retry, onfirstaccess might have set attrs
+                return getattr(self, name)
+            raise AttributeError(name)
+        else:
+            result = importobj(modpath, attrname)
+            setattr(self, name, result)
+            try:
+                del self.__map__[name]
+            except KeyError:
+                pass  # in a recursive-import situation a double-del can happen
+            return result
+
+    __getattr__ = __makeattr
+
+    def __dict__(self):
+        # force all the content of the module to be loaded when __dict__ is read
+        dictdescr = ModuleType.__dict__['__dict__']
+        dict = dictdescr.__get__(self)
+        if dict is not None:
+            hasattr(self, 'some')
+            for name in self.__all__:
+                try:
+                    self.__makeattr(name)
+                except AttributeError:
+                    pass
+        return dict
+    __dict__ = property(__dict__)
+
+
+def AliasModule(modname, modpath, attrname=None):
+    mod = []
+
+    def getmod():
+        if not mod:
+            x = importobj(modpath, None)
+            if attrname is not None:
+                x = getattr(x, attrname)
+            mod.append(x)
+        return mod[0]
+
+    class AliasModule(ModuleType):
+
+        def __repr__(self):
+            x = modpath
+            if attrname:
+                x += "." + attrname
+            return '<AliasModule %r for %r>' % (modname, x)
+
+        def __getattribute__(self, name):
+            try:
+                return getattr(getmod(), name)
+            except ImportError:
+                return None
+
+        def __setattr__(self, name, value):
+            setattr(getmod(), name, value)
+
+        def __delattr__(self, name):
+            delattr(getmod(), name)
+
+    return AliasModule(str(modname))
--- a/third_party/python/py/py/_builtin.py
+++ b/third_party/python/py/py/_builtin.py
@@ -1,248 +1,248 @@
-import sys
-
-try:
-    reversed = reversed
-except NameError:
-    def reversed(sequence):
-        """reversed(sequence) -> reverse iterator over values of the sequence
-
-        Return a reverse iterator
-        """
-        if hasattr(sequence, '__reversed__'):
-            return sequence.__reversed__()
-        if not hasattr(sequence, '__getitem__'):
-            raise TypeError("argument to reversed() must be a sequence")
-        return reversed_iterator(sequence)
-
-    class reversed_iterator(object):
-
-        def __init__(self, seq):
-            self.seq = seq
-            self.remaining = len(seq)
-
-        def __iter__(self):
-            return self
-
-        def next(self):
-            i = self.remaining
-            if i > 0:
-                i -= 1
-                item = self.seq[i]
-                self.remaining = i
-                return item
-            raise StopIteration
-
-        def __length_hint__(self):
-            return self.remaining
-
-try:
-    any = any
-except NameError:
-    def any(iterable):
-        for x in iterable:
-            if x:
-                return True
-        return False
-
-try:
-    all = all
-except NameError:
-    def all(iterable):
-        for x in iterable:
-            if not x:
-                return False
-        return True
-
-try:
-    sorted = sorted
-except NameError:
-    builtin_cmp = cmp # need to use cmp as keyword arg
-
-    def sorted(iterable, cmp=None, key=None, reverse=0):
-        use_cmp = None
-        if key is not None:
-            if cmp is None:
-                def use_cmp(x, y):
-                    return builtin_cmp(x[0], y[0])
-            else:
-                def use_cmp(x, y):
-                    return cmp(x[0], y[0])
-            l = [(key(element), element) for element in iterable]
-        else:
-            if cmp is not None:
-                use_cmp = cmp
-            l = list(iterable)
-        if use_cmp is not None:
-            l.sort(use_cmp)
-        else:
-            l.sort()
-        if reverse:
-            l.reverse()
-        if key is not None:
-            return [element for (_, element) in l]
-        return l
-
-try:
-    set, frozenset = set, frozenset
-except NameError:
-    from sets import set, frozenset
-
-# pass through
-enumerate = enumerate
-
-try:
-    BaseException = BaseException
-except NameError:
-    BaseException = Exception
-
-try:
-    GeneratorExit = GeneratorExit
-except NameError:
-    class GeneratorExit(Exception):
-        """ This exception is never raised, it is there to make it possible to
-        write code compatible with CPython 2.5 even in lower CPython
-        versions."""
-        pass
-    GeneratorExit.__module__ = 'exceptions'
-
-_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
-
-try:
-    callable = callable
-except NameError:
-    def callable(obj):
-        return hasattr(obj, "__call__")
-
-if sys.version_info >= (3, 0):
-    exec ("print_ = print ; exec_=exec")
-    import builtins
-
-    # some backward compatibility helpers
-    _basestring = str
-    def _totext(obj, encoding=None, errors=None):
-        if isinstance(obj, bytes):
-            if errors is None:
-                obj = obj.decode(encoding)
-            else:
-                obj = obj.decode(encoding, errors)
-        elif not isinstance(obj, str):
-            obj = str(obj)
-        return obj
-
-    def _isbytes(x):
-        return isinstance(x, bytes)
-    def _istext(x):
-        return isinstance(x, str)
-
-    text = str
-    bytes = bytes
-
-
-    def _getimself(function):
-        return getattr(function, '__self__', None)
-
-    def _getfuncdict(function):
-        return getattr(function, "__dict__", None)
-
-    def _getcode(function):
-        return getattr(function, "__code__", None)
-
-    def execfile(fn, globs=None, locs=None):
-        if globs is None:
-            back = sys._getframe(1)
-            globs = back.f_globals
-            locs = back.f_locals
-            del back
-        elif locs is None:
-            locs = globs
-        fp = open(fn, "r")
-        try:
-            source = fp.read()
-        finally:
-            fp.close()
-        co = compile(source, fn, "exec", dont_inherit=True)
-        exec_(co, globs, locs)
-
-else:
-    import __builtin__ as builtins
-    _totext = unicode
-    _basestring = basestring
-    text = unicode
-    bytes = str
-    execfile = execfile
-    callable = callable
-    def _isbytes(x):
-        return isinstance(x, str)
-    def _istext(x):
-        return isinstance(x, unicode)
-
-    def _getimself(function):
-        return getattr(function, 'im_self', None)
-
-    def _getfuncdict(function):
-        return getattr(function, "__dict__", None)
-
-    def _getcode(function):
-        try:
-            return getattr(function, "__code__")
-        except AttributeError:
-            return getattr(function, "func_code", None)
-
-    def print_(*args, **kwargs):
-        """ minimal backport of py3k print statement. """
-        sep = ' '
-        if 'sep' in kwargs:
-            sep = kwargs.pop('sep')
-        end = '\n'
-        if 'end' in kwargs:
-            end = kwargs.pop('end')
-        file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
-        if kwargs:
-            args = ", ".join([str(x) for x in kwargs])
-            raise TypeError("invalid keyword arguments: %s" % args)
-        at_start = True
-        for x in args:
-            if not at_start:
-                file.write(sep)
-            file.write(str(x))
-            at_start = False
-        file.write(end)
-
-    def exec_(obj, globals=None, locals=None):
-        """ minimal backport of py3k exec statement. """
-        __tracebackhide__ = True
-        if globals is None:
-            frame = sys._getframe(1)
-            globals = frame.f_globals
-            if locals is None:
-                locals = frame.f_locals
-        elif locals is None:
-            locals = globals
-        exec2(obj, globals, locals)
-
-if sys.version_info >= (3, 0):
-    def _reraise(cls, val, tb):
-        __tracebackhide__ = True
-        assert hasattr(val, '__traceback__')
-        raise cls.with_traceback(val, tb)
-else:
-    exec ("""
-def _reraise(cls, val, tb):
-    __tracebackhide__ = True
-    raise cls, val, tb
-def exec2(obj, globals, locals):
-    __tracebackhide__ = True
-    exec obj in globals, locals
-""")
-
-def _tryimport(*names):
-    """ return the first successfully imported module. """
-    assert names
-    for name in names:
-        try:
-            __import__(name)
-        except ImportError:
-            excinfo = sys.exc_info()
-        else:
-            return sys.modules[name]
-    _reraise(*excinfo)
+import sys
+
+try:
+    reversed = reversed
+except NameError:
+    def reversed(sequence):
+        """reversed(sequence) -> reverse iterator over values of the sequence
+
+        Return a reverse iterator
+        """
+        if hasattr(sequence, '__reversed__'):
+            return sequence.__reversed__()
+        if not hasattr(sequence, '__getitem__'):
+            raise TypeError("argument to reversed() must be a sequence")
+        return reversed_iterator(sequence)
+
+    class reversed_iterator(object):
+
+        def __init__(self, seq):
+            self.seq = seq
+            self.remaining = len(seq)
+
+        def __iter__(self):
+            return self
+
+        def next(self):
+            i = self.remaining
+            if i > 0:
+                i -= 1
+                item = self.seq[i]
+                self.remaining = i
+                return item
+            raise StopIteration
+
+        def __length_hint__(self):
+            return self.remaining
+
+try:
+    any = any
+except NameError:
+    def any(iterable):
+        for x in iterable:
+            if x:
+                return True
+        return False
+
+try:
+    all = all
+except NameError:
+    def all(iterable):
+        for x in iterable:
+            if not x:
+                return False
+        return True
+
+try:
+    sorted = sorted
+except NameError:
+    builtin_cmp = cmp # need to use cmp as keyword arg
+
+    def sorted(iterable, cmp=None, key=None, reverse=0):
+        use_cmp = None
+        if key is not None:
+            if cmp is None:
+                def use_cmp(x, y):
+                    return builtin_cmp(x[0], y[0])
+            else:
+                def use_cmp(x, y):
+                    return cmp(x[0], y[0])
+            l = [(key(element), element) for element in iterable]
+        else:
+            if cmp is not None:
+                use_cmp = cmp
+            l = list(iterable)
+        if use_cmp is not None:
+            l.sort(use_cmp)
+        else:
+            l.sort()
+        if reverse:
+            l.reverse()
+        if key is not None:
+            return [element for (_, element) in l]
+        return l
+
+try:
+    set, frozenset = set, frozenset
+except NameError:
+    from sets import set, frozenset
+
+# pass through
+enumerate = enumerate
+
+try:
+    BaseException = BaseException
+except NameError:
+    BaseException = Exception
+
+try:
+    GeneratorExit = GeneratorExit
+except NameError:
+    class GeneratorExit(Exception):
+        """ This exception is never raised, it is there to make it possible to
+        write code compatible with CPython 2.5 even in lower CPython
+        versions."""
+        pass
+    GeneratorExit.__module__ = 'exceptions'
+
+_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return hasattr(obj, "__call__")
+
+if sys.version_info >= (3, 0):
+    exec ("print_ = print ; exec_=exec")
+    import builtins
+
+    # some backward compatibility helpers
+    _basestring = str
+    def _totext(obj, encoding=None, errors=None):
+        if isinstance(obj, bytes):
+            if errors is None:
+                obj = obj.decode(encoding)
+            else:
+                obj = obj.decode(encoding, errors)
+        elif not isinstance(obj, str):
+            obj = str(obj)
+        return obj
+
+    def _isbytes(x):
+        return isinstance(x, bytes)
+    def _istext(x):
+        return isinstance(x, str)
+
+    text = str
+    bytes = bytes
+
+
+    def _getimself(function):
+        return getattr(function, '__self__', None)
+
+    def _getfuncdict(function):
+        return getattr(function, "__dict__", None)
+
+    def _getcode(function):
+        return getattr(function, "__code__", None)
+
+    def execfile(fn, globs=None, locs=None):
+        if globs is None:
+            back = sys._getframe(1)
+            globs = back.f_globals
+            locs = back.f_locals
+            del back
+        elif locs is None:
+            locs = globs
+        fp = open(fn, "r")
+        try:
+            source = fp.read()
+        finally:
+            fp.close()
+        co = compile(source, fn, "exec", dont_inherit=True)
+        exec_(co, globs, locs)
+
+else:
+    import __builtin__ as builtins
+    _totext = unicode
+    _basestring = basestring
+    text = unicode
+    bytes = str
+    execfile = execfile
+    callable = callable
+    def _isbytes(x):
+        return isinstance(x, str)
+    def _istext(x):
+        return isinstance(x, unicode)
+
+    def _getimself(function):
+        return getattr(function, 'im_self', None)
+
+    def _getfuncdict(function):
+        return getattr(function, "__dict__", None)
+
+    def _getcode(function):
+        try:
+            return getattr(function, "__code__")
+        except AttributeError:
+            return getattr(function, "func_code", None)
+
+    def print_(*args, **kwargs):
+        """ minimal backport of py3k print statement. """
+        sep = ' '
+        if 'sep' in kwargs:
+            sep = kwargs.pop('sep')
+        end = '\n'
+        if 'end' in kwargs:
+            end = kwargs.pop('end')
+        file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
+        if kwargs:
+            args = ", ".join([str(x) for x in kwargs])
+            raise TypeError("invalid keyword arguments: %s" % args)
+        at_start = True
+        for x in args:
+            if not at_start:
+                file.write(sep)
+            file.write(str(x))
+            at_start = False
+        file.write(end)
+
+    def exec_(obj, globals=None, locals=None):
+        """ minimal backport of py3k exec statement. """
+        __tracebackhide__ = True
+        if globals is None:
+            frame = sys._getframe(1)
+            globals = frame.f_globals
+            if locals is None:
+                locals = frame.f_locals
+        elif locals is None:
+            locals = globals
+        exec2(obj, globals, locals)
+
+if sys.version_info >= (3, 0):
+    def _reraise(cls, val, tb):
+        __tracebackhide__ = True
+        assert hasattr(val, '__traceback__')
+        raise cls.with_traceback(val, tb)
+else:
+    exec ("""
+def _reraise(cls, val, tb):
+    __tracebackhide__ = True
+    raise cls, val, tb
+def exec2(obj, globals, locals):
+    __tracebackhide__ = True
+    exec obj in globals, locals
+""")
+
+def _tryimport(*names):
+    """ return the first successfully imported module. """
+    assert names
+    for name in names:
+        try:
+            __import__(name)
+        except ImportError:
+            excinfo = sys.exc_info()
+        else:
+            return sys.modules[name]
+    _reraise(*excinfo)
--- a/third_party/python/py/py/_code/__init__.py
+++ b/third_party/python/py/py/_code/__init__.py
@@ -1,1 +1,1 @@
-""" python inspection/code generation API """
+""" python inspection/code generation API """
--- a/third_party/python/py/py/_code/_assertionnew.py
+++ b/third_party/python/py/py/_code/_assertionnew.py
@@ -1,339 +1,339 @@
-"""
-Find intermediate evalutation results in assert statements through builtin AST.
-This should replace _assertionold.py eventually.
-"""
-
-import sys
-import ast
-
-import py
-from py._code.assertion import _format_explanation, BuiltinAssertionError
-
-
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
-    # See http://bugs.jython.org/issue1497
-    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
-              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
-              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
-              "List", "Tuple")
-    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
-              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
-              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
-              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
-    _expr_nodes = set(getattr(ast, name) for name in _exprs)
-    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
-    def _is_ast_expr(node):
-        return node.__class__ in _expr_nodes
-    def _is_ast_stmt(node):
-        return node.__class__ in _stmt_nodes
-else:
-    def _is_ast_expr(node):
-        return isinstance(node, ast.expr)
-    def _is_ast_stmt(node):
-        return isinstance(node, ast.stmt)
-
-
-class Failure(Exception):
-    """Error found while interpreting AST."""
-
-    def __init__(self, explanation=""):
-        self.cause = sys.exc_info()
-        self.explanation = explanation
-
-
-def interpret(source, frame, should_fail=False):
-    mod = ast.parse(source)
-    visitor = DebugInterpreter(frame)
-    try:
-        visitor.visit(mod)
-    except Failure:
-        failure = sys.exc_info()[1]
-        return getfailure(failure)
-    if should_fail:
-        return ("(assertion failed, but when it was re-run for "
-                "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --no-assert)")
-
-def run(offending_line, frame=None):
-    if frame is None:
-        frame = py.code.Frame(sys._getframe(1))
-    return interpret(offending_line, frame)
-
-def getfailure(failure):
-    explanation = _format_explanation(failure.explanation)
-    value = failure.cause[1]
-    if str(value):
-        lines = explanation.splitlines()
-        if not lines:
-            lines.append("")
-        lines[0] += " << %s" % (value,)
-        explanation = "\n".join(lines)
-    text = "%s: %s" % (failure.cause[0].__name__, explanation)
-    if text.startswith("AssertionError: assert "):
-        text = text[16:]
-    return text
-
-
-operator_map = {
-    ast.BitOr : "|",
-    ast.BitXor : "^",
-    ast.BitAnd : "&",
-    ast.LShift : "<<",
-    ast.RShift : ">>",
-    ast.Add : "+",
-    ast.Sub : "-",
-    ast.Mult : "*",
-    ast.Div : "/",
-    ast.FloorDiv : "//",
-    ast.Mod : "%",
-    ast.Eq : "==",
-    ast.NotEq : "!=",
-    ast.Lt : "<",
-    ast.LtE : "<=",
-    ast.Gt : ">",
-    ast.GtE : ">=",
-    ast.Pow : "**",
-    ast.Is : "is",
-    ast.IsNot : "is not",
-    ast.In : "in",
-    ast.NotIn : "not in"
-}
-
-unary_map = {
-    ast.Not : "not %s",
-    ast.Invert : "~%s",
-    ast.USub : "-%s",
-    ast.UAdd : "+%s"
-}
-
-
-class DebugInterpreter(ast.NodeVisitor):
-    """Interpret AST nodes to gleam useful debugging information. """
-
-    def __init__(self, frame):
-        self.frame = frame
-
-    def generic_visit(self, node):
-        # Fallback when we don't have a special implementation.
-        if _is_ast_expr(node):
-            mod = ast.Expression(node)
-            co = self._compile(mod)
-            try:
-                result = self.frame.eval(co)
-            except Exception:
-                raise Failure()
-            explanation = self.frame.repr(result)
-            return explanation, result
-        elif _is_ast_stmt(node):
-            mod = ast.Module([node])
-            co = self._compile(mod, "exec")
-            try:
-                self.frame.exec_(co)
-            except Exception:
-                raise Failure()
-            return None, None
-        else:
-            raise AssertionError("can't handle %s" %(node,))
-
-    def _compile(self, source, mode="eval"):
-        return compile(source, "<assertion interpretation>", mode)
-
-    def visit_Expr(self, expr):
-        return self.visit(expr.value)
-
-    def visit_Module(self, mod):
-        for stmt in mod.body:
-            self.visit(stmt)
-
-    def visit_Name(self, name):
-        explanation, result = self.generic_visit(name)
-        # See if the name is local.
-        source = "%r in locals() is not globals()" % (name.id,)
-        co = self._compile(source)
-        try:
-            local = self.frame.eval(co)
-        except Exception:
-            # have to assume it isn't
-            local = False
-        if not local:
-            return name.id, result
-        return explanation, result
-
-    def visit_Compare(self, comp):
-        left = comp.left
-        left_explanation, left_result = self.visit(left)
-        for op, next_op in zip(comp.ops, comp.comparators):
-            next_explanation, next_result = self.visit(next_op)
-            op_symbol = operator_map[op.__class__]
-            explanation = "%s %s %s" % (left_explanation, op_symbol,
-                                        next_explanation)
-            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
-            co = self._compile(source)
-            try:
-                result = self.frame.eval(co, __exprinfo_left=left_result,
-                                         __exprinfo_right=next_result)
-            except Exception:
-                raise Failure(explanation)
-            try:
-                if not result:
-                    break
-            except KeyboardInterrupt:
-                raise
-            except:
-                break
-            left_explanation, left_result = next_explanation, next_result
-
-        rcomp = py.code._reprcompare
-        if rcomp:
-            res = rcomp(op_symbol, left_result, next_result)
-            if res:
-                explanation = res
-        return explanation, result
-
-    def visit_BoolOp(self, boolop):
-        is_or = isinstance(boolop.op, ast.Or)
-        explanations = []
-        for operand in boolop.values:
-            explanation, result = self.visit(operand)
-            explanations.append(explanation)
-            if result == is_or:
-                break
-        name = is_or and " or " or " and "
-        explanation = "(" + name.join(explanations) + ")"
-        return explanation, result
-
-    def visit_UnaryOp(self, unary):
-        pattern = unary_map[unary.op.__class__]
-        operand_explanation, operand_result = self.visit(unary.operand)
-        explanation = pattern % (operand_explanation,)
-        co = self._compile(pattern % ("__exprinfo_expr",))
-        try:
-            result = self.frame.eval(co, __exprinfo_expr=operand_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, result
-
-    def visit_BinOp(self, binop):
-        left_explanation, left_result = self.visit(binop.left)
-        right_explanation, right_result = self.visit(binop.right)
-        symbol = operator_map[binop.op.__class__]
-        explanation = "(%s %s %s)" % (left_explanation, symbol,
-                                      right_explanation)
-        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, __exprinfo_left=left_result,
-                                     __exprinfo_right=right_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, result
-
-    def visit_Call(self, call):
-        func_explanation, func = self.visit(call.func)
-        arg_explanations = []
-        ns = {"__exprinfo_func" : func}
-        arguments = []
-        for arg in call.args:
-            arg_explanation, arg_result = self.visit(arg)
-            arg_name = "__exprinfo_%s" % (len(ns),)
-            ns[arg_name] = arg_result
-            arguments.append(arg_name)
-            arg_explanations.append(arg_explanation)
-        for keyword in call.keywords:
-            arg_explanation, arg_result = self.visit(keyword.value)
-            arg_name = "__exprinfo_%s" % (len(ns),)
-            ns[arg_name] = arg_result
-            keyword_source = "%s=%%s" % (keyword.arg)
-            arguments.append(keyword_source % (arg_name,))
-            arg_explanations.append(keyword_source % (arg_explanation,))
-        if call.starargs:
-            arg_explanation, arg_result = self.visit(call.starargs)
-            arg_name = "__exprinfo_star"
-            ns[arg_name] = arg_result
-            arguments.append("*%s" % (arg_name,))
-            arg_explanations.append("*%s" % (arg_explanation,))
-        if call.kwargs:
-            arg_explanation, arg_result = self.visit(call.kwargs)
-            arg_name = "__exprinfo_kwds"
-            ns[arg_name] = arg_result
-            arguments.append("**%s" % (arg_name,))
-            arg_explanations.append("**%s" % (arg_explanation,))
-        args_explained = ", ".join(arg_explanations)
-        explanation = "%s(%s)" % (func_explanation, args_explained)
-        args = ", ".join(arguments)
-        source = "__exprinfo_func(%s)" % (args,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, **ns)
-        except Exception:
-            raise Failure(explanation)
-        pattern = "%s\n{%s = %s\n}"
-        rep = self.frame.repr(result)
-        explanation = pattern % (rep, rep, explanation)
-        return explanation, result
-
-    def _is_builtin_name(self, name):
-        pattern = "%r not in globals() and %r not in locals()"
-        source = pattern % (name.id, name.id)
-        co = self._compile(source)
-        try:
-            return self.frame.eval(co)
-        except Exception:
-            return False
-
-    def visit_Attribute(self, attr):
-        if not isinstance(attr.ctx, ast.Load):
-            return self.generic_visit(attr)
-        source_explanation, source_result = self.visit(attr.value)
-        explanation = "%s.%s" % (source_explanation, attr.attr)
-        source = "__exprinfo_expr.%s" % (attr.attr,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, __exprinfo_expr=source_result)
-        except Exception:
-            raise Failure(explanation)
-        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
-                                              self.frame.repr(result),
-                                              source_explanation, attr.attr)
-        # Check if the attr is from an instance.
-        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
-        source = source % (attr.attr,)
-        co = self._compile(source)
-        try:
-            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
-        except Exception:
-            from_instance = True
-        if from_instance:
-            rep = self.frame.repr(result)
-            pattern = "%s\n{%s = %s\n}"
-            explanation = pattern % (rep, rep, explanation)
-        return explanation, result
-
-    def visit_Assert(self, assrt):
-        test_explanation, test_result = self.visit(assrt.test)
-        if test_explanation.startswith("False\n{False =") and \
-                test_explanation.endswith("\n"):
-            test_explanation = test_explanation[15:-2]
-        explanation = "assert %s" % (test_explanation,)
-        if not test_result:
-            try:
-                raise BuiltinAssertionError
-            except Exception:
-                raise Failure(explanation)
-        return explanation, test_result
-
-    def visit_Assign(self, assign):
-        value_explanation, value_result = self.visit(assign.value)
-        explanation = "... = %s" % (value_explanation,)
-        name = ast.Name("__exprinfo_expr", ast.Load(),
-                        lineno=assign.value.lineno,
-                        col_offset=assign.value.col_offset)
-        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
-                                col_offset=assign.col_offset)
-        mod = ast.Module([new_assign])
-        co = self._compile(mod, "exec")
-        try:
-            self.frame.exec_(co, __exprinfo_expr=value_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, value_result
+"""
+Find intermediate evalutation results in assert statements through builtin AST.
+This should replace _assertionold.py eventually.
+"""
+
+import sys
+import ast
+
+import py
+from py._code.assertion import _format_explanation, BuiltinAssertionError
+
+
+if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+    # See http://bugs.jython.org/issue1497
+    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+              "List", "Tuple")
+    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+    _expr_nodes = set(getattr(ast, name) for name in _exprs)
+    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+    def _is_ast_expr(node):
+        return node.__class__ in _expr_nodes
+    def _is_ast_stmt(node):
+        return node.__class__ in _stmt_nodes
+else:
+    def _is_ast_expr(node):
+        return isinstance(node, ast.expr)
+    def _is_ast_stmt(node):
+        return isinstance(node, ast.stmt)
+
+
+class Failure(Exception):
+    """Error found while interpreting AST."""
+
+    def __init__(self, explanation=""):
+        self.cause = sys.exc_info()
+        self.explanation = explanation
+
+
+def interpret(source, frame, should_fail=False):
+    mod = ast.parse(source)
+    visitor = DebugInterpreter(frame)
+    try:
+        visitor.visit(mod)
+    except Failure:
+        failure = sys.exc_info()[1]
+        return getfailure(failure)
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --no-assert)")
+
+def run(offending_line, frame=None):
+    if frame is None:
+        frame = py.code.Frame(sys._getframe(1))
+    return interpret(offending_line, frame)
+
+def getfailure(failure):
+    explanation = _format_explanation(failure.explanation)
+    value = failure.cause[1]
+    if str(value):
+        lines = explanation.splitlines()
+        if not lines:
+            lines.append("")
+        lines[0] += " << %s" % (value,)
+        explanation = "\n".join(lines)
+    text = "%s: %s" % (failure.cause[0].__name__, explanation)
+    if text.startswith("AssertionError: assert "):
+        text = text[16:]
+    return text
+
+
+operator_map = {
+    ast.BitOr : "|",
+    ast.BitXor : "^",
+    ast.BitAnd : "&",
+    ast.LShift : "<<",
+    ast.RShift : ">>",
+    ast.Add : "+",
+    ast.Sub : "-",
+    ast.Mult : "*",
+    ast.Div : "/",
+    ast.FloorDiv : "//",
+    ast.Mod : "%",
+    ast.Eq : "==",
+    ast.NotEq : "!=",
+    ast.Lt : "<",
+    ast.LtE : "<=",
+    ast.Gt : ">",
+    ast.GtE : ">=",
+    ast.Pow : "**",
+    ast.Is : "is",
+    ast.IsNot : "is not",
+    ast.In : "in",
+    ast.NotIn : "not in"
+}
+
+unary_map = {
+    ast.Not : "not %s",
+    ast.Invert : "~%s",
+    ast.USub : "-%s",
+    ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+    """Interpret AST nodes to gleam useful debugging information. """
+
+    def __init__(self, frame):
+        self.frame = frame
+
+    def generic_visit(self, node):
+        # Fallback when we don't have a special implementation.
+        if _is_ast_expr(node):
+            mod = ast.Expression(node)
+            co = self._compile(mod)
+            try:
+                result = self.frame.eval(co)
+            except Exception:
+                raise Failure()
+            explanation = self.frame.repr(result)
+            return explanation, result
+        elif _is_ast_stmt(node):
+            mod = ast.Module([node])
+            co = self._compile(mod, "exec")
+            try:
+                self.frame.exec_(co)
+            except Exception:
+                raise Failure()
+            return None, None
+        else:
+            raise AssertionError("can't handle %s" %(node,))
+
+    def _compile(self, source, mode="eval"):
+        return compile(source, "<assertion interpretation>", mode)
+
+    def visit_Expr(self, expr):
+        return self.visit(expr.value)
+
+    def visit_Module(self, mod):
+        for stmt in mod.body:
+            self.visit(stmt)
+
+    def visit_Name(self, name):
+        explanation, result = self.generic_visit(name)
+        # See if the name is local.
+        source = "%r in locals() is not globals()" % (name.id,)
+        co = self._compile(source)
+        try:
+            local = self.frame.eval(co)
+        except Exception:
+            # have to assume it isn't
+            local = False
+        if not local:
+            return name.id, result
+        return explanation, result
+
+    def visit_Compare(self, comp):
+        left = comp.left
+        left_explanation, left_result = self.visit(left)
+        for op, next_op in zip(comp.ops, comp.comparators):
+            next_explanation, next_result = self.visit(next_op)
+            op_symbol = operator_map[op.__class__]
+            explanation = "%s %s %s" % (left_explanation, op_symbol,
+                                        next_explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+            co = self._compile(source)
+            try:
+                result = self.frame.eval(co, __exprinfo_left=left_result,
+                                         __exprinfo_right=next_result)
+            except Exception:
+                raise Failure(explanation)
+            try:
+                if not result:
+                    break
+            except KeyboardInterrupt:
+                raise
+            except:
+                break
+            left_explanation, left_result = next_explanation, next_result
+
+        rcomp = py.code._reprcompare
+        if rcomp:
+            res = rcomp(op_symbol, left_result, next_result)
+            if res:
+                explanation = res
+        return explanation, result
+
+    def visit_BoolOp(self, boolop):
+        is_or = isinstance(boolop.op, ast.Or)
+        explanations = []
+        for operand in boolop.values:
+            explanation, result = self.visit(operand)
+            explanations.append(explanation)
+            if result == is_or:
+                break
+        name = is_or and " or " or " and "
+        explanation = "(" + name.join(explanations) + ")"
+        return explanation, result
+
+    def visit_UnaryOp(self, unary):
+        pattern = unary_map[unary.op.__class__]
+        operand_explanation, operand_result = self.visit(unary.operand)
+        explanation = pattern % (operand_explanation,)
+        co = self._compile(pattern % ("__exprinfo_expr",))
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=operand_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_BinOp(self, binop):
+        left_explanation, left_result = self.visit(binop.left)
+        right_explanation, right_result = self.visit(binop.right)
+        symbol = operator_map[binop.op.__class__]
+        explanation = "(%s %s %s)" % (left_explanation, symbol,
+                                      right_explanation)
+        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_left=left_result,
+                                     __exprinfo_right=right_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_Call(self, call):
+        func_explanation, func = self.visit(call.func)
+        arg_explanations = []
+        ns = {"__exprinfo_func" : func}
+        arguments = []
+        for arg in call.args:
+            arg_explanation, arg_result = self.visit(arg)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            arguments.append(arg_name)
+            arg_explanations.append(arg_explanation)
+        for keyword in call.keywords:
+            arg_explanation, arg_result = self.visit(keyword.value)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            keyword_source = "%s=%%s" % (keyword.arg)
+            arguments.append(keyword_source % (arg_name,))
+            arg_explanations.append(keyword_source % (arg_explanation,))
+        if call.starargs:
+            arg_explanation, arg_result = self.visit(call.starargs)
+            arg_name = "__exprinfo_star"
+            ns[arg_name] = arg_result
+            arguments.append("*%s" % (arg_name,))
+            arg_explanations.append("*%s" % (arg_explanation,))
+        if call.kwargs:
+            arg_explanation, arg_result = self.visit(call.kwargs)
+            arg_name = "__exprinfo_kwds"
+            ns[arg_name] = arg_result
+            arguments.append("**%s" % (arg_name,))
+            arg_explanations.append("**%s" % (arg_explanation,))
+        args_explained = ", ".join(arg_explanations)
+        explanation = "%s(%s)" % (func_explanation, args_explained)
+        args = ", ".join(arguments)
+        source = "__exprinfo_func(%s)" % (args,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, **ns)
+        except Exception:
+            raise Failure(explanation)
+        pattern = "%s\n{%s = %s\n}"
+        rep = self.frame.repr(result)
+        explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def _is_builtin_name(self, name):
+        pattern = "%r not in globals() and %r not in locals()"
+        source = pattern % (name.id, name.id)
+        co = self._compile(source)
+        try:
+            return self.frame.eval(co)
+        except Exception:
+            return False
+
+    def visit_Attribute(self, attr):
+        if not isinstance(attr.ctx, ast.Load):
+            return self.generic_visit(attr)
+        source_explanation, source_result = self.visit(attr.value)
+        explanation = "%s.%s" % (source_explanation, attr.attr)
+        source = "__exprinfo_expr.%s" % (attr.attr,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            raise Failure(explanation)
+        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+                                              self.frame.repr(result),
+                                              source_explanation, attr.attr)
+        # Check if the attr is from an instance.
+        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+        source = source % (attr.attr,)
+        co = self._compile(source)
+        try:
+            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            from_instance = True
+        if from_instance:
+            rep = self.frame.repr(result)
+            pattern = "%s\n{%s = %s\n}"
+            explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def visit_Assert(self, assrt):
+        test_explanation, test_result = self.visit(assrt.test)
+        if test_explanation.startswith("False\n{False =") and \
+                test_explanation.endswith("\n"):
+            test_explanation = test_explanation[15:-2]
+        explanation = "assert %s" % (test_explanation,)
+        if not test_result:
+            try:
+                raise BuiltinAssertionError
+            except Exception:
+                raise Failure(explanation)
+        return explanation, test_result
+
+    def visit_Assign(self, assign):
+        value_explanation, value_result = self.visit(assign.value)
+        explanation = "... = %s" % (value_explanation,)
+        name = ast.Name("__exprinfo_expr", ast.Load(),
+                        lineno=assign.value.lineno,
+                        col_offset=assign.value.col_offset)
+        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+                                col_offset=assign.col_offset)
+        mod = ast.Module([new_assign])
+        co = self._compile(mod, "exec")
+        try:
+            self.frame.exec_(co, __exprinfo_expr=value_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, value_result
--- a/third_party/python/py/py/_code/_assertionold.py
+++ b/third_party/python/py/py/_code/_assertionold.py
@@ -1,555 +1,555 @@
-import py
-import sys, inspect
-from compiler import parse, ast, pycodegen
-from py._code.assertion import BuiltinAssertionError, _format_explanation
-
-passthroughex = py.builtin._sysex
-
-class Failure:
-    def __init__(self, node):
-        self.exc, self.value, self.tb = sys.exc_info()
-        self.node = node
-
-class View(object):
-    """View base class.
-
-    If C is a subclass of View, then C(x) creates a proxy object around
-    the object x.  The actual class of the proxy is not C in general,
-    but a *subclass* of C determined by the rules below.  To avoid confusion
-    we call view class the class of the proxy (a subclass of C, so of View)
-    and object class the class of x.
-
-    Attributes and methods not found in the proxy are automatically read on x.
-    Other operations like setting attributes are performed on the proxy, as
-    determined by its view class.  The object x is available from the proxy
-    as its __obj__ attribute.
-
-    The view class selection is determined by the __view__ tuples and the
-    optional __viewkey__ method.  By default, the selected view class is the
-    most specific subclass of C whose __view__ mentions the class of x.
-    If no such subclass is found, the search proceeds with the parent
-    object classes.  For example, C(True) will first look for a subclass
-    of C with __view__ = (..., bool, ...) and only if it doesn't find any
-    look for one with __view__ = (..., int, ...), and then ..., object,...
-    If everything fails the class C itself is considered to be the default.
-
-    Alternatively, the view class selection can be driven by another aspect
-    of the object x, instead of the class of x, by overriding __viewkey__.
-    See last example at the end of this module.
-    """
-
-    _viewcache = {}
-    __view__ = ()
-
-    def __new__(rootclass, obj, *args, **kwds):
-        self = object.__new__(rootclass)
-        self.__obj__ = obj
-        self.__rootclass__ = rootclass
-        key = self.__viewkey__()
-        try:
-            self.__class__ = self._viewcache[key]
-        except KeyError:
-            self.__class__ = self._selectsubclass(key)
-        return self
-
-    def __getattr__(self, attr):
-        # attributes not found in the normal hierarchy rooted on View
-        # are looked up in the object's real class
-        return getattr(self.__obj__, attr)
-
-    def __viewkey__(self):
-        return self.__obj__.__class__
-
-    def __matchkey__(self, key, subclasses):
-        if inspect.isclass(key):
-            keys = inspect.getmro(key)
-        else:
-            keys = [key]
-        for key in keys:
-            result = [C for C in subclasses if key in C.__view__]
-            if result:
-                return result
-        return []
-
-    def _selectsubclass(self, key):
-        subclasses = list(enumsubclasses(self.__rootclass__))
-        for C in subclasses:
-            if not isinstance(C.__view__, tuple):
-                C.__view__ = (C.__view__,)
-        choices = self.__matchkey__(key, subclasses)
-        if not choices:
-            return self.__rootclass__
-        elif len(choices) == 1:
-            return choices[0]
-        else:
-            # combine the multiple choices
-            return type('?', tuple(choices), {})
-
-    def __repr__(self):
-        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
-
-
-def enumsubclasses(cls):
-    for subcls in cls.__subclasses__():
-        for subsubclass in enumsubclasses(subcls):
-            yield subsubclass
-    yield cls
-
-
-class Interpretable(View):
-    """A parse tree node with a few extra methods."""
-    explanation = None
-
-    def is_builtin(self, frame):
-        return False
-
-    def eval(self, frame):
-        # fall-back for unknown expression nodes
-        try:
-            expr = ast.Expression(self.__obj__)
-            expr.filename = '<eval>'
-            self.__obj__.filename = '<eval>'
-            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
-            result = frame.eval(co)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-        self.result = result
-        self.explanation = self.explanation or frame.repr(self.result)
-
-    def run(self, frame):
-        # fall-back for unknown statement nodes
-        try:
-            expr = ast.Module(None, ast.Stmt([self.__obj__]))
-            expr.filename = '<run>'
-            co = pycodegen.ModuleCodeGenerator(expr).getCode()
-            frame.exec_(co)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-
-    def nice_explanation(self):
-        return _format_explanation(self.explanation)
-
-
-class Name(Interpretable):
-    __view__ = ast.Name
-
-    def is_local(self, frame):
-        source = '%r in locals() is not globals()' % self.name
-        try:
-            return frame.is_true(frame.eval(source))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def is_global(self, frame):
-        source = '%r in globals()' % self.name
-        try:
-            return frame.is_true(frame.eval(source))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def is_builtin(self, frame):
-        source = '%r not in locals() and %r not in globals()' % (
-            self.name, self.name)
-        try:
-            return frame.is_true(frame.eval(source))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def eval(self, frame):
-        super(Name, self).eval(frame)
-        if not self.is_local(frame):
-            self.explanation = self.name
-
-class Compare(Interpretable):
-    __view__ = ast.Compare
-
-    def eval(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        for operation, expr2 in self.ops:
-            if hasattr(self, 'result'):
-                # shortcutting in chained expressions
-                if not frame.is_true(self.result):
-                    break
-            expr2 = Interpretable(expr2)
-            expr2.eval(frame)
-            self.explanation = "%s %s %s" % (
-                expr.explanation, operation, expr2.explanation)
-            source = "__exprinfo_left %s __exprinfo_right" % operation
-            try:
-                self.result = frame.eval(source,
-                                         __exprinfo_left=expr.result,
-                                         __exprinfo_right=expr2.result)
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-            expr = expr2
-
-class And(Interpretable):
-    __view__ = ast.And
-
-    def eval(self, frame):
-        explanations = []
-        for expr in self.nodes:
-            expr = Interpretable(expr)
-            expr.eval(frame)
-            explanations.append(expr.explanation)
-            self.result = expr.result
-            if not frame.is_true(expr.result):
-                break
-        self.explanation = '(' + ' and '.join(explanations) + ')'
-
-class Or(Interpretable):
-    __view__ = ast.Or
-
-    def eval(self, frame):
-        explanations = []
-        for expr in self.nodes:
-            expr = Interpretable(expr)
-            expr.eval(frame)
-            explanations.append(expr.explanation)
-            self.result = expr.result
-            if frame.is_true(expr.result):
-                break
-        self.explanation = '(' + ' or '.join(explanations) + ')'
-
-
-# == Unary operations ==
-keepalive = []
-for astclass, astpattern in {
-    ast.Not    : 'not __exprinfo_expr',
-    ast.Invert : '(~__exprinfo_expr)',
-    }.items():
-
-    class UnaryArith(Interpretable):
-        __view__ = astclass
-
-        def eval(self, frame, astpattern=astpattern):
-            expr = Interpretable(self.expr)
-            expr.eval(frame)
-            self.explanation = astpattern.replace('__exprinfo_expr',
-                                                  expr.explanation)
-            try:
-                self.result = frame.eval(astpattern,
-                                         __exprinfo_expr=expr.result)
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-
-    keepalive.append(UnaryArith)
-
-# == Binary operations ==
-for astclass, astpattern in {
-    ast.Add    : '(__exprinfo_left + __exprinfo_right)',
-    ast.Sub    : '(__exprinfo_left - __exprinfo_right)',
-    ast.Mul    : '(__exprinfo_left * __exprinfo_right)',
-    ast.Div    : '(__exprinfo_left / __exprinfo_right)',
-    ast.Mod    : '(__exprinfo_left % __exprinfo_right)',
-    ast.Power  : '(__exprinfo_left ** __exprinfo_right)',
-    }.items():
-
-    class BinaryArith(Interpretable):
-        __view__ = astclass
-
-        def eval(self, frame, astpattern=astpattern):
-            left = Interpretable(self.left)
-            left.eval(frame)
-            right = Interpretable(self.right)
-            right.eval(frame)
-            self.explanation = (astpattern
-                                .replace('__exprinfo_left',  left .explanation)
-                                .replace('__exprinfo_right', right.explanation))
-            try:
-                self.result = frame.eval(astpattern,
-                                         __exprinfo_left=left.result,
-                                         __exprinfo_right=right.result)
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-
-    keepalive.append(BinaryArith)
-
-
-class CallFunc(Interpretable):
-    __view__ = ast.CallFunc
-
-    def is_bool(self, frame):
-        source = 'isinstance(__exprinfo_value, bool)'
-        try:
-            return frame.is_true(frame.eval(source,
-                                            __exprinfo_value=self.result))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def eval(self, frame):
-        node = Interpretable(self.node)
-        node.eval(frame)
-        explanations = []
-        vars = {'__exprinfo_fn': node.result}
-        source = '__exprinfo_fn('
-        for a in self.args:
-            if isinstance(a, ast.Keyword):
-                keyword = a.name
-                a = a.expr
-            else:
-                keyword = None
-            a = Interpretable(a)
-            a.eval(frame)
-            argname = '__exprinfo_%d' % len(vars)
-            vars[argname] = a.result
-            if keyword is None:
-                source += argname + ','
-                explanations.append(a.explanation)
-            else:
-                source += '%s=%s,' % (keyword, argname)
-                explanations.append('%s=%s' % (keyword, a.explanation))
-        if self.star_args:
-            star_args = Interpretable(self.star_args)
-            star_args.eval(frame)
-            argname = '__exprinfo_star'
-            vars[argname] = star_args.result
-            source += '*' + argname + ','
-            explanations.append('*' + star_args.explanation)
-        if self.dstar_args:
-            dstar_args = Interpretable(self.dstar_args)
-            dstar_args.eval(frame)
-            argname = '__exprinfo_kwds'
-            vars[argname] = dstar_args.result
-            source += '**' + argname + ','
-            explanations.append('**' + dstar_args.explanation)
-        self.explanation = "%s(%s)" % (
-            node.explanation, ', '.join(explanations))
-        if source.endswith(','):
-            source = source[:-1]
-        source += ')'
-        try:
-            self.result = frame.eval(source, **vars)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-        if not node.is_builtin(frame) or not self.is_bool(frame):
-            r = frame.repr(self.result)
-            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
-
-class Getattr(Interpretable):
-    __view__ = ast.Getattr
-
-    def eval(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        source = '__exprinfo_expr.%s' % self.attrname
-        try:
-            self.result = frame.eval(source, __exprinfo_expr=expr.result)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
-        # if the attribute comes from the instance, its value is interesting
-        source = ('hasattr(__exprinfo_expr, "__dict__") and '
-                  '%r in __exprinfo_expr.__dict__' % self.attrname)
-        try:
-            from_instance = frame.is_true(
-                frame.eval(source, __exprinfo_expr=expr.result))
-        except passthroughex:
-            raise
-        except:
-            from_instance = True
-        if from_instance:
-            r = frame.repr(self.result)
-            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
-
-# == Re-interpretation of full statements ==
-
-class Assert(Interpretable):
-    __view__ = ast.Assert
-
-    def run(self, frame):
-        test = Interpretable(self.test)
-        test.eval(frame)
-        # simplify 'assert False where False = ...'
-        if (test.explanation.startswith('False\n{False = ') and
-            test.explanation.endswith('\n}')):
-            test.explanation = test.explanation[15:-2]
-        # print the result as  'assert <explanation>'
-        self.result = test.result
-        self.explanation = 'assert ' + test.explanation
-        if not frame.is_true(test.result):
-            try:
-                raise BuiltinAssertionError
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-
-class Assign(Interpretable):
-    __view__ = ast.Assign
-
-    def run(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        self.result = expr.result
-        self.explanation = '... = ' + expr.explanation
-        # fall-back-run the rest of the assignment
-        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
-        mod = ast.Module(None, ast.Stmt([ass]))
-        mod.filename = '<run>'
-        co = pycodegen.ModuleCodeGenerator(mod).getCode()
-        try:
-            frame.exec_(co, __exprinfo_expr=expr.result)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-
-class Discard(Interpretable):
-    __view__ = ast.Discard
-
-    def run(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        self.result = expr.result
-        self.explanation = expr.explanation
-
-class Stmt(Interpretable):
-    __view__ = ast.Stmt
-
-    def run(self, frame):
-        for stmt in self.nodes:
-            stmt = Interpretable(stmt)
-            stmt.run(frame)
-
-
-def report_failure(e):
-    explanation = e.node.nice_explanation()
-    if explanation:
-        explanation = ", in: " + explanation
-    else:
-        explanation = ""
-    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
-
-def check(s, frame=None):
-    if frame is None:
-        frame = sys._getframe(1)
-        frame = py.code.Frame(frame)
-    expr = parse(s, 'eval')
-    assert isinstance(expr, ast.Expression)
-    node = Interpretable(expr.node)
-    try:
-        node.eval(frame)
-    except passthroughex:
-        raise
-    except Failure:
-        e = sys.exc_info()[1]
-        report_failure(e)
-    else:
-        if not frame.is_true(node.result):
-            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
-
-
-###########################################################
-# API / Entry points
-# #########################################################
-
-def interpret(source, frame, should_fail=False):
-    module = Interpretable(parse(source, 'exec').node)
-    #print "got module", module
-    if isinstance(frame, py.std.types.FrameType):
-        frame = py.code.Frame(frame)
-    try:
-        module.run(frame)
-    except Failure:
-        e = sys.exc_info()[1]
-        return getfailure(e)
-    except passthroughex:
-        raise
-    except:
-        import traceback
-        traceback.print_exc()
-    if should_fail:
-        return ("(assertion failed, but when it was re-run for "
-                "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --nomagic)")
-    else:
-        return None
-
-def getmsg(excinfo):
-    if isinstance(excinfo, tuple):
-        excinfo = py.code.ExceptionInfo(excinfo)
-    #frame, line = gettbline(tb)
-    #frame = py.code.Frame(frame)
-    #return interpret(line, frame)
-
-    tb = excinfo.traceback[-1]
-    source = str(tb.statement).strip()
-    x = interpret(source, tb.frame, should_fail=True)
-    if not isinstance(x, str):
-        raise TypeError("interpret returned non-string %r" % (x,))
-    return x
-
-def getfailure(e):
-    explanation = e.node.nice_explanation()
-    if str(e.value):
-        lines = explanation.split('\n')
-        lines[0] += "  << %s" % (e.value,)
-        explanation = '\n'.join(lines)
-    text = "%s: %s" % (e.exc.__name__, explanation)
-    if text.startswith('AssertionError: assert '):
-        text = text[16:]
-    return text
-
-def run(s, frame=None):
-    if frame is None:
-        frame = sys._getframe(1)
-        frame = py.code.Frame(frame)
-    module = Interpretable(parse(s, 'exec').node)
-    try:
-        module.run(frame)
-    except Failure:
-        e = sys.exc_info()[1]
-        report_failure(e)
-
-
-if __name__ == '__main__':
-    # example:
-    def f():
-        return 5
-    def g():
-        return 3
-    def h(x):
-        return 'never'
-    check("f() * g() == 5")
-    check("not f()")
-    check("not (f() and g() or 0)")
-    check("f() == g()")
-    i = 4
-    check("i == f()")
-    check("len(f()) == 0")
-    check("isinstance(2+3+4, float)")
-
-    run("x = i")
-    check("x == 5")
-
-    run("assert not f(), 'oops'")
-    run("a, b, c = 1, 2")
-    run("a, b, c = f()")
-
-    check("max([f(),g()]) == 4")
-    check("'hello'[g()] == 'h'")
-    run("'guk%d' % h(f())")
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from py._code.assertion import BuiltinAssertionError, _format_explanation
+
+passthroughex = py.builtin._sysex
+
+class Failure:
+    def __init__(self, node):
+        self.exc, self.value, self.tb = sys.exc_info()
+        self.node = node
+
+class View(object):
+    """View base class.
+
+    If C is a subclass of View, then C(x) creates a proxy object around
+    the object x.  The actual class of the proxy is not C in general,
+    but a *subclass* of C determined by the rules below.  To avoid confusion
+    we call view class the class of the proxy (a subclass of C, so of View)
+    and object class the class of x.
+
+    Attributes and methods not found in the proxy are automatically read on x.
+    Other operations like setting attributes are performed on the proxy, as
+    determined by its view class.  The object x is available from the proxy
+    as its __obj__ attribute.
+
+    The view class selection is determined by the __view__ tuples and the
+    optional __viewkey__ method.  By default, the selected view class is the
+    most specific subclass of C whose __view__ mentions the class of x.
+    If no such subclass is found, the search proceeds with the parent
+    object classes.  For example, C(True) will first look for a subclass
+    of C with __view__ = (..., bool, ...) and only if it doesn't find any
+    look for one with __view__ = (..., int, ...), and then ..., object,...
+    If everything fails the class C itself is considered to be the default.
+
+    Alternatively, the view class selection can be driven by another aspect
+    of the object x, instead of the class of x, by overriding __viewkey__.
+    See last example at the end of this module.
+    """
+
+    _viewcache = {}
+    __view__ = ()
+
+    def __new__(rootclass, obj, *args, **kwds):
+        self = object.__new__(rootclass)
+        self.__obj__ = obj
+        self.__rootclass__ = rootclass
+        key = self.__viewkey__()
+        try:
+            self.__class__ = self._viewcache[key]
+        except KeyError:
+            self.__class__ = self._selectsubclass(key)
+        return self
+
+    def __getattr__(self, attr):
+        # attributes not found in the normal hierarchy rooted on View
+        # are looked up in the object's real class
+        return getattr(self.__obj__, attr)
+
+    def __viewkey__(self):
+        return self.__obj__.__class__
+
+    def __matchkey__(self, key, subclasses):
+        if inspect.isclass(key):
+            keys = inspect.getmro(key)
+        else:
+            keys = [key]
+        for key in keys:
+            result = [C for C in subclasses if key in C.__view__]
+            if result:
+                return result
+        return []
+
+    def _selectsubclass(self, key):
+        subclasses = list(enumsubclasses(self.__rootclass__))
+        for C in subclasses:
+            if not isinstance(C.__view__, tuple):
+                C.__view__ = (C.__view__,)
+        choices = self.__matchkey__(key, subclasses)
+        if not choices:
+            return self.__rootclass__
+        elif len(choices) == 1:
+            return choices[0]
+        else:
+            # combine the multiple choices
+            return type('?', tuple(choices), {})
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
+def enumsubclasses(cls):
+    for subcls in cls.__subclasses__():
+        for subsubclass in enumsubclasses(subcls):
+            yield subsubclass
+    yield cls
+
+
+class Interpretable(View):
+    """A parse tree node with a few extra methods."""
+    explanation = None
+
+    def is_builtin(self, frame):
+        return False
+
+    def eval(self, frame):
+        # fall-back for unknown expression nodes
+        try:
+            expr = ast.Expression(self.__obj__)
+            expr.filename = '<eval>'
+            self.__obj__.filename = '<eval>'
+            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
+            result = frame.eval(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.result = result
+        self.explanation = self.explanation or frame.repr(self.result)
+
+    def run(self, frame):
+        # fall-back for unknown statement nodes
+        try:
+            expr = ast.Module(None, ast.Stmt([self.__obj__]))
+            expr.filename = '<run>'
+            co = pycodegen.ModuleCodeGenerator(expr).getCode()
+            frame.exec_(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+    def nice_explanation(self):
+        return _format_explanation(self.explanation)
+
+
+class Name(Interpretable):
+    __view__ = ast.Name
+
+    def is_local(self, frame):
+        source = '%r in locals() is not globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_global(self, frame):
+        source = '%r in globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_builtin(self, frame):
+        source = '%r not in locals() and %r not in globals()' % (
+            self.name, self.name)
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        super(Name, self).eval(frame)
+        if not self.is_local(frame):
+            self.explanation = self.name
+
+class Compare(Interpretable):
+    __view__ = ast.Compare
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        for operation, expr2 in self.ops:
+            if hasattr(self, 'result'):
+                # shortcutting in chained expressions
+                if not frame.is_true(self.result):
+                    break
+            expr2 = Interpretable(expr2)
+            expr2.eval(frame)
+            self.explanation = "%s %s %s" % (
+                expr.explanation, operation, expr2.explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % operation
+            try:
+                self.result = frame.eval(source,
+                                         __exprinfo_left=expr.result,
+                                         __exprinfo_right=expr2.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+            expr = expr2
+
+class And(Interpretable):
+    __view__ = ast.And
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if not frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' and '.join(explanations) + ')'
+
+class Or(Interpretable):
+    __view__ = ast.Or
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' or '.join(explanations) + ')'
+
+
+# == Unary operations ==
+keepalive = []
+for astclass, astpattern in {
+    ast.Not    : 'not __exprinfo_expr',
+    ast.Invert : '(~__exprinfo_expr)',
+    }.items():
+
+    class UnaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            expr = Interpretable(self.expr)
+            expr.eval(frame)
+            self.explanation = astpattern.replace('__exprinfo_expr',
+                                                  expr.explanation)
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_expr=expr.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(UnaryArith)
+
+# == Binary operations ==
+for astclass, astpattern in {
+    ast.Add    : '(__exprinfo_left + __exprinfo_right)',
+    ast.Sub    : '(__exprinfo_left - __exprinfo_right)',
+    ast.Mul    : '(__exprinfo_left * __exprinfo_right)',
+    ast.Div    : '(__exprinfo_left / __exprinfo_right)',
+    ast.Mod    : '(__exprinfo_left % __exprinfo_right)',
+    ast.Power  : '(__exprinfo_left ** __exprinfo_right)',
+    }.items():
+
+    class BinaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            left = Interpretable(self.left)
+            left.eval(frame)
+            right = Interpretable(self.right)
+            right.eval(frame)
+            self.explanation = (astpattern
+                                .replace('__exprinfo_left',  left .explanation)
+                                .replace('__exprinfo_right', right.explanation))
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_left=left.result,
+                                         __exprinfo_right=right.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(BinaryArith)
+
+
+class CallFunc(Interpretable):
+    __view__ = ast.CallFunc
+
+    def is_bool(self, frame):
+        source = 'isinstance(__exprinfo_value, bool)'
+        try:
+            return frame.is_true(frame.eval(source,
+                                            __exprinfo_value=self.result))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        node = Interpretable(self.node)
+        node.eval(frame)
+        explanations = []
+        vars = {'__exprinfo_fn': node.result}
+        source = '__exprinfo_fn('
+        for a in self.args:
+            if isinstance(a, ast.Keyword):
+                keyword = a.name
+                a = a.expr
+            else:
+                keyword = None
+            a = Interpretable(a)
+            a.eval(frame)
+            argname = '__exprinfo_%d' % len(vars)
+            vars[argname] = a.result
+            if keyword is None:
+                source += argname + ','
+                explanations.append(a.explanation)
+            else:
+                source += '%s=%s,' % (keyword, argname)
+                explanations.append('%s=%s' % (keyword, a.explanation))
+        if self.star_args:
+            star_args = Interpretable(self.star_args)
+            star_args.eval(frame)
+            argname = '__exprinfo_star'
+            vars[argname] = star_args.result
+            source += '*' + argname + ','
+            explanations.append('*' + star_args.explanation)
+        if self.dstar_args:
+            dstar_args = Interpretable(self.dstar_args)
+            dstar_args.eval(frame)
+            argname = '__exprinfo_kwds'
+            vars[argname] = dstar_args.result
+            source += '**' + argname + ','
+            explanations.append('**' + dstar_args.explanation)
+        self.explanation = "%s(%s)" % (
+            node.explanation, ', '.join(explanations))
+        if source.endswith(','):
+            source = source[:-1]
+        source += ')'
+        try:
+            self.result = frame.eval(source, **vars)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        if not node.is_builtin(frame) or not self.is_bool(frame):
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+class Getattr(Interpretable):
+    __view__ = ast.Getattr
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        source = '__exprinfo_expr.%s' % self.attrname
+        try:
+            self.result = frame.eval(source, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
+        # if the attribute comes from the instance, its value is interesting
+        source = ('hasattr(__exprinfo_expr, "__dict__") and '
+                  '%r in __exprinfo_expr.__dict__' % self.attrname)
+        try:
+            from_instance = frame.is_true(
+                frame.eval(source, __exprinfo_expr=expr.result))
+        except passthroughex:
+            raise
+        except:
+            from_instance = True
+        if from_instance:
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
+class Assert(Interpretable):
+    __view__ = ast.Assert
+
+    def run(self, frame):
+        test = Interpretable(self.test)
+        test.eval(frame)
+        # simplify 'assert False where False = ...'
+        if (test.explanation.startswith('False\n{False = ') and
+            test.explanation.endswith('\n}')):
+            test.explanation = test.explanation[15:-2]
+        # print the result as  'assert <explanation>'
+        self.result = test.result
+        self.explanation = 'assert ' + test.explanation
+        if not frame.is_true(test.result):
+            try:
+                raise BuiltinAssertionError
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+class Assign(Interpretable):
+    __view__ = ast.Assign
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = '... = ' + expr.explanation
+        # fall-back-run the rest of the assignment
+        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
+        mod = ast.Module(None, ast.Stmt([ass]))
+        mod.filename = '<run>'
+        co = pycodegen.ModuleCodeGenerator(mod).getCode()
+        try:
+            frame.exec_(co, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+class Discard(Interpretable):
+    __view__ = ast.Discard
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = expr.explanation
+
+class Stmt(Interpretable):
+    __view__ = ast.Stmt
+
+    def run(self, frame):
+        for stmt in self.nodes:
+            stmt = Interpretable(stmt)
+            stmt.run(frame)
+
+
+def report_failure(e):
+    explanation = e.node.nice_explanation()
+    if explanation:
+        explanation = ", in: " + explanation
+    else:
+        explanation = ""
+    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
+
+def check(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    expr = parse(s, 'eval')
+    assert isinstance(expr, ast.Expression)
+    node = Interpretable(expr.node)
+    try:
+        node.eval(frame)
+    except passthroughex:
+        raise
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+    else:
+        if not frame.is_true(node.result):
+            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
+def interpret(source, frame, should_fail=False):
+    module = Interpretable(parse(source, 'exec').node)
+    #print "got module", module
+    if isinstance(frame, py.std.types.FrameType):
+        frame = py.code.Frame(frame)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        return getfailure(e)
+    except passthroughex:
+        raise
+    except:
+        import traceback
+        traceback.print_exc()
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --nomagic)")
+    else:
+        return None
+
+def getmsg(excinfo):
+    if isinstance(excinfo, tuple):
+        excinfo = py.code.ExceptionInfo(excinfo)
+    #frame, line = gettbline(tb)
+    #frame = py.code.Frame(frame)
+    #return interpret(line, frame)
+
+    tb = excinfo.traceback[-1]
+    source = str(tb.statement).strip()
+    x = interpret(source, tb.frame, should_fail=True)
+    if not isinstance(x, str):
+        raise TypeError("interpret returned non-string %r" % (x,))
+    return x
+
+def getfailure(e):
+    explanation = e.node.nice_explanation()
+    if str(e.value):
+        lines = explanation.split('\n')
+        lines[0] += "  << %s" % (e.value,)
+        explanation = '\n'.join(lines)
+    text = "%s: %s" % (e.exc.__name__, explanation)
+    if text.startswith('AssertionError: assert '):
+        text = text[16:]
+    return text
+
+def run(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    module = Interpretable(parse(s, 'exec').node)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+
+
+if __name__ == '__main__':
+    # example:
+    def f():
+        return 5
+    def g():
+        return 3
+    def h(x):
+        return 'never'
+    check("f() * g() == 5")
+    check("not f()")
+    check("not (f() and g() or 0)")
+    check("f() == g()")
+    i = 4
+    check("i == f()")
+    check("len(f()) == 0")
+    check("isinstance(2+3+4, float)")
+
+    run("x = i")
+    check("x == 5")
+
+    run("assert not f(), 'oops'")
+    run("a, b, c = 1, 2")
+    run("a, b, c = f()")
+
+    check("max([f(),g()]) == 4")
+    check("'hello'[g()] == 'h'")
+    run("'guk%d' % h(f())")
--- a/third_party/python/py/py/_code/_py2traceback.py
+++ b/third_party/python/py/py/_code/_py2traceback.py
@@ -1,79 +1,79 @@
-# copied from python-2.7.3's traceback.py
-# CHANGES:
-# - some_str is replaced, trying to create unicode strings
-#
-import types
-
-def format_exception_only(etype, value):
-    """Format the exception part of a traceback.
-
-    The arguments are the exception type and value such as given by
-    sys.last_type and sys.last_value. The return value is a list of
-    strings, each ending in a newline.
-
-    Normally, the list contains a single string; however, for
-    SyntaxError exceptions, it contains several lines that (when
-    printed) display detailed information about where the syntax
-    error occurred.
-
-    The message indicating which exception occurred is always the last
-    string in the list.
-
-    """
-
-    # An instance should not have a meaningful value parameter, but
-    # sometimes does, particularly for string exceptions, such as
-    # >>> raise string1, string2  # deprecated
-    #
-    # Clear these out first because issubtype(string1, SyntaxError)
-    # would throw another exception and mask the original problem.
-    if (isinstance(etype, BaseException) or
-        isinstance(etype, types.InstanceType) or
-        etype is None or type(etype) is str):
-        return [_format_final_exc_line(etype, value)]
-
-    stype = etype.__name__
-
-    if not issubclass(etype, SyntaxError):
-        return [_format_final_exc_line(stype, value)]
-
-    # It was a syntax error; show exactly where the problem was found.
-    lines = []
-    try:
-        msg, (filename, lineno, offset, badline) = value.args
-    except Exception:
-        pass
-    else:
-        filename = filename or "<string>"
-        lines.append('  File "%s", line %d\n' % (filename, lineno))
-        if badline is not None:
-            lines.append('    %s\n' % badline.strip())
-            if offset is not None:
-                caretspace = badline.rstrip('\n')[:offset].lstrip()
-                # non-space whitespace (likes tabs) must be kept for alignment
-                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
-                # only three spaces to account for offset1 == pos 0
-                lines.append('   %s^\n' % ''.join(caretspace))
-        value = msg
-
-    lines.append(_format_final_exc_line(stype, value))
-    return lines
-
-def _format_final_exc_line(etype, value):
-    """Return a list of a single line -- normal case for format_exception_only"""
-    valuestr = _some_str(value)
-    if value is None or not valuestr:
-        line = "%s\n" % etype
-    else:
-        line = "%s: %s\n" % (etype, valuestr)
-    return line
-
-def _some_str(value):
-    try:
-        return unicode(value)
-    except Exception:
-        try:
-            return str(value)
-        except Exception:
-            pass
-    return '<unprintable %s object>' % type(value).__name__
+# copied from python-2.7.3's traceback.py
+# CHANGES:
+# - some_str is replaced, trying to create unicode strings
+#
+import types
+
+def format_exception_only(etype, value):
+    """Format the exception part of a traceback.
+
+    The arguments are the exception type and value such as given by
+    sys.last_type and sys.last_value. The return value is a list of
+    strings, each ending in a newline.
+
+    Normally, the list contains a single string; however, for
+    SyntaxError exceptions, it contains several lines that (when
+    printed) display detailed information about where the syntax
+    error occurred.
+
+    The message indicating which exception occurred is always the last
+    string in the list.
+
+    """
+
+    # An instance should not have a meaningful value parameter, but
+    # sometimes does, particularly for string exceptions, such as
+    # >>> raise string1, string2  # deprecated
+    #
+    # Clear these out first because issubtype(string1, SyntaxError)
+    # would throw another exception and mask the original problem.
+    if (isinstance(etype, BaseException) or
+        isinstance(etype, types.InstanceType) or
+        etype is None or type(etype) is str):
+        return [_format_final_exc_line(etype, value)]
+
+    stype = etype.__name__
+
+    if not issubclass(etype, SyntaxError):
+        return [_format_final_exc_line(stype, value)]
+
+    # It was a syntax error; show exactly where the problem was found.
+    lines = []
+    try:
+        msg, (filename, lineno, offset, badline) = value.args
+    except Exception:
+        pass
+    else:
+        filename = filename or "<string>"
+        lines.append('  File "%s", line %d\n' % (filename, lineno))
+        if badline is not None:
+            lines.append('    %s\n' % badline.strip())
+            if offset is not None:
+                caretspace = badline.rstrip('\n')[:offset].lstrip()
+                # non-space whitespace (likes tabs) must be kept for alignment
+                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+                # only three spaces to account for offset1 == pos 0
+                lines.append('   %s^\n' % ''.join(caretspace))
+        value = msg
+
+    lines.append(_format_final_exc_line(stype, value))
+    return lines
+
+def _format_final_exc_line(etype, value):
+    """Return a list of a single line -- normal case for format_exception_only"""
+    valuestr = _some_str(value)
+    if value is None or not valuestr:
+        line = "%s\n" % etype
+    else:
+        line = "%s: %s\n" % (etype, valuestr)
+    return line
+
+def _some_str(value):
+    try:
+        return unicode(value)
+    except Exception:
+        try:
+            return str(value)
+        except Exception:
+            pass
+    return '<unprintable %s object>' % type(value).__name__
--- a/third_party/python/py/py/_code/assertion.py
+++ b/third_party/python/py/py/_code/assertion.py
@@ -1,94 +1,94 @@
-import sys
-import py
-
-BuiltinAssertionError = py.builtin.builtins.AssertionError
-
-_reprcompare = None # if set, will be called by assert reinterp for comparison ops
-
-def _format_explanation(explanation):
-    """This formats an explanation
-
-    Normally all embedded newlines are escaped, however there are
-    three exceptions: \n{, \n} and \n~.  The first two are intended
-    cover nested explanations, see function and attribute explanations
-    for examples (.visit_Call(), visit_Attribute()).  The last one is
-    for when one explanation needs to span multiple lines, e.g. when
-    displaying diffs.
-    """
-    raw_lines = (explanation or '').split('\n')
-    # escape newlines not followed by {, } and ~
-    lines = [raw_lines[0]]
-    for l in raw_lines[1:]:
-        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
-            lines.append(l)
-        else:
-            lines[-1] += '\\n' + l
-
-    result = lines[:1]
-    stack = [0]
-    stackcnt = [0]
-    for line in lines[1:]:
-        if line.startswith('{'):
-            if stackcnt[-1]:
-                s = 'and   '
-            else:
-                s = 'where '
-            stack.append(len(result))
-            stackcnt[-1] += 1
-            stackcnt.append(0)
-            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
-        elif line.startswith('}'):
-            assert line.startswith('}')
-            stack.pop()
-            stackcnt.pop()
-            result[stack[-1]] += line[1:]
-        else:
-            assert line.startswith('~')
-            result.append('  '*len(stack) + line[1:])
-    assert len(stack) == 1
-    return '\n'.join(result)
-
-
-class AssertionError(BuiltinAssertionError):
-    def __init__(self, *args):
-        BuiltinAssertionError.__init__(self, *args)
-        if args:
-            try:
-                self.msg = str(args[0])
-            except py.builtin._sysex:
-                raise
-            except:
-                self.msg = "<[broken __repr__] %s at %0xd>" %(
-                    args[0].__class__, id(args[0]))
-        else:
-            f = py.code.Frame(sys._getframe(1))
-            try:
-                source = f.code.fullsource
-                if source is not None:
-                    try:
-                        source = source.getstatement(f.lineno, assertion=True)
-                    except IndexError:
-                        source = None
-                    else:
-                        source = str(source.deindent()).strip()
-            except py.error.ENOENT:
-                source = None
-                # this can also occur during reinterpretation, when the
-                # co_filename is set to "<run>".
-            if source:
-                self.msg = reinterpret(source, f, should_fail=True)
-            else:
-                self.msg = "<could not determine information>"
-            if not self.args:
-                self.args = (self.msg,)
-
-if sys.version_info > (3, 0):
-    AssertionError.__module__ = "builtins"
-    reinterpret_old = "old reinterpretation not available for py3"
-else:
-    from py._code._assertionold import interpret as reinterpret_old
-if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
-    from py._code._assertionnew import interpret as reinterpret
-else:
-    reinterpret = reinterpret_old
-
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+_reprcompare = None # if set, will be called by assert reinterp for comparison ops
+
+def _format_explanation(explanation):
+    """This formats an explanation
+
+    Normally all embedded newlines are escaped, however there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended
+    cover nested explanations, see function and attribute explanations
+    for examples (.visit_Call(), visit_Attribute()).  The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    raw_lines = (explanation or '').split('\n')
+    # escape newlines not followed by {, } and ~
+    lines = [raw_lines[0]]
+    for l in raw_lines[1:]:
+        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+            lines.append(l)
+        else:
+            lines[-1] += '\\n' + l
+
+    result = lines[:1]
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith('{'):
+            if stackcnt[-1]:
+                s = 'and   '
+            else:
+                s = 'where '
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
+        elif line.startswith('}'):
+            assert line.startswith('}')
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line.startswith('~')
+            result.append('  '*len(stack) + line[1:])
+    assert len(stack) == 1
+    return '\n'.join(result)
+
+
+class AssertionError(BuiltinAssertionError):
+    def __init__(self, *args):
+        BuiltinAssertionError.__init__(self, *args)
+        if args:
+            try:
+                self.msg = str(args[0])
+            except py.builtin._sysex:
+                raise
+            except:
+                self.msg = "<[broken __repr__] %s at %0xd>" %(
+                    args[0].__class__, id(args[0]))
+        else:
+            f = py.code.Frame(sys._getframe(1))
+            try:
+                source = f.code.fullsource
+                if source is not None:
+                    try:
+                        source = source.getstatement(f.lineno, assertion=True)
+                    except IndexError:
+                        source = None
+                    else:
+                        source = str(source.deindent()).strip()
+            except py.error.ENOENT:
+                source = None
+                # this can also occur during reinterpretation, when the
+                # co_filename is set to "<run>".
+            if source:
+                self.msg = reinterpret(source, f, should_fail=True)
+            else:
+                self.msg = "<could not determine information>"
+            if not self.args:
+                self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+    AssertionError.__module__ = "builtins"
+    reinterpret_old = "old reinterpretation not available for py3"
+else:
+    from py._code._assertionold import interpret as reinterpret_old
+if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
+    from py._code._assertionnew import interpret as reinterpret
+else:
+    reinterpret = reinterpret_old
+
--- a/third_party/python/py/py/_code/code.py
+++ b/third_party/python/py/py/_code/code.py
@@ -1,787 +1,787 @@
-import py
-import sys
-from inspect import CO_VARARGS, CO_VARKEYWORDS
-
-builtin_repr = repr
-
-reprlib = py.builtin._tryimport('repr', 'reprlib')
-
-if sys.version_info[0] >= 3:
-    from traceback import format_exception_only
-else:
-    from py._code._py2traceback import format_exception_only
-
-class Code(object):
-    """ wrapper around Python code objects """
-    def __init__(self, rawcode):
-        if not hasattr(rawcode, "co_filename"):
-            rawcode = py.code.getrawcode(rawcode)
-        try:
-            self.filename = rawcode.co_filename
-            self.firstlineno = rawcode.co_firstlineno - 1
-            self.name = rawcode.co_name
-        except AttributeError:
-            raise TypeError("not a code object: %r" %(rawcode,))
-        self.raw = rawcode
-
-    def __eq__(self, other):
-        return self.raw == other.raw
-
-    def __ne__(self, other):
-        return not self == other
-
-    @property
-    def path(self):
-        """ return a path object pointing to source code (note that it
-        might not point to an actually existing file). """
-        p = py.path.local(self.raw.co_filename)
-        # maybe don't try this checking
-        if not p.check():
-            # XXX maybe try harder like the weird logic
-            # in the standard lib [linecache.updatecache] does?
-            p = self.raw.co_filename
-        return p
-
-    @property
-    def fullsource(self):
-        """ return a py.code.Source object for the full source file of the code
-        """
-        from py._code import source
-        full, _ = source.findsource(self.raw)
-        return full
-
-    def source(self):
-        """ return a py.code.Source object for the code object's source only
-        """
-        # return source only for that part of code
-        return py.code.Source(self.raw)
-
-    def getargs(self, var=False):
-        """ return a tuple with the argument names for the code object
-
-            if 'var' is set True also return the names of the variable and
-            keyword arguments when present
-        """
-        # handfull shortcut for getting args
-        raw = self.raw
-        argcount = raw.co_argcount
-        if var:
-            argcount += raw.co_flags & CO_VARARGS
-            argcount += raw.co_flags & CO_VARKEYWORDS
-        return raw.co_varnames[:argcount]
-
-class Frame(object):
-    """Wrapper around a Python frame holding f_locals and f_globals
-    in which expressions can be evaluated."""
-
-    def __init__(self, frame):
-        self.lineno = frame.f_lineno - 1
-        self.f_globals = frame.f_globals
-        self.f_locals = frame.f_locals
-        self.raw = frame
-        self.code = py.code.Code(frame.f_code)
-
-    @property
-    def statement(self):
-        """ statement this frame is at """
-        if self.code.fullsource is None:
-            return py.code.Source("")
-        return self.code.fullsource.getstatement(self.lineno)
-
-    def eval(self, code, **vars):
-        """ evaluate 'code' in the frame
-
-            'vars' are optional additional local variables
-
-            returns the result of the evaluation
-        """
-        f_locals = self.f_locals.copy()
-        f_locals.update(vars)
-        return eval(code, self.f_globals, f_locals)
-
-    def exec_(self, code, **vars):
-        """ exec 'code' in the frame
-
-            'vars' are optiona; additional local variables
-        """
-        f_locals = self.f_locals.copy()
-        f_locals.update(vars)
-        py.builtin.exec_(code, self.f_globals, f_locals )
-
-    def repr(self, object):
-        """ return a 'safe' (non-recursive, one-line) string repr for 'object'
-        """
-        return py.io.saferepr(object)
-
-    def is_true(self, object):
-        return object
-
-    def getargs(self, var=False):
-        """ return a list of tuples (name, value) for all arguments
-
-            if 'var' is set True also include the variable and keyword
-            arguments when present
-        """
-        retval = []
-        for arg in self.code.getargs(var):
-            try:
-                retval.append((arg, self.f_locals[arg]))
-            except KeyError:
-                pass     # this can occur when using Psyco
-        return retval
-
-class TracebackEntry(object):
-    """ a single entry in a traceback """
-
-    _repr_style = None
-    exprinfo = None
-
-    def __init__(self, rawentry):
-        self._rawentry = rawentry
-        self.lineno = rawentry.tb_lineno - 1
-
-    def set_repr_style(self, mode):
-        assert mode in ("short", "long")
-        self._repr_style = mode
-
-    @property
-    def frame(self):
-        return py.code.Frame(self._rawentry.tb_frame)
-
-    @property
-    def relline(self):
-        return self.lineno - self.frame.code.firstlineno
-
-    def __repr__(self):
-        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
-
-    @property
-    def statement(self):
-        """ py.code.Source object for the current statement """
-        source = self.frame.code.fullsource
-        return source.getstatement(self.lineno)
-
-    @property
-    def path(self):
-        """ path to the source code """
-        return self.frame.code.path
-
-    def getlocals(self):
-        return self.frame.f_locals
-    locals = property(getlocals, None, None, "locals of underlaying frame")
-
-    def reinterpret(self):
-        """Reinterpret the failing statement and returns a detailed information
-           about what operations are performed."""
-        if self.exprinfo is None:
-            source = str(self.statement).strip()
-            x = py.code._reinterpret(source, self.frame, should_fail=True)
-            if not isinstance(x, str):
-                raise TypeError("interpret returned non-string %r" % (x,))
-            self.exprinfo = x
-        return self.exprinfo
-
-    def getfirstlinesource(self):
-        # on Jython this firstlineno can be -1 apparently
-        return max(self.frame.code.firstlineno, 0)
-
-    def getsource(self, astcache=None):
-        """ return failing source code. """
-        # we use the passed in astcache to not reparse asttrees
-        # within exception info printing
-        from py._code.source import getstatementrange_ast
-        source = self.frame.code.fullsource
-        if source is None:
-            return None
-        key = astnode = None
-        if astcache is not None:
-            key = self.frame.code.path
-            if key is not None:
-                astnode = astcache.get(key, None)
-        start = self.getfirstlinesource()
-        try:
-            astnode, _, end = getstatementrange_ast(self.lineno, source,
-                                                    astnode=astnode)
-        except SyntaxError:
-            end = self.lineno + 1
-        else:
-            if key is not None:
-                astcache[key] = astnode
-        return source[start:end]
-
-    source = property(getsource)
-
-    def ishidden(self):
-        """ return True if the current frame has a var __tracebackhide__
-            resolving to True
-
-            mostly for internal use
-        """
-        try:
-            return self.frame.f_locals['__tracebackhide__']
-        except KeyError:
-            try:
-                return self.frame.f_globals['__tracebackhide__']
-            except KeyError:
-                return False
-
-    def __str__(self):
-        try:
-            fn = str(self.path)
-        except py.error.Error:
-            fn = '???'
-        name = self.frame.code.name
-        try:
-            line = str(self.statement).lstrip()
-        except KeyboardInterrupt:
-            raise
-        except:
-            line = "???"
-        return "  File %r:%d in %s\n  %s\n" %(fn, self.lineno+1, name, line)
-
-    def name(self):
-        return self.frame.code.raw.co_name
-    name = property(name, None, None, "co_name of underlaying code")
-
-class Traceback(list):
-    """ Traceback objects encapsulate and offer higher level
-        access to Traceback entries.
-    """
-    Entry = TracebackEntry
-    def __init__(self, tb):
-        """ initialize from given python traceback object. """
-        if hasattr(tb, 'tb_next'):
-            def f(cur):
-                while cur is not None:
-                    yield self.Entry(cur)
-                    cur = cur.tb_next
-            list.__init__(self, f(tb))
-        else:
-            list.__init__(self, tb)
-
-    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
-        """ return a Traceback instance wrapping part of this Traceback
-
-            by provding any combination of path, lineno and firstlineno, the
-            first frame to start the to-be-returned traceback is determined
-
-            this allows cutting the first part of a Traceback instance e.g.
-            for formatting reasons (removing some uninteresting bits that deal
-            with handling of the exception/traceback)
-        """
-        for x in self:
-            code = x.frame.code
-            codepath = code.path
-            if ((path is None or codepath == path) and
-                (excludepath is None or not hasattr(codepath, 'relto') or
-                 not codepath.relto(excludepath)) and
-                (lineno is None or x.lineno == lineno) and
-                (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
-                return Traceback(x._rawentry)
-        return self
-
-    def __getitem__(self, key):
-        val = super(Traceback, self).__getitem__(key)
-        if isinstance(key, type(slice(0))):
-            val = self.__class__(val)
-        return val
-
-    def filter(self, fn=lambda x: not x.ishidden()):
-        """ return a Traceback instance with certain items removed
-
-            fn is a function that gets a single argument, a TracebackItem
-            instance, and should return True when the item should be added
-            to the Traceback, False when not
-
-            by default this removes all the TracebackItems which are hidden
-            (see ishidden() above)
-        """
-        return Traceback(filter(fn, self))
-
-    def getcrashentry(self):
-        """ return last non-hidden traceback entry that lead
-        to the exception of a traceback.
-        """
-        for i in range(-1, -len(self)-1, -1):
-            entry = self[i]
-            if not entry.ishidden():
-                return entry
-        return self[-1]
-
-    def recursionindex(self):
-        """ return the index of the frame/TracebackItem where recursion
-            originates if appropriate, None if no recursion occurred
-        """
-        cache = {}
-        for i, entry in enumerate(self):
-            # id for the code.raw is needed to work around
-            # the strange metaprogramming in the decorator lib from pypi
-            # which generates code objects that have hash/value equality
-            #XXX needs a test
-            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
-            #print "checking for recursion at", key
-            l = cache.setdefault(key, [])
-            if l:
-                f = entry.frame
-                loc = f.f_locals
-                for otherloc in l:
-                    if f.is_true(f.eval(co_equal,
-                        __recursioncache_locals_1=loc,
-                        __recursioncache_locals_2=otherloc)):
-                        return i
-            l.append(entry.frame.f_locals)
-        return None
-
-co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
-                   '?', 'eval')
-
-class ExceptionInfo(object):
-    """ wraps sys.exc_info() objects and offers
-        help for navigating the traceback.
-    """
-    _striptext = ''
-    def __init__(self, tup=None, exprinfo=None):
-        if tup is None:
-            tup = sys.exc_info()
-            if exprinfo is None and isinstance(tup[1], AssertionError):
-                exprinfo = getattr(tup[1], 'msg', None)
-                if exprinfo is None:
-                    exprinfo = str(tup[1])
-                if exprinfo and exprinfo.startswith('assert '):
-                    self._striptext = 'AssertionError: '
-        self._excinfo = tup
-        #: the exception class
-        self.type = tup[0]
-        #: the exception instance
-        self.value = tup[1]
-        #: the exception raw traceback
-        self.tb = tup[2]
-        #: the exception type name
-        self.typename = self.type.__name__
-        #: the exception traceback (py.code.Traceback instance)
-        self.traceback = py.code.Traceback(self.tb)
-
-    def __repr__(self):
-        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
-
-    def exconly(self, tryshort=False):
-        """ return the exception as a string
-
-            when 'tryshort' resolves to True, and the exception is a
-            py.code._AssertionError, only the actual exception part of
-            the exception representation is returned (so 'AssertionError: ' is
-            removed from the beginning)
-        """
-        lines = format_exception_only(self.type, self.value)
-        text = ''.join(lines)
-        text = text.rstrip()
-        if tryshort:
-            if text.startswith(self._striptext):
-                text = text[len(self._striptext):]
-        return text
-
-    def errisinstance(self, exc):
-        """ return True if the exception is an instance of exc """
-        return isinstance(self.value, exc)
-
-    def _getreprcrash(self):
-        exconly = self.exconly(tryshort=True)
-        entry = self.traceback.getcrashentry()
-        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
-        return ReprFileLocation(path, lineno+1, exconly)
-
-    def getrepr(self, showlocals=False, style="long",
-            abspath=False, tbfilter=True, funcargs=False):
-        """ return str()able representation of this exception info.
-            showlocals: show locals per traceback entry
-            style: long|short|no|native traceback style
-            tbfilter: hide entries (where __tracebackhide__ is true)
-
-            in case of style==native, tbfilter and showlocals is ignored.
-        """
-        if style == 'native':
-            return ReprExceptionInfo(ReprTracebackNative(
-                py.std.traceback.format_exception(
-                    self.type,
-                    self.value,
-                    self.traceback[0]._rawentry,
-                )), self._getreprcrash())
-
-        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
-            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
-        return fmt.repr_excinfo(self)
-
-    def __str__(self):
-        entry = self.traceback[-1]
-        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
-        return str(loc)
-
-    def __unicode__(self):
-        entry = self.traceback[-1]
-        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
-        return unicode(loc)
-
-
-class FormattedExcinfo(object):
-    """ presenting information about failing Functions and Generators. """
-    # for traceback entries
-    flow_marker = ">"
-    fail_marker = "E"
-
-    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
-        self.showlocals = showlocals
-        self.style = style
-        self.tbfilter = tbfilter
-        self.funcargs = funcargs
-        self.abspath = abspath
-        self.astcache = {}
-
-    def _getindent(self, source):
-        # figure out indent for given source
-        try:
-            s = str(source.getstatement(len(source)-1))
-        except KeyboardInterrupt:
-            raise
-        except:
-            try:
-                s = str(source[-1])
-            except KeyboardInterrupt:
-                raise
-            except:
-                return 0
-        return 4 + (len(s) - len(s.lstrip()))
-
-    def _getentrysource(self, entry):
-        source = entry.getsource(self.astcache)
-        if source is not None:
-            source = source.deindent()
-        return source
-
-    def _saferepr(self, obj):
-        return py.io.saferepr(obj)
-
-    def repr_args(self, entry):
-        if self.funcargs:
-            args = []
-            for argname, argvalue in entry.frame.getargs(var=True):
-                args.append((argname, self._saferepr(argvalue)))
-            return ReprFuncArgs(args)
-
-    def get_source(self, source, line_index=-1, excinfo=None, short=False):
-        """ return formatted and marked up source lines. """
-        lines = []
-        if source is None or line_index >= len(source.lines):
-            source = py.code.Source("???")
-            line_index = 0
-        if line_index < 0:
-            line_index += len(source)
-        space_prefix = "    "
-        if short:
-            lines.append(space_prefix + source.lines[line_index].strip())
-        else:
-            for line in source.lines[:line_index]:
-                lines.append(space_prefix + line)
-            lines.append(self.flow_marker + "   " + source.lines[line_index])
-            for line in source.lines[line_index+1:]:
-                lines.append(space_prefix + line)
-        if excinfo is not None:
-            indent = 4 if short else self._getindent(source)
-            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
-        return lines
-
-    def get_exconly(self, excinfo, indent=4, markall=False):
-        lines = []
-        indent = " " * indent
-        # get the real exception information out
-        exlines = excinfo.exconly(tryshort=True).split('\n')
-        failindent = self.fail_marker + indent[1:]
-        for line in exlines:
-            lines.append(failindent + line)
-            if not markall:
-                failindent = indent
-        return lines
-
-    def repr_locals(self, locals):
-        if self.showlocals:
-            lines = []
-            keys = [loc for loc in locals if loc[0] != "@"]
-            keys.sort()
-            for name in keys:
-                value = locals[name]
-                if name == '__builtins__':
-                    lines.append("__builtins__ = <builtins>")
-                else:
-                    # This formatting could all be handled by the
-                    # _repr() function, which is only reprlib.Repr in
-                    # disguise, so is very configurable.
-                    str_repr = self._saferepr(value)
-                    #if len(str_repr) < 70 or not isinstance(value,
-                    #                            (list, tuple, dict)):
-                    lines.append("%-10s = %s" %(name, str_repr))
-                    #else:
-                    #    self._line("%-10s =\\" % (name,))
-                    #    # XXX
-                    #    py.std.pprint.pprint(value, stream=self.excinfowriter)
-            return ReprLocals(lines)
-
-    def repr_traceback_entry(self, entry, excinfo=None):
-        source = self._getentrysource(entry)
-        if source is None:
-            source = py.code.Source("???")
-            line_index = 0
-        else:
-            # entry.getfirstlinesource() can be -1, should be 0 on jython
-            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
-
-        lines = []
-        style = entry._repr_style
-        if style is None:
-            style = self.style
-        if style in ("short", "long"):
-            short = style == "short"
-            reprargs = self.repr_args(entry) if not short else None
-            s = self.get_source(source, line_index, excinfo, short=short)
-            lines.extend(s)
-            if short:
-                message = "in %s" %(entry.name)
-            else:
-                message = excinfo and excinfo.typename or ""
-            path = self._makepath(entry.path)
-            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
-            localsrepr = None
-            if not short:
-                localsrepr =  self.repr_locals(entry.locals)
-            return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
-        if excinfo:
-            lines.extend(self.get_exconly(excinfo, indent=4))
-        return ReprEntry(lines, None, None, None, style)
-
-    def _makepath(self, path):
-        if not self.abspath:
-            try:
-                np = py.path.local().bestrelpath(path)
-            except OSError:
-                return path
-            if len(np) < len(str(path)):
-                path = np
-        return path
-
-    def repr_traceback(self, excinfo):
-        traceback = excinfo.traceback
-        if self.tbfilter:
-            traceback = traceback.filter()
-        recursionindex = None
-        if excinfo.errisinstance(RuntimeError):
-            if "maximum recursion depth exceeded" in str(excinfo.value):
-                recursionindex = traceback.recursionindex()
-        last = traceback[-1]
-        entries = []
-        extraline = None
-        for index, entry in enumerate(traceback):
-            einfo = (last == entry) and excinfo or None
-            reprentry = self.repr_traceback_entry(entry, einfo)
-            entries.append(reprentry)
-            if index == recursionindex:
-                extraline = "!!! Recursion detected (same locals & position)"
-                break
-        return ReprTraceback(entries, extraline, style=self.style)
-
-    def repr_excinfo(self, excinfo):
-        reprtraceback = self.repr_traceback(excinfo)
-        reprcrash = excinfo._getreprcrash()
-        return ReprExceptionInfo(reprtraceback, reprcrash)
-
-class TerminalRepr:
-    def __str__(self):
-        s = self.__unicode__()
-        if sys.version_info[0] < 3:
-            s = s.encode('utf-8')
-        return s
-
-    def __unicode__(self):
-        # FYI this is called from pytest-xdist's serialization of exception
-        # information.
-        io = py.io.TextIO()
-        tw = py.io.TerminalWriter(file=io)
-        self.toterminal(tw)
-        return io.getvalue().strip()
-
-    def __repr__(self):
-        return "<%s instance at %0x>" %(self.__class__, id(self))
-
-
-class ReprExceptionInfo(TerminalRepr):
-    def __init__(self, reprtraceback, reprcrash):
-        self.reprtraceback = reprtraceback
-        self.reprcrash = reprcrash
-        self.sections = []
-
-    def addsection(self, name, content, sep="-"):
-        self.sections.append((name, content, sep))
-
-    def toterminal(self, tw):
-        self.reprtraceback.toterminal(tw)
-        for name, content, sep in self.sections:
-            tw.sep(sep, name)
-            tw.line(content)
-
-class ReprTraceback(TerminalRepr):
-    entrysep = "_ "
-
-    def __init__(self, reprentries, extraline, style):
-        self.reprentries = reprentries
-        self.extraline = extraline
-        self.style = style
-
-    def toterminal(self, tw):
-        # the entries might have different styles
-        last_style = None
-        for i, entry in enumerate(self.reprentries):
-            if entry.style == "long":
-                tw.line("")
-            entry.toterminal(tw)
-            if i < len(self.reprentries) - 1:
-                next_entry = self.reprentries[i+1]
-                if entry.style == "long" or \
-                   entry.style == "short" and next_entry.style == "long":
-                    tw.sep(self.entrysep)
-
-        if self.extraline:
-            tw.line(self.extraline)
-
-class ReprTracebackNative(ReprTraceback):
-    def __init__(self, tblines):
-        self.style = "native"
-        self.reprentries = [ReprEntryNative(tblines)]
-        self.extraline = None
-
-class ReprEntryNative(TerminalRepr):
-    style = "native"
-
-    def __init__(self, tblines):
-        self.lines = tblines
-
-    def toterminal(self, tw):
-        tw.write("".join(self.lines))
-
-class ReprEntry(TerminalRepr):
-    localssep = "_ "
-
-    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
-        self.lines = lines
-        self.reprfuncargs = reprfuncargs
-        self.reprlocals = reprlocals
-        self.reprfileloc = filelocrepr
-        self.style = style
-
-    def toterminal(self, tw):
-        if self.style == "short":
-            self.reprfileloc.toterminal(tw)
-            for line in self.lines:
-                red = line.startswith("E   ")
-                tw.line(line, bold=True, red=red)
-            #tw.line("")
-            return
-        if self.reprfuncargs:
-            self.reprfuncargs.toterminal(tw)
-        for line in self.lines:
-            red = line.startswith("E   ")
-            tw.line(line, bold=True, red=red)
-        if self.reprlocals:
-            #tw.sep(self.localssep, "Locals")
-            tw.line("")
-            self.reprlocals.toterminal(tw)
-        if self.reprfileloc:
-            if self.lines:
-                tw.line("")
-            self.reprfileloc.toterminal(tw)
-
-    def __str__(self):
-        return "%s\n%s\n%s" % ("\n".join(self.lines),
-                               self.reprlocals,
-                               self.reprfileloc)
-
-class ReprFileLocation(TerminalRepr):
-    def __init__(self, path, lineno, message):
-        self.path = str(path)
-        self.lineno = lineno
-        self.message = message
-
-    def toterminal(self, tw):
-        # filename and lineno output for each entry,
-        # using an output format that most editors unterstand
-        msg = self.message
-        i = msg.find("\n")
-        if i != -1:
-            msg = msg[:i]
-        tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
-
-class ReprLocals(TerminalRepr):
-    def __init__(self, lines):
-        self.lines = lines
-
-    def toterminal(self, tw):
-        for line in self.lines:
-            tw.line(line)
-
-class ReprFuncArgs(TerminalRepr):
-    def __init__(self, args):
-        self.args = args
-
-    def toterminal(self, tw):
-        if self.args:
-            linesofar = ""
-            for name, value in self.args:
-                ns = "%s = %s" %(name, value)
-                if len(ns) + len(linesofar) + 2 > tw.fullwidth:
-                    if linesofar:
-                        tw.line(linesofar)
-                    linesofar =  ns
-                else:
-                    if linesofar:
-                        linesofar += ", " + ns
-                    else:
-                        linesofar = ns
-            if linesofar:
-                tw.line(linesofar)
-            tw.line("")
-
-
-
-oldbuiltins = {}
-
-def patch_builtins(assertion=True, compile=True):
-    """ put compile and AssertionError builtins to Python's builtins. """
-    if assertion:
-        from py._code import assertion
-        l = oldbuiltins.setdefault('AssertionError', [])
-        l.append(py.builtin.builtins.AssertionError)
-        py.builtin.builtins.AssertionError = assertion.AssertionError
-    if compile:
-        l = oldbuiltins.setdefault('compile', [])
-        l.append(py.builtin.builtins.compile)
-        py.builtin.builtins.compile = py.code.compile
-
-def unpatch_builtins(assertion=True, compile=True):
-    """ remove compile and AssertionError builtins from Python builtins. """
-    if assertion:
-        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
-    if compile:
-        py.builtin.builtins.compile = oldbuiltins['compile'].pop()
-
-def getrawcode(obj, trycall=True):
-    """ return code object for given function. """
-    try:
-        return obj.__code__
-    except AttributeError:
-        obj = getattr(obj, 'im_func', obj)
-        obj = getattr(obj, 'func_code', obj)
-        obj = getattr(obj, 'f_code', obj)
-        obj = getattr(obj, '__code__', obj)
-        if trycall and not hasattr(obj, 'co_firstlineno'):
-            if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
-                x = getrawcode(obj.__call__, trycall=False)
-                if hasattr(x, 'co_firstlineno'):
-                    return x
-        return obj
-
+import py
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+    from traceback import format_exception_only
+else:
+    from py._code._py2traceback import format_exception_only
+
+class Code(object):
+    """ wrapper around Python code objects """
+    def __init__(self, rawcode):
+        if not hasattr(rawcode, "co_filename"):
+            rawcode = py.code.getrawcode(rawcode)
+        try:
+            self.filename = rawcode.co_filename
+            self.firstlineno = rawcode.co_firstlineno - 1
+            self.name = rawcode.co_name
+        except AttributeError:
+            raise TypeError("not a code object: %r" %(rawcode,))
+        self.raw = rawcode
+
+    def __eq__(self, other):
+        return self.raw == other.raw
+
+    def __ne__(self, other):
+        return not self == other
+
+    @property
+    def path(self):
+        """ return a path object pointing to source code (note that it
+        might not point to an actually existing file). """
+        p = py.path.local(self.raw.co_filename)
+        # maybe don't try this checking
+        if not p.check():
+            # XXX maybe try harder like the weird logic
+            # in the standard lib [linecache.updatecache] does?
+            p = self.raw.co_filename
+        return p
+
+    @property
+    def fullsource(self):
+        """ return a py.code.Source object for the full source file of the code
+        """
+        from py._code import source
+        full, _ = source.findsource(self.raw)
+        return full
+
+    def source(self):
+        """ return a py.code.Source object for the code object's source only
+        """
+        # return source only for that part of code
+        return py.code.Source(self.raw)
+
+    def getargs(self, var=False):
+        """ return a tuple with the argument names for the code object
+
+            if 'var' is set True also return the names of the variable and
+            keyword arguments when present
+        """
+        # handfull shortcut for getting args
+        raw = self.raw
+        argcount = raw.co_argcount
+        if var:
+            argcount += raw.co_flags & CO_VARARGS
+            argcount += raw.co_flags & CO_VARKEYWORDS
+        return raw.co_varnames[:argcount]
+
+class Frame(object):
+    """Wrapper around a Python frame holding f_locals and f_globals
+    in which expressions can be evaluated."""
+
+    def __init__(self, frame):
+        self.lineno = frame.f_lineno - 1
+        self.f_globals = frame.f_globals
+        self.f_locals = frame.f_locals
+        self.raw = frame
+        self.code = py.code.Code(frame.f_code)
+
+    @property
+    def statement(self):
+        """ statement this frame is at """
+        if self.code.fullsource is None:
+            return py.code.Source("")
+        return self.code.fullsource.getstatement(self.lineno)
+
+    def eval(self, code, **vars):
+        """ evaluate 'code' in the frame
+
+            'vars' are optional additional local variables
+
+            returns the result of the evaluation
+        """
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        return eval(code, self.f_globals, f_locals)
+
+    def exec_(self, code, **vars):
+        """ exec 'code' in the frame
+
+            'vars' are optiona; additional local variables
+        """
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        py.builtin.exec_(code, self.f_globals, f_locals )
+
+    def repr(self, object):
+        """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+        """
+        return py.io.saferepr(object)
+
+    def is_true(self, object):
+        return object
+
+    def getargs(self, var=False):
+        """ return a list of tuples (name, value) for all arguments
+
+            if 'var' is set True also include the variable and keyword
+            arguments when present
+        """
+        retval = []
+        for arg in self.code.getargs(var):
+            try:
+                retval.append((arg, self.f_locals[arg]))
+            except KeyError:
+                pass     # this can occur when using Psyco
+        return retval
+
+class TracebackEntry(object):
+    """ a single entry in a traceback """
+
+    _repr_style = None
+    exprinfo = None
+
+    def __init__(self, rawentry):
+        self._rawentry = rawentry
+        self.lineno = rawentry.tb_lineno - 1
+
+    def set_repr_style(self, mode):
+        assert mode in ("short", "long")
+        self._repr_style = mode
+
+    @property
+    def frame(self):
+        return py.code.Frame(self._rawentry.tb_frame)
+
+    @property
+    def relline(self):
+        return self.lineno - self.frame.code.firstlineno
+
+    def __repr__(self):
+        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
+
+    @property
+    def statement(self):
+        """ py.code.Source object for the current statement """
+        source = self.frame.code.fullsource
+        return source.getstatement(self.lineno)
+
+    @property
+    def path(self):
+        """ path to the source code """
+        return self.frame.code.path
+
+    def getlocals(self):
+        return self.frame.f_locals
+    locals = property(getlocals, None, None, "locals of underlaying frame")
+
+    def reinterpret(self):
+        """Reinterpret the failing statement and returns a detailed information
+           about what operations are performed."""
+        if self.exprinfo is None:
+            source = str(self.statement).strip()
+            x = py.code._reinterpret(source, self.frame, should_fail=True)
+            if not isinstance(x, str):
+                raise TypeError("interpret returned non-string %r" % (x,))
+            self.exprinfo = x
+        return self.exprinfo
+
+    def getfirstlinesource(self):
+        # on Jython this firstlineno can be -1 apparently
+        return max(self.frame.code.firstlineno, 0)
+
+    def getsource(self, astcache=None):
+        """ return failing source code. """
+        # we use the passed in astcache to not reparse asttrees
+        # within exception info printing
+        from py._code.source import getstatementrange_ast
+        source = self.frame.code.fullsource
+        if source is None:
+            return None
+        key = astnode = None
+        if astcache is not None:
+            key = self.frame.code.path
+            if key is not None:
+                astnode = astcache.get(key, None)
+        start = self.getfirstlinesource()
+        try:
+            astnode, _, end = getstatementrange_ast(self.lineno, source,
+                                                    astnode=astnode)
+        except SyntaxError:
+            end = self.lineno + 1
+        else:
+            if key is not None:
+                astcache[key] = astnode
+        return source[start:end]
+
+    source = property(getsource)
+
+    def ishidden(self):
+        """ return True if the current frame has a var __tracebackhide__
+            resolving to True
+
+            mostly for internal use
+        """
+        try:
+            return self.frame.f_locals['__tracebackhide__']
+        except KeyError:
+            try:
+                return self.frame.f_globals['__tracebackhide__']
+            except KeyError:
+                return False
+
+    def __str__(self):
+        try:
+            fn = str(self.path)
+        except py.error.Error:
+            fn = '???'
+        name = self.frame.code.name
+        try:
+            line = str(self.statement).lstrip()
+        except KeyboardInterrupt:
+            raise
+        except:
+            line = "???"
+        return "  File %r:%d in %s\n  %s\n" %(fn, self.lineno+1, name, line)
+
+    def name(self):
+        return self.frame.code.raw.co_name
+    name = property(name, None, None, "co_name of underlaying code")
+
+class Traceback(list):
+    """ Traceback objects encapsulate and offer higher level
+        access to Traceback entries.
+    """
+    Entry = TracebackEntry
+    def __init__(self, tb):
+        """ initialize from given python traceback object. """
+        if hasattr(tb, 'tb_next'):
+            def f(cur):
+                while cur is not None:
+                    yield self.Entry(cur)
+                    cur = cur.tb_next
+            list.__init__(self, f(tb))
+        else:
+            list.__init__(self, tb)
+
+    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
+        """ return a Traceback instance wrapping part of this Traceback
+
+            by provding any combination of path, lineno and firstlineno, the
+            first frame to start the to-be-returned traceback is determined
+
+            this allows cutting the first part of a Traceback instance e.g.
+            for formatting reasons (removing some uninteresting bits that deal
+            with handling of the exception/traceback)
+        """
+        for x in self:
+            code = x.frame.code
+            codepath = code.path
+            if ((path is None or codepath == path) and
+                (excludepath is None or not hasattr(codepath, 'relto') or
+                 not codepath.relto(excludepath)) and
+                (lineno is None or x.lineno == lineno) and
+                (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
+                return Traceback(x._rawentry)
+        return self
+
+    def __getitem__(self, key):
+        val = super(Traceback, self).__getitem__(key)
+        if isinstance(key, type(slice(0))):
+            val = self.__class__(val)
+        return val
+
+    def filter(self, fn=lambda x: not x.ishidden()):
+        """ return a Traceback instance with certain items removed
+
+            fn is a function that gets a single argument, a TracebackItem
+            instance, and should return True when the item should be added
+            to the Traceback, False when not
+
+            by default this removes all the TracebackItems which are hidden
+            (see ishidden() above)
+        """
+        return Traceback(filter(fn, self))
+
+    def getcrashentry(self):
+        """ return last non-hidden traceback entry that lead
+        to the exception of a traceback.
+        """
+        for i in range(-1, -len(self)-1, -1):
+            entry = self[i]
+            if not entry.ishidden():
+                return entry
+        return self[-1]
+
+    def recursionindex(self):
+        """ return the index of the frame/TracebackItem where recursion
+            originates if appropriate, None if no recursion occurred
+        """
+        cache = {}
+        for i, entry in enumerate(self):
+            # id for the code.raw is needed to work around
+            # the strange metaprogramming in the decorator lib from pypi
+            # which generates code objects that have hash/value equality
+            #XXX needs a test
+            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
+            #print "checking for recursion at", key
+            l = cache.setdefault(key, [])
+            if l:
+                f = entry.frame
+                loc = f.f_locals
+                for otherloc in l:
+                    if f.is_true(f.eval(co_equal,
+                        __recursioncache_locals_1=loc,
+                        __recursioncache_locals_2=otherloc)):
+                        return i
+            l.append(entry.frame.f_locals)
+        return None
+
+co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
+                   '?', 'eval')
+
+class ExceptionInfo(object):
+    """ wraps sys.exc_info() objects and offers
+        help for navigating the traceback.
+    """
+    _striptext = ''
+    def __init__(self, tup=None, exprinfo=None):
+        if tup is None:
+            tup = sys.exc_info()
+            if exprinfo is None and isinstance(tup[1], AssertionError):
+                exprinfo = getattr(tup[1], 'msg', None)
+                if exprinfo is None:
+                    exprinfo = str(tup[1])
+                if exprinfo and exprinfo.startswith('assert '):
+                    self._striptext = 'AssertionError: '
+        self._excinfo = tup
+        #: the exception class
+        self.type = tup[0]
+        #: the exception instance
+        self.value = tup[1]
+        #: the exception raw traceback
+        self.tb = tup[2]
+        #: the exception type name
+        self.typename = self.type.__name__
+        #: the exception traceback (py.code.Traceback instance)
+        self.traceback = py.code.Traceback(self.tb)
+
+    def __repr__(self):
+        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+    def exconly(self, tryshort=False):
+        """ return the exception as a string
+
+            when 'tryshort' resolves to True, and the exception is a
+            py.code._AssertionError, only the actual exception part of
+            the exception representation is returned (so 'AssertionError: ' is
+            removed from the beginning)
+        """
+        lines = format_exception_only(self.type, self.value)
+        text = ''.join(lines)
+        text = text.rstrip()
+        if tryshort:
+            if text.startswith(self._striptext):
+                text = text[len(self._striptext):]
+        return text
+
+    def errisinstance(self, exc):
+        """ return True if the exception is an instance of exc """
+        return isinstance(self.value, exc)
+
+    def _getreprcrash(self):
+        exconly = self.exconly(tryshort=True)
+        entry = self.traceback.getcrashentry()
+        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+        return ReprFileLocation(path, lineno+1, exconly)
+
+    def getrepr(self, showlocals=False, style="long",
+            abspath=False, tbfilter=True, funcargs=False):
+        """ return str()able representation of this exception info.
+            showlocals: show locals per traceback entry
+            style: long|short|no|native traceback style
+            tbfilter: hide entries (where __tracebackhide__ is true)
+
+            in case of style==native, tbfilter and showlocals is ignored.
+        """
+        if style == 'native':
+            return ReprExceptionInfo(ReprTracebackNative(
+                py.std.traceback.format_exception(
+                    self.type,
+                    self.value,
+                    self.traceback[0]._rawentry,
+                )), self._getreprcrash())
+
+        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+        return fmt.repr_excinfo(self)
+
+    def __str__(self):
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return str(loc)
+
+    def __unicode__(self):
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return loc.__unicode__()
+
+
+class FormattedExcinfo(object):
+    """ presenting information about failing Functions and Generators. """
+    # for traceback entries
+    flow_marker = ">"
+    fail_marker = "E"
+
+    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
+        self.showlocals = showlocals
+        self.style = style
+        self.tbfilter = tbfilter
+        self.funcargs = funcargs
+        self.abspath = abspath
+        self.astcache = {}
+
+    def _getindent(self, source):
+        # figure out indent for given source
+        try:
+            s = str(source.getstatement(len(source)-1))
+        except KeyboardInterrupt:
+            raise
+        except:
+            try:
+                s = str(source[-1])
+            except KeyboardInterrupt:
+                raise
+            except:
+                return 0
+        return 4 + (len(s) - len(s.lstrip()))
+
+    def _getentrysource(self, entry):
+        source = entry.getsource(self.astcache)
+        if source is not None:
+            source = source.deindent()
+        return source
+
+    def _saferepr(self, obj):
+        return py.io.saferepr(obj)
+
+    def repr_args(self, entry):
+        if self.funcargs:
+            args = []
+            for argname, argvalue in entry.frame.getargs(var=True):
+                args.append((argname, self._saferepr(argvalue)))
+            return ReprFuncArgs(args)
+
+    def get_source(self, source, line_index=-1, excinfo=None, short=False):
+        """ return formatted and marked up source lines. """
+        lines = []
+        if source is None or line_index >= len(source.lines):
+            source = py.code.Source("???")
+            line_index = 0
+        if line_index < 0:
+            line_index += len(source)
+        space_prefix = "    "
+        if short:
+            lines.append(space_prefix + source.lines[line_index].strip())
+        else:
+            for line in source.lines[:line_index]:
+                lines.append(space_prefix + line)
+            lines.append(self.flow_marker + "   " + source.lines[line_index])
+            for line in source.lines[line_index+1:]:
+                lines.append(space_prefix + line)
+        if excinfo is not None:
+            indent = 4 if short else self._getindent(source)
+            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+        return lines
+
+    def get_exconly(self, excinfo, indent=4, markall=False):
+        lines = []
+        indent = " " * indent
+        # get the real exception information out
+        exlines = excinfo.exconly(tryshort=True).split('\n')
+        failindent = self.fail_marker + indent[1:]
+        for line in exlines:
+            lines.append(failindent + line)
+            if not markall:
+                failindent = indent
+        return lines
+
+    def repr_locals(self, locals):
+        if self.showlocals:
+            lines = []
+            keys = [loc for loc in locals if loc[0] != "@"]
+            keys.sort()
+            for name in keys:
+                value = locals[name]
+                if name == '__builtins__':
+                    lines.append("__builtins__ = <builtins>")
+                else:
+                    # This formatting could all be handled by the
+                    # _repr() function, which is only reprlib.Repr in
+                    # disguise, so is very configurable.
+                    str_repr = self._saferepr(value)
+                    #if len(str_repr) < 70 or not isinstance(value,
+                    #                            (list, tuple, dict)):
+                    lines.append("%-10s = %s" %(name, str_repr))
+                    #else:
+                    #    self._line("%-10s =\\" % (name,))
+                    #    # XXX
+                    #    py.std.pprint.pprint(value, stream=self.excinfowriter)
+            return ReprLocals(lines)
+
+    def repr_traceback_entry(self, entry, excinfo=None):
+        source = self._getentrysource(entry)
+        if source is None:
+            source = py.code.Source("???")
+            line_index = 0
+        else:
+            # entry.getfirstlinesource() can be -1, should be 0 on jython
+            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
+
+        lines = []
+        style = entry._repr_style
+        if style is None:
+            style = self.style
+        if style in ("short", "long"):
+            short = style == "short"
+            reprargs = self.repr_args(entry) if not short else None
+            s = self.get_source(source, line_index, excinfo, short=short)
+            lines.extend(s)
+            if short:
+                message = "in %s" %(entry.name)
+            else:
+                message = excinfo and excinfo.typename or ""
+            path = self._makepath(entry.path)
+            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
+            localsrepr = None
+            if not short:
+                localsrepr =  self.repr_locals(entry.locals)
+            return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
+        if excinfo:
+            lines.extend(self.get_exconly(excinfo, indent=4))
+        return ReprEntry(lines, None, None, None, style)
+
+    def _makepath(self, path):
+        if not self.abspath:
+            try:
+                np = py.path.local().bestrelpath(path)
+            except OSError:
+                return path
+            if len(np) < len(str(path)):
+                path = np
+        return path
+
+    def repr_traceback(self, excinfo):
+        traceback = excinfo.traceback
+        if self.tbfilter:
+            traceback = traceback.filter()
+        recursionindex = None
+        if excinfo.errisinstance(RuntimeError):
+            if "maximum recursion depth exceeded" in str(excinfo.value):
+                recursionindex = traceback.recursionindex()
+        last = traceback[-1]
+        entries = []
+        extraline = None
+        for index, entry in enumerate(traceback):
+            einfo = (last == entry) and excinfo or None
+            reprentry = self.repr_traceback_entry(entry, einfo)
+            entries.append(reprentry)
+            if index == recursionindex:
+                extraline = "!!! Recursion detected (same locals & position)"
+                break
+        return ReprTraceback(entries, extraline, style=self.style)
+
+    def repr_excinfo(self, excinfo):
+        reprtraceback = self.repr_traceback(excinfo)
+        reprcrash = excinfo._getreprcrash()
+        return ReprExceptionInfo(reprtraceback, reprcrash)
+
+class TerminalRepr:
+    def __str__(self):
+        s = self.__unicode__()
+        if sys.version_info[0] < 3:
+            s = s.encode('utf-8')
+        return s
+
+    def __unicode__(self):
+        # FYI this is called from pytest-xdist's serialization of exception
+        # information.
+        io = py.io.TextIO()
+        tw = py.io.TerminalWriter(file=io)
+        self.toterminal(tw)
+        return io.getvalue().strip()
+
+    def __repr__(self):
+        return "<%s instance at %0x>" %(self.__class__, id(self))
+
+
+class ReprExceptionInfo(TerminalRepr):
+    def __init__(self, reprtraceback, reprcrash):
+        self.reprtraceback = reprtraceback
+        self.reprcrash = reprcrash
+        self.sections = []
+
+    def addsection(self, name, content, sep="-"):
+        self.sections.append((name, content, sep))
+
+    def toterminal(self, tw):
+        self.reprtraceback.toterminal(tw)
+        for name, content, sep in self.sections:
+            tw.sep(sep, name)
+            tw.line(content)
+
+class ReprTraceback(TerminalRepr):
+    entrysep = "_ "
+
+    def __init__(self, reprentries, extraline, style):
+        self.reprentries = reprentries
+        self.extraline = extraline
+        self.style = style
+
+    def toterminal(self, tw):
+        # the entries might have different styles
+        last_style = None
+        for i, entry in enumerate(self.reprentries):
+            if entry.style == "long":
+                tw.line("")
+            entry.toterminal(tw)
+            if i < len(self.reprentries) - 1:
+                next_entry = self.reprentries[i+1]
+                if entry.style == "long" or \
+                   entry.style == "short" and next_entry.style == "long":
+                    tw.sep(self.entrysep)
+
+        if self.extraline:
+            tw.line(self.extraline)
+
+class ReprTracebackNative(ReprTraceback):
+    def __init__(self, tblines):
+        self.style = "native"
+        self.reprentries = [ReprEntryNative(tblines)]
+        self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+    style = "native"
+
+    def __init__(self, tblines):
+        self.lines = tblines
+
+    def toterminal(self, tw):
+        tw.write("".join(self.lines))
+
+class ReprEntry(TerminalRepr):
+    localssep = "_ "
+
+    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
+        self.lines = lines
+        self.reprfuncargs = reprfuncargs
+        self.reprlocals = reprlocals
+        self.reprfileloc = filelocrepr
+        self.style = style
+
+    def toterminal(self, tw):
+        if self.style == "short":
+            self.reprfileloc.toterminal(tw)
+            for line in self.lines:
+                red = line.startswith("E   ")
+                tw.line(line, bold=True, red=red)
+            #tw.line("")
+            return
+        if self.reprfuncargs:
+            self.reprfuncargs.toterminal(tw)
+        for line in self.lines:
+            red = line.startswith("E   ")
+            tw.line(line, bold=True, red=red)
+        if self.reprlocals:
+            #tw.sep(self.localssep, "Locals")
+            tw.line("")
+            self.reprlocals.toterminal(tw)
+        if self.reprfileloc:
+            if self.lines:
+                tw.line("")
+            self.reprfileloc.toterminal(tw)
+
+    def __str__(self):
+        return "%s\n%s\n%s" % ("\n".join(self.lines),
+                               self.reprlocals,
+                               self.reprfileloc)
+
+class ReprFileLocation(TerminalRepr):
+    def __init__(self, path, lineno, message):
+        self.path = str(path)
+        self.lineno = lineno
+        self.message = message
+
+    def toterminal(self, tw):
+        # filename and lineno output for each entry,
+        # using an output format that most editors understand
+        msg = self.message
+        i = msg.find("\n")
+        if i != -1:
+            msg = msg[:i]
+        tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+
+class ReprLocals(TerminalRepr):
+    def __init__(self, lines):
+        self.lines = lines
+
+    def toterminal(self, tw):
+        for line in self.lines:
+            tw.line(line)
+
+class ReprFuncArgs(TerminalRepr):
+    def __init__(self, args):
+        self.args = args
+
+    def toterminal(self, tw):
+        if self.args:
+            linesofar = ""
+            for name, value in self.args:
+                ns = "%s = %s" %(name, value)
+                if len(ns) + len(linesofar) + 2 > tw.fullwidth:
+                    if linesofar:
+                        tw.line(linesofar)
+                    linesofar =  ns
+                else:
+                    if linesofar:
+                        linesofar += ", " + ns
+                    else:
+                        linesofar = ns
+            if linesofar:
+                tw.line(linesofar)
+            tw.line("")
+
+
+
+oldbuiltins = {}
+
+def patch_builtins(assertion=True, compile=True):
+    """ put compile and AssertionError builtins to Python's builtins. """
+    if assertion:
+        from py._code import assertion
+        l = oldbuiltins.setdefault('AssertionError', [])
+        l.append(py.builtin.builtins.AssertionError)
+        py.builtin.builtins.AssertionError = assertion.AssertionError
+    if compile:
+        l = oldbuiltins.setdefault('compile', [])
+        l.append(py.builtin.builtins.compile)
+        py.builtin.builtins.compile = py.code.compile
+
+def unpatch_builtins(assertion=True, compile=True):
+    """ remove compile and AssertionError builtins from Python builtins. """
+    if assertion:
+        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
+    if compile:
+        py.builtin.builtins.compile = oldbuiltins['compile'].pop()
+
+def getrawcode(obj, trycall=True):
+    """ return code object for given function. """
+    try:
+        return obj.__code__
+    except AttributeError:
+        obj = getattr(obj, 'im_func', obj)
+        obj = getattr(obj, 'func_code', obj)
+        obj = getattr(obj, 'f_code', obj)
+        obj = getattr(obj, '__code__', obj)
+        if trycall and not hasattr(obj, 'co_firstlineno'):
+            if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+                x = getrawcode(obj.__call__, trycall=False)
+                if hasattr(x, 'co_firstlineno'):
+                    return x
+        return obj
+
--- a/third_party/python/py/py/_code/source.py
+++ b/third_party/python/py/py/_code/source.py
@@ -1,419 +1,411 @@
-from __future__ import generators
-
-from bisect import bisect_right
-import sys
-import inspect, tokenize
-import py
-from types import ModuleType
-cpy_compile = compile
-
-try:
-    import _ast
-    from _ast import PyCF_ONLY_AST as _AST_FLAG
-except ImportError:
-    _AST_FLAG = 0
-    _ast = None
-
-
-class Source(object):
-    """ a immutable object holding a source code fragment,
-        possibly deindenting it.
-    """
-    _compilecounter = 0
-    def __init__(self, *parts, **kwargs):
-        self.lines = lines = []
-        de = kwargs.get('deindent', True)
-        rstrip = kwargs.get('rstrip', True)
-        for part in parts:
-            if not part:
-                partlines = []
-            if isinstance(part, Source):
-                partlines = part.lines
-            elif isinstance(part, (tuple, list)):
-                partlines = [x.rstrip("\n") for x in part]
-            elif isinstance(part, py.builtin._basestring):
-                partlines = part.split('\n')
-                if rstrip:
-                    while partlines:
-                        if partlines[-1].strip():
-                            break
-                        partlines.pop()
-            else:
-                partlines = getsource(part, deindent=de).lines
-            if de:
-                partlines = deindent(partlines)
-            lines.extend(partlines)
-
-    def __eq__(self, other):
-        try:
-            return self.lines == other.lines
-        except AttributeError:
-            if isinstance(other, str):
-                return str(self) == other
-            return False
-
-    def __getitem__(self, key):
-        if isinstance(key, int):
-            return self.lines[key]
-        else:
-            if key.step not in (None, 1):
-                raise IndexError("cannot slice a Source with a step")
-            return self.__getslice__(key.start, key.stop)
-
-    def __len__(self):
-        return len(self.lines)
-
-    def __getslice__(self, start, end):
-        newsource = Source()
-        newsource.lines = self.lines[start:end]
-        return newsource
-
-    def strip(self):
-        """ return new source object with trailing
-            and leading blank lines removed.
-        """
-        start, end = 0, len(self)
-        while start < end and not self.lines[start].strip():
-            start += 1
-        while end > start and not self.lines[end-1].strip():
-            end -= 1
-        source = Source()
-        source.lines[:] = self.lines[start:end]
-        return source
-
-    def putaround(self, before='', after='', indent=' ' * 4):
-        """ return a copy of the source object with
-            'before' and 'after' wrapped around it.
-        """
-        before = Source(before)
-        after = Source(after)
-        newsource = Source()
-        lines = [ (indent + line) for line in self.lines]
-        newsource.lines = before.lines + lines +  after.lines
-        return newsource
-
-    def indent(self, indent=' ' * 4):
-        """ return a copy of the source object with
-            all lines indented by the given indent-string.
-        """
-        newsource = Source()
-        newsource.lines = [(indent+line) for line in self.lines]
-        return newsource
-
-    def getstatement(self, lineno, assertion=False):
-        """ return Source statement which contains the
-            given linenumber (counted from 0).
-        """
-        start, end = self.getstatementrange(lineno, assertion)
-        return self[start:end]
-
-    def getstatementrange(self, lineno, assertion=False):
-        """ return (start, end) tuple which spans the minimal
-            statement region which containing the given lineno.
-        """
-        if not (0 <= lineno < len(self)):
-            raise IndexError("lineno out of range")
-        ast, start, end = getstatementrange_ast(lineno, self)
-        return start, end
-
-    def deindent(self, offset=None):
-        """ return a new source object deindented by offset.
-            If offset is None then guess an indentation offset from
-            the first non-blank line.  Subsequent lines which have a
-            lower indentation offset will be copied verbatim as
-            they are assumed to be part of multilines.
-        """
-        # XXX maybe use the tokenizer to properly handle multiline
-        #     strings etc.pp?
-        newsource = Source()
-        newsource.lines[:] = deindent(self.lines, offset)
-        return newsource
-
-    def isparseable(self, deindent=True):
-        """ return True if source is parseable, heuristically
-            deindenting it by default.
-        """
-        try:
-            import parser
-        except ImportError:
-            syntax_checker = lambda x: compile(x, 'asd', 'exec')
-        else:
-            syntax_checker = parser.suite
-
-        if deindent:
-            source = str(self.deindent())
-        else:
-            source = str(self)
-        try:
-            #compile(source+'\n', "x", "exec")
-            syntax_checker(source+'\n')
-        except KeyboardInterrupt:
-            raise
-        except Exception:
-            return False
-        else:
-            return True
-
-    def __str__(self):
-        return "\n".join(self.lines)
-
-    def compile(self, filename=None, mode='exec',
-                flag=generators.compiler_flag,
-                dont_inherit=0, _genframe=None):
-        """ return compiled code object. if filename is None
-            invent an artificial filename which displays
-            the source/line position of the caller frame.
-        """
-        if not filename or py.path.local(filename).check(file=0):
-            if _genframe is None:
-                _genframe = sys._getframe(1) # the caller
-            fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
-            base = "<%d-codegen " % self._compilecounter
-            self.__class__._compilecounter += 1
-            if not filename:
-                filename = base + '%s:%d>' % (fn, lineno)
-            else:
-                filename = base + '%r %s:%d>' % (filename, fn, lineno)
-        source = "\n".join(self.lines) + '\n'
-        try:
-            co = cpy_compile(source, filename, mode, flag)
-        except SyntaxError:
-            ex = sys.exc_info()[1]
-            # re-represent syntax errors from parsing python strings
-            msglines = self.lines[:ex.lineno]
-            if ex.offset:
-                msglines.append(" "*ex.offset + '^')
-            msglines.append("(code was compiled probably from here: %s)" % filename)
-            newex = SyntaxError('\n'.join(msglines))
-            newex.offset = ex.offset
-            newex.lineno = ex.lineno
-            newex.text = ex.text
-            raise newex
-        else:
-            if flag & _AST_FLAG:
-                return co
-            lines = [(x + "\n") for x in self.lines]
-            if sys.version_info[0] >= 3:
-                # XXX py3's inspect.getsourcefile() checks for a module
-                # and a pep302 __loader__ ... we don't have a module
-                # at code compile-time so we need to fake it here
-                m = ModuleType("_pycodecompile_pseudo_module")
-                py.std.inspect.modulesbyfile[filename] = None
-                py.std.sys.modules[None] = m
-                m.__loader__ = 1
-            py.std.linecache.cache[filename] = (1, None, lines, filename)
-            return co
-
-#
-# public API shortcut functions
-#
-
-def compile_(source, filename=None, mode='exec', flags=
-            generators.compiler_flag, dont_inherit=0):
-    """ compile the given source to a raw code object,
-        and maintain an internal cache which allows later
-        retrieval of the source code for the code object
-        and any recursively created code objects.
-    """
-    if _ast is not None and isinstance(source, _ast.AST):
-        # XXX should Source support having AST?
-        return cpy_compile(source, filename, mode, flags, dont_inherit)
-    _genframe = sys._getframe(1) # the caller
-    s = Source(source)
-    co = s.compile(filename, mode, flags, _genframe=_genframe)
-    return co
-
-
-def getfslineno(obj):
-    """ Return source location (path, lineno) for the given object.
-    If the source cannot be determined return ("", -1)
-    """
-    try:
-        code = py.code.Code(obj)
-    except TypeError:
-        try:
-            fn = (py.std.inspect.getsourcefile(obj) or
-                  py.std.inspect.getfile(obj))
-        except TypeError:
-            return "", -1
-
-        fspath = fn and py.path.local(fn) or None
-        lineno = -1
-        if fspath:
-            try:
-                _, lineno = findsource(obj)
-            except IOError:
-                pass
-    else:
-        fspath = code.path
-        lineno = code.firstlineno
-    assert isinstance(lineno, int)
-    return fspath, lineno
-
-#
-# helper functions
-#
-
-def findsource(obj):
-    try:
-        sourcelines, lineno = py.std.inspect.findsource(obj)
-    except py.builtin._sysex:
-        raise
-    except:
-        return None, -1
-    source = Source()
-    source.lines = [line.rstrip() for line in sourcelines]
-    return source, lineno
-
-def getsource(obj, **kwargs):
-    obj = py.code.getrawcode(obj)
-    try:
-        strsrc = inspect.getsource(obj)
-    except IndentationError:
-        strsrc = "\"Buggy python version consider upgrading, cannot get source\""
-    assert isinstance(strsrc, str)
-    return Source(strsrc, **kwargs)
-
-def deindent(lines, offset=None):
-    if offset is None:
-        for line in lines:
-            line = line.expandtabs()
-            s = line.lstrip()
-            if s:
-                offset = len(line)-len(s)
-                break
-        else:
-            offset = 0
-    if offset == 0:
-        return list(lines)
-    newlines = []
-    def readline_generator(lines):
-        for line in lines:
-            yield line + '\n'
-        while True:
-            yield ''
-
-    it = readline_generator(lines)
-
-    try:
-        for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
-            if sline > len(lines):
-                break # End of input reached
-            if sline > len(newlines):
-                line = lines[sline - 1].expandtabs()
-                if line.lstrip() and line[:offset].isspace():
-                    line = line[offset:] # Deindent
-                newlines.append(line)
-
-            for i in range(sline, eline):
-                # Don't deindent continuing lines of
-                # multiline tokens (i.e. multiline strings)
-                newlines.append(lines[i])
-    except (IndentationError, tokenize.TokenError):
-        pass
-    # Add any lines we didn't see. E.g. if an exception was raised.
-    newlines.extend(lines[len(newlines):])
-    return newlines
-
-
-def get_statement_startend2(lineno, node):
-    import ast
-    # flatten all statements and except handlers into one lineno-list
-    # AST's line numbers start indexing at 1
-    l = []
-    for x in ast.walk(node):
-        if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
-            l.append(x.lineno - 1)
-            for name in "finalbody", "orelse":
-                val = getattr(x, name, None)
-                if val:
-                    # treat the finally/orelse part as its own statement
-                    l.append(val[0].lineno - 1 - 1)
-    l.sort()
-    insert_index = bisect_right(l, lineno)
-    start = l[insert_index - 1]
-    if insert_index >= len(l):
-        end = None
-    else:
-        end = l[insert_index]
-    return start, end
-
-
-def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
-    if astnode is None:
-        content = str(source)
-        if sys.version_info < (2,7):
-            content += "\n"
-        try:
-            astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
-        except ValueError:
-            start, end = getstatementrange_old(lineno, source, assertion)
-            return None, start, end
-    start, end = get_statement_startend2(lineno, astnode)
-    # we need to correct the end:
-    # - ast-parsing strips comments
-    # - there might be empty lines
-    # - we might have lesser indented code blocks at the end
-    if end is None:
-        end = len(source.lines)
-
-    if end > start + 1:
-        # make sure we don't span differently indented code blocks
-        # by using the BlockFinder helper used which inspect.getsource() uses itself
-        block_finder = inspect.BlockFinder()
-        # if we start with an indented line, put blockfinder to "started" mode
-        block_finder.started = source.lines[start][0].isspace()
-        it = ((x + "\n") for x in source.lines[start:end])
-        try:
-            for tok in tokenize.generate_tokens(lambda: next(it)):
-                block_finder.tokeneater(*tok)
-        except (inspect.EndOfBlock, IndentationError):
-            end = block_finder.last + start
-        except Exception:
-            pass
-
-    # the end might still point to a comment or empty line, correct it
-    while end:
-        line = source.lines[end - 1].lstrip()
-        if line.startswith("#") or not line:
-            end -= 1
-        else:
-            break
-    return astnode, start, end
-
-
-def getstatementrange_old(lineno, source, assertion=False):
-    """ return (start, end) tuple which spans the minimal
-        statement region which containing the given lineno.
-        raise an IndexError if no such statementrange can be found.
-    """
-    # XXX this logic is only used on python2.4 and below
-    # 1. find the start of the statement
-    from codeop import compile_command
-    for start in range(lineno, -1, -1):
-        if assertion:
-            line = source.lines[start]
-            # the following lines are not fully tested, change with care
-            if 'super' in line and 'self' in line and '__init__' in line:
-                raise IndexError("likely a subclass")
-            if "assert" not in line and "raise" not in line:
-                continue
-        trylines = source.lines[start:lineno+1]
-        # quick hack to prepare parsing an indented line with
-        # compile_command() (which errors on "return" outside defs)
-        trylines.insert(0, 'def xxx():')
-        trysource = '\n '.join(trylines)
-        #              ^ space here
-        try:
-            compile_command(trysource)
-        except (SyntaxError, OverflowError, ValueError):
-            continue
-
-        # 2. find the end of the statement
-        for end in range(lineno+1, len(source)+1):
-            trysource = source[start:end]
-            if trysource.isparseable():
-                return start, end
-    raise SyntaxError("no valid source range around line %d " % (lineno,))
-
-
+from __future__ import generators
+
+from bisect import bisect_right
+import sys
+import inspect, tokenize
+import py
+from types import ModuleType
+cpy_compile = compile
+
+try:
+    import _ast
+    from _ast import PyCF_ONLY_AST as _AST_FLAG
+except ImportError:
+    _AST_FLAG = 0
+    _ast = None
+
+
+class Source(object):
+    """ a immutable object holding a source code fragment,
+        possibly deindenting it.
+    """
+    _compilecounter = 0
+    def __init__(self, *parts, **kwargs):
+        self.lines = lines = []
+        de = kwargs.get('deindent', True)
+        rstrip = kwargs.get('rstrip', True)
+        for part in parts:
+            if not part:
+                partlines = []
+            if isinstance(part, Source):
+                partlines = part.lines
+            elif isinstance(part, (tuple, list)):
+                partlines = [x.rstrip("\n") for x in part]
+            elif isinstance(part, py.builtin._basestring):
+                partlines = part.split('\n')
+                if rstrip:
+                    while partlines:
+                        if partlines[-1].strip():
+                            break
+                        partlines.pop()
+            else:
+                partlines = getsource(part, deindent=de).lines
+            if de:
+                partlines = deindent(partlines)
+            lines.extend(partlines)
+
+    def __eq__(self, other):
+        try:
+            return self.lines == other.lines
+        except AttributeError:
+            if isinstance(other, str):
+                return str(self) == other
+            return False
+
+    def __getitem__(self, key):
+        if isinstance(key, int):
+            return self.lines[key]
+        else:
+            if key.step not in (None, 1):
+                raise IndexError("cannot slice a Source with a step")
+            return self.__getslice__(key.start, key.stop)
+
+    def __len__(self):
+        return len(self.lines)
+
+    def __getslice__(self, start, end):
+        newsource = Source()
+        newsource.lines = self.lines[start:end]
+        return newsource
+
+    def strip(self):
+        """ return new source object with trailing
+            and leading blank lines removed.
+        """
+        start, end = 0, len(self)
+        while start < end and not self.lines[start].strip():
+            start += 1
+        while end > start and not self.lines[end-1].strip():
+            end -= 1
+        source = Source()
+        source.lines[:] = self.lines[start:end]
+        return source
+
+    def putaround(self, before='', after='', indent=' ' * 4):
+        """ return a copy of the source object with
+            'before' and 'after' wrapped around it.
+        """
+        before = Source(before)
+        after = Source(after)
+        newsource = Source()
+        lines = [ (indent + line) for line in self.lines]
+        newsource.lines = before.lines + lines +  after.lines
+        return newsource
+
+    def indent(self, indent=' ' * 4):
+        """ return a copy of the source object with
+            all lines indented by the given indent-string.
+        """
+        newsource = Source()
+        newsource.lines = [(indent+line) for line in self.lines]
+        return newsource
+
+    def getstatement(self, lineno, assertion=False):
+        """ return Source statement which contains the
+            given linenumber (counted from 0).
+        """
+        start, end = self.getstatementrange(lineno, assertion)
+        return self[start:end]
+
+    def getstatementrange(self, lineno, assertion=False):
+        """ return (start, end) tuple which spans the minimal
+            statement region which containing the given lineno.
+        """
+        if not (0 <= lineno < len(self)):
+            raise IndexError("lineno out of range")
+        ast, start, end = getstatementrange_ast(lineno, self)
+        return start, end
+
+    def deindent(self, offset=None):
+        """ return a new source object deindented by offset.
+            If offset is None then guess an indentation offset from
+            the first non-blank line.  Subsequent lines which have a
+            lower indentation offset will be copied verbatim as
+            they are assumed to be part of multilines.
+        """
+        # XXX maybe use the tokenizer to properly handle multiline
+        #     strings etc.pp?
+        newsource = Source()
+        newsource.lines[:] = deindent(self.lines, offset)
+        return newsource
+
+    def isparseable(self, deindent=True):
+        """ return True if source is parseable, heuristically
+            deindenting it by default.
+        """
+        try:
+            import parser
+        except ImportError:
+            syntax_checker = lambda x: compile(x, 'asd', 'exec')
+        else:
+            syntax_checker = parser.suite
+
+        if deindent:
+            source = str(self.deindent())
+        else:
+            source = str(self)
+        try:
+            #compile(source+'\n', "x", "exec")
+            syntax_checker(source+'\n')
+        except KeyboardInterrupt:
+            raise
+        except Exception:
+            return False
+        else:
+            return True
+
+    def __str__(self):
+        return "\n".join(self.lines)
+
+    def compile(self, filename=None, mode='exec',
+                flag=generators.compiler_flag,
+                dont_inherit=0, _genframe=None):
+        """ return compiled code object. if filename is None
+            invent an artificial filename which displays
+            the source/line position of the caller frame.
+        """
+        if not filename or py.path.local(filename).check(file=0):
+            if _genframe is None:
+                _genframe = sys._getframe(1) # the caller
+            fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+            base = "<%d-codegen " % self._compilecounter
+            self.__class__._compilecounter += 1
+            if not filename:
+                filename = base + '%s:%d>' % (fn, lineno)
+            else:
+                filename = base + '%r %s:%d>' % (filename, fn, lineno)
+        source = "\n".join(self.lines) + '\n'
+        try:
+            co = cpy_compile(source, filename, mode, flag)
+        except SyntaxError:
+            ex = sys.exc_info()[1]
+            # re-represent syntax errors from parsing python strings
+            msglines = self.lines[:ex.lineno]
+            if ex.offset:
+                msglines.append(" "*ex.offset + '^')
+            msglines.append("(code was compiled probably from here: %s)" % filename)
+            newex = SyntaxError('\n'.join(msglines))
+            newex.offset = ex.offset
+            newex.lineno = ex.lineno
+            newex.text = ex.text
+            raise newex
+        else:
+            if flag & _AST_FLAG:
+                return co
+            lines = [(x + "\n") for x in self.lines]
+            py.std.linecache.cache[filename] = (1, None, lines, filename)
+            return co
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+            generators.compiler_flag, dont_inherit=0):
+    """ compile the given source to a raw code object,
+        and maintain an internal cache which allows later
+        retrieval of the source code for the code object
+        and any recursively created code objects.
+    """
+    if _ast is not None and isinstance(source, _ast.AST):
+        # XXX should Source support having AST?
+        return cpy_compile(source, filename, mode, flags, dont_inherit)
+    _genframe = sys._getframe(1) # the caller
+    s = Source(source)
+    co = s.compile(filename, mode, flags, _genframe=_genframe)
+    return co
+
+
+def getfslineno(obj):
+    """ Return source location (path, lineno) for the given object.
+    If the source cannot be determined return ("", -1)
+    """
+    try:
+        code = py.code.Code(obj)
+    except TypeError:
+        try:
+            fn = (py.std.inspect.getsourcefile(obj) or
+                  py.std.inspect.getfile(obj))
+        except TypeError:
+            return "", -1
+
+        fspath = fn and py.path.local(fn) or None
+        lineno = -1
+        if fspath:
+            try:
+                _, lineno = findsource(obj)
+            except IOError:
+                pass
+    else:
+        fspath = code.path
+        lineno = code.firstlineno
+    assert isinstance(lineno, int)
+    return fspath, lineno
+
+#
+# helper functions
+#
+
+def findsource(obj):
+    try:
+        sourcelines, lineno = py.std.inspect.findsource(obj)
+    except py.builtin._sysex:
+        raise
+    except:
+        return None, -1
+    source = Source()
+    source.lines = [line.rstrip() for line in sourcelines]
+    return source, lineno
+
+def getsource(obj, **kwargs):
+    obj = py.code.getrawcode(obj)
+    try:
+        strsrc = inspect.getsource(obj)
+    except IndentationError:
+        strsrc = "\"Buggy python version consider upgrading, cannot get source\""
+    assert isinstance(strsrc, str)
+    return Source(strsrc, **kwargs)
+
+def deindent(lines, offset=None):
+    if offset is None:
+        for line in lines:
+            line = line.expandtabs()
+            s = line.lstrip()
+            if s:
+                offset = len(line)-len(s)
+                break
+        else:
+            offset = 0
+    if offset == 0:
+        return list(lines)
+    newlines = []
+    def readline_generator(lines):
+        for line in lines:
+            yield line + '\n'
+        while True:
+            yield ''
+
+    it = readline_generator(lines)
+
+    try:
+        for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
+            if sline > len(lines):
+                break # End of input reached
+            if sline > len(newlines):
+                line = lines[sline - 1].expandtabs()
+                if line.lstrip() and line[:offset].isspace():
+                    line = line[offset:] # Deindent
+                newlines.append(line)
+
+            for i in range(sline, eline):
+                # Don't deindent continuing lines of
+                # multiline tokens (i.e. multiline strings)
+                newlines.append(lines[i])
+    except (IndentationError, tokenize.TokenError):
+        pass
+    # Add any lines we didn't see. E.g. if an exception was raised.
+    newlines.extend(lines[len(newlines):])
+    return newlines
+
+
+def get_statement_startend2(lineno, node):
+    import ast
+    # flatten all statements and except handlers into one lineno-list
+    # AST's line numbers start indexing at 1
+    l = []
+    for x in ast.walk(node):
+        if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
+            l.append(x.lineno - 1)
+            for name in "finalbody", "orelse":
+                val = getattr(x, name, None)
+                if val:
+                    # treat the finally/orelse part as its own statement
+                    l.append(val[0].lineno - 1 - 1)
+    l.sort()
+    insert_index = bisect_right(l, lineno)
+    start = l[insert_index - 1]
+    if insert_index >= len(l):
+        end = None
+    else:
+        end = l[insert_index]
+    return start, end
+
+
+def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+    if astnode is None:
+        content = str(source)
+        if sys.version_info < (2,7):
+            content += "\n"
+        try:
+            astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
+        except ValueError:
+            start, end = getstatementrange_old(lineno, source, assertion)
+            return None, start, end
+    start, end = get_statement_startend2(lineno, astnode)
+    # we need to correct the end:
+    # - ast-parsing strips comments
+    # - there might be empty lines
+    # - we might have lesser indented code blocks at the end
+    if end is None:
+        end = len(source.lines)
+
+    if end > start + 1:
+        # make sure we don't span differently indented code blocks
+        # by using the BlockFinder helper used which inspect.getsource() uses itself
+        block_finder = inspect.BlockFinder()
+        # if we start with an indented line, put blockfinder to "started" mode
+        block_finder.started = source.lines[start][0].isspace()
+        it = ((x + "\n") for x in source.lines[start:end])
+        try:
+            for tok in tokenize.generate_tokens(lambda: next(it)):
+                block_finder.tokeneater(*tok)
+        except (inspect.EndOfBlock, IndentationError):
+            end = block_finder.last + start
+        except Exception:
+            pass
+
+    # the end might still point to a comment or empty line, correct it
+    while end:
+        line = source.lines[end - 1].lstrip()
+        if line.startswith("#") or not line:
+            end -= 1
+        else:
+            break
+    return astnode, start, end
+
+
+def getstatementrange_old(lineno, source, assertion=False):
+    """ return (start, end) tuple which spans the minimal
+        statement region which containing the given lineno.
+        raise an IndexError if no such statementrange can be found.
+    """
+    # XXX this logic is only used on python2.4 and below
+    # 1. find the start of the statement
+    from codeop import compile_command
+    for start in range(lineno, -1, -1):
+        if assertion:
+            line = source.lines[start]
+            # the following lines are not fully tested, change with care
+            if 'super' in line and 'self' in line and '__init__' in line:
+                raise IndexError("likely a subclass")
+            if "assert" not in line and "raise" not in line:
+                continue
+        trylines = source.lines[start:lineno+1]
+        # quick hack to prepare parsing an indented line with
+        # compile_command() (which errors on "return" outside defs)
+        trylines.insert(0, 'def xxx():')
+        trysource = '\n '.join(trylines)
+        #              ^ space here
+        try:
+            compile_command(trysource)
+        except (SyntaxError, OverflowError, ValueError):
+            continue
+
+        # 2. find the end of the statement
+        for end in range(lineno+1, len(source)+1):
+            trysource = source[start:end]
+            if trysource.isparseable():
+                return start, end
+    raise SyntaxError("no valid source range around line %d " % (lineno,))
+
+
--- a/third_party/python/py/py/_error.py
+++ b/third_party/python/py/py/_error.py
@@ -1,88 +1,89 @@
-"""
-create errno-specific classes for IO or os calls.
-
-"""
-import sys, os, errno
-
-class Error(EnvironmentError):
-    def __repr__(self):
-        return "%s.%s %r: %s " %(self.__class__.__module__,
-                               self.__class__.__name__,
-                               self.__class__.__doc__,
-                               " ".join(map(str, self.args)),
-                               #repr(self.args)
-                                )
-
-    def __str__(self):
-        s = "[%s]: %s" %(self.__class__.__doc__,
-                          " ".join(map(str, self.args)),
-                          )
-        return s
-
-_winerrnomap = {
-    2: errno.ENOENT,
-    3: errno.ENOENT,
-    17: errno.EEXIST,
-    13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable
-    22: errno.ENOTDIR,
-    20: errno.ENOTDIR,
-    267: errno.ENOTDIR,
-    5: errno.EACCES,  # anything better?
-}
-
-class ErrorMaker(object):
-    """ lazily provides Exception classes for each possible POSIX errno
-        (as defined per the 'errno' module).  All such instances
-        subclass EnvironmentError.
-    """
-    Error = Error
-    _errno2class = {}
-
-    def __getattr__(self, name):
-        if name[0] == "_":
-            raise AttributeError(name)
-        eno = getattr(errno, name)
-        cls = self._geterrnoclass(eno)
-        setattr(self, name, cls)
-        return cls
-
-    def _geterrnoclass(self, eno):
-        try:
-            return self._errno2class[eno]
-        except KeyError:
-            clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
-            errorcls = type(Error)(clsname, (Error,),
-                    {'__module__':'py.error',
-                     '__doc__': os.strerror(eno)})
-            self._errno2class[eno] = errorcls
-            return errorcls
-
-    def checked_call(self, func, *args, **kwargs):
-        """ call a function and raise an errno-exception if applicable. """
-        __tracebackhide__ = True
-        try:
-            return func(*args, **kwargs)
-        except self.Error:
-            raise
-        except (OSError, EnvironmentError):
-            cls, value, tb = sys.exc_info()
-            if not hasattr(value, 'errno'):
-                raise
-            __tracebackhide__ = False
-            errno = value.errno
-            try:
-                if not isinstance(value, WindowsError):
-                    raise NameError
-            except NameError:
-                # we are not on Windows, or we got a proper OSError
-                cls = self._geterrnoclass(errno)
-            else:
-                try:
-                    cls = self._geterrnoclass(_winerrnomap[errno])
-                except KeyError:
-                    raise value
-            raise cls("%s%r" % (func.__name__, args))
-            __tracebackhide__ = True
-            
-
-error = ErrorMaker()
+"""
+create errno-specific classes for IO or os calls.
+
+"""
+import sys, os, errno
+
+class Error(EnvironmentError):
+    def __repr__(self):
+        return "%s.%s %r: %s " %(self.__class__.__module__,
+                               self.__class__.__name__,
+                               self.__class__.__doc__,
+                               " ".join(map(str, self.args)),
+                               #repr(self.args)
+                                )
+
+    def __str__(self):
+        s = "[%s]: %s" %(self.__class__.__doc__,
+                          " ".join(map(str, self.args)),
+                          )
+        return s
+
+_winerrnomap = {
+    2: errno.ENOENT,
+    3: errno.ENOENT,
+    17: errno.EEXIST,
+    18: errno.EXDEV,
+    13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable
+    22: errno.ENOTDIR,
+    20: errno.ENOTDIR,
+    267: errno.ENOTDIR,
+    5: errno.EACCES,  # anything better?
+}
+
+class ErrorMaker(object):
+    """ lazily provides Exception classes for each possible POSIX errno
+        (as defined per the 'errno' module).  All such instances
+        subclass EnvironmentError.
+    """
+    Error = Error
+    _errno2class = {}
+
+    def __getattr__(self, name):
+        if name[0] == "_":
+            raise AttributeError(name)
+        eno = getattr(errno, name)
+        cls = self._geterrnoclass(eno)
+        setattr(self, name, cls)
+        return cls
+
+    def _geterrnoclass(self, eno):
+        try:
+            return self._errno2class[eno]
+        except KeyError:
+            clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
+            errorcls = type(Error)(clsname, (Error,),
+                    {'__module__':'py.error',
+                     '__doc__': os.strerror(eno)})
+            self._errno2class[eno] = errorcls
+            return errorcls
+
+    def checked_call(self, func, *args, **kwargs):
+        """ call a function and raise an errno-exception if applicable. """
+        __tracebackhide__ = True
+        try:
+            return func(*args, **kwargs)
+        except self.Error:
+            raise
+        except (OSError, EnvironmentError):
+            cls, value, tb = sys.exc_info()
+            if not hasattr(value, 'errno'):
+                raise
+            __tracebackhide__ = False
+            errno = value.errno
+            try:
+                if not isinstance(value, WindowsError):
+                    raise NameError
+            except NameError:
+                # we are not on Windows, or we got a proper OSError
+                cls = self._geterrnoclass(errno)
+            else:
+                try:
+                    cls = self._geterrnoclass(_winerrnomap[errno])
+                except KeyError:
+                    raise value
+            raise cls("%s%r" % (func.__name__, args))
+            __tracebackhide__ = True
+            
+
+error = ErrorMaker()
--- a/third_party/python/py/py/_iniconfig.py
+++ b/third_party/python/py/py/_iniconfig.py
@@ -1,162 +1,162 @@
-""" brain-dead simple parser for ini-style files.
-(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed
-"""
-__version__ = "0.2.dev2"
-
-__all__ = ['IniConfig', 'ParseError']
-
-COMMENTCHARS = "#;"
-
-class ParseError(Exception):
-    def __init__(self, path, lineno, msg):
-        Exception.__init__(self, path, lineno, msg)
-        self.path = path
-        self.lineno = lineno
-        self.msg = msg
-
-    def __str__(self):
-        return "%s:%s: %s" %(self.path, self.lineno+1, self.msg)
-
-class SectionWrapper(object):
-    def __init__(self, config, name):
-        self.config = config
-        self.name = name
-
-    def lineof(self, name):
-        return self.config.lineof(self.name, name)
-
-    def get(self, key, default=None, convert=str):
-        return self.config.get(self.name, key, convert=convert, default=default)
-
-    def __getitem__(self, key):
-        return self.config.sections[self.name][key]
-
-    def __iter__(self):
-        section = self.config.sections.get(self.name, [])
-        def lineof(key):
-            return self.config.lineof(self.name, key)
-        for name in sorted(section, key=lineof):
-            yield name
-
-    def items(self):
-        for name in self:
-            yield name, self[name]
-
-
-class IniConfig(object):
-    def __init__(self, path, data=None):
-        self.path = str(path) # convenience
-        if data is None:
-            f = open(self.path)
-            try:
-                tokens = self._parse(iter(f))
-            finally:
-                f.close()
-        else:
-            tokens = self._parse(data.splitlines(True))
-
-        self._sources = {}
-        self.sections = {}
-
-        for lineno, section, name, value in tokens:
-            if section is None:
-                self._raise(lineno, 'no section header defined')
-            self._sources[section, name] = lineno
-            if name is None:
-                if section in self.sections:
-                    self._raise(lineno, 'duplicate section %r'%(section, ))
-                self.sections[section] = {}
-            else:
-                if name in self.sections[section]:
-                    self._raise(lineno, 'duplicate name %r'%(name, ))
-                self.sections[section][name] = value
-
-    def _raise(self, lineno, msg):
-        raise ParseError(self.path, lineno, msg)
-
-    def _parse(self, line_iter):
-        result = []
-        section = None
-        for lineno, line in enumerate(line_iter):
-            name, data = self._parseline(line, lineno)
-            # new value
-            if name is not None and data is not None:
-                result.append((lineno, section, name, data))
-            # new section
-            elif name is not None and data is None:
-                if not name:
-                    self._raise(lineno, 'empty section name')
-                section = name
-                result.append((lineno, section, None, None))
-            # continuation
-            elif name is None and data is not None:
-                if not result:
-                    self._raise(lineno, 'unexpected value continuation')
-                last = result.pop()
-                last_name, last_data = last[-2:]
-                if last_name is None:
-                    self._raise(lineno, 'unexpected value continuation')
-
-                if last_data:
-                    data = '%s\n%s' % (last_data, data)
-                result.append(last[:-1] + (data,))
-        return result
-
-    def _parseline(self, line, lineno):
-        # blank lines
-        if iscommentline(line):
-            line = ""
-        else:
-            line = line.rstrip()
-        if not line:
-            return None, None
-        # section
-        if line[0] == '[':
-            realline = line
-            for c in COMMENTCHARS:
-                line = line.split(c)[0].rstrip()
-            if line[-1] == "]":
-                return line[1:-1], None
-            return None, realline.strip()
-        # value
-        elif not line[0].isspace():
-            try:
-                name, value = line.split('=', 1)
-                if ":" in name:
-                    raise ValueError()
-            except ValueError:
-                try:
-                    name, value = line.split(":", 1)
-                except ValueError:
-                    self._raise(lineno, 'unexpected line: %r' % line)
-            return name.strip(), value.strip()
-        # continuation
-        else:
-            return None, line.strip()
-
-    def lineof(self, section, name=None):
-        lineno = self._sources.get((section, name))
-        if lineno is not None:
-            return lineno + 1
-
-    def get(self, section, name, default=None, convert=str):
-        try:
-            return convert(self.sections[section][name])
-        except KeyError:
-            return default
-
-    def __getitem__(self, name):
-        if name not in self.sections:
-            raise KeyError(name)
-        return SectionWrapper(self, name)
-
-    def __iter__(self):
-        for name in sorted(self.sections, key=self.lineof):
-            yield SectionWrapper(self, name)
-
-    def __contains__(self, arg):
-        return arg in self.sections
-
-def iscommentline(line):
-    c = line.lstrip()[:1]
-    return c in COMMENTCHARS
+""" brain-dead simple parser for ini-style files.
+(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed
+"""
+__version__ = "0.2.dev2"
+
+__all__ = ['IniConfig', 'ParseError']
+
+COMMENTCHARS = "#;"
+
+class ParseError(Exception):
+    def __init__(self, path, lineno, msg):
+        Exception.__init__(self, path, lineno, msg)
+        self.path = path
+        self.lineno = lineno
+        self.msg = msg
+
+    def __str__(self):
+        return "%s:%s: %s" %(self.path, self.lineno+1, self.msg)
+
+class SectionWrapper(object):
+    def __init__(self, config, name):
+        self.config = config
+        self.name = name
+
+    def lineof(self, name):
+        return self.config.lineof(self.name, name)
+
+    def get(self, key, default=None, convert=str):
+        return self.config.get(self.name, key, convert=convert, default=default)
+
+    def __getitem__(self, key):
+        return self.config.sections[self.name][key]
+
+    def __iter__(self):
+        section = self.config.sections.get(self.name, [])
+        def lineof(key):
+            return self.config.lineof(self.name, key)
+        for name in sorted(section, key=lineof):
+            yield name
+
+    def items(self):
+        for name in self:
+            yield name, self[name]
+
+
+class IniConfig(object):
+    def __init__(self, path, data=None):
+        self.path = str(path) # convenience
+        if data is None:
+            f = open(self.path)
+            try:
+                tokens = self._parse(iter(f))
+            finally:
+                f.close()
+        else:
+            tokens = self._parse(data.splitlines(True))
+
+        self._sources = {}
+        self.sections = {}
+
+        for lineno, section, name, value in tokens:
+            if section is None:
+                self._raise(lineno, 'no section header defined')
+            self._sources[section, name] = lineno
+            if name is None:
+                if section in self.sections:
+                    self._raise(lineno, 'duplicate section %r'%(section, ))
+                self.sections[section] = {}
+            else:
+                if name in self.sections[section]:
+                    self._raise(lineno, 'duplicate name %r'%(name, ))
+                self.sections[section][name] = value
+
+    def _raise(self, lineno, msg):
+        raise ParseError(self.path, lineno, msg)
+
+    def _parse(self, line_iter):
+        result = []
+        section = None
+        for lineno, line in enumerate(line_iter):
+            name, data = self._parseline(line, lineno)
+            # new value
+            if name is not None and data is not None:
+                result.append((lineno, section, name, data))
+            # new section
+            elif name is not None and data is None:
+                if not name:
+                    self._raise(lineno, 'empty section name')
+                section = name
+                result.append((lineno, section, None, None))
+            # continuation
+            elif name is None and data is not None:
+                if not result:
+                    self._raise(lineno, 'unexpected value continuation')
+                last = result.pop()
+                last_name, last_data = last[-2:]
+                if last_name is None:
+                    self._raise(lineno, 'unexpected value continuation')
+
+                if last_data:
+                    data = '%s\n%s' % (last_data, data)
+                result.append(last[:-1] + (data,))
+        return result
+
+    def _parseline(self, line, lineno):
+        # blank lines
+        if iscommentline(line):
+            line = ""
+        else:
+            line = line.rstrip()
+        if not line:
+            return None, None
+        # section
+        if line[0] == '[':
+            realline = line
+            for c in COMMENTCHARS:
+                line = line.split(c)[0].rstrip()
+            if line[-1] == "]":
+                return line[1:-1], None
+            return None, realline.strip()
+        # value
+        elif not line[0].isspace():
+            try:
+                name, value = line.split('=', 1)
+                if ":" in name:
+                    raise ValueError()
+            except ValueError:
+                try:
+                    name, value = line.split(":", 1)
+                except ValueError:
+                    self._raise(lineno, 'unexpected line: %r' % line)
+            return name.strip(), value.strip()
+        # continuation
+        else:
+            return None, line.strip()
+
+    def lineof(self, section, name=None):
+        lineno = self._sources.get((section, name))
+        if lineno is not None:
+            return lineno + 1
+
+    def get(self, section, name, default=None, convert=str):
+        try:
+            return convert(self.sections[section][name])
+        except KeyError:
+            return default
+
+    def __getitem__(self, name):
+        if name not in self.sections:
+            raise KeyError(name)
+        return SectionWrapper(self, name)
+
+    def __iter__(self):
+        for name in sorted(self.sections, key=self.lineof):
+            yield SectionWrapper(self, name)
+
+    def __contains__(self, arg):
+        return arg in self.sections
+
+def iscommentline(line):
+    c = line.lstrip()[:1]
+    return c in COMMENTCHARS
--- a/third_party/python/py/py/_io/__init__.py
+++ b/third_party/python/py/py/_io/__init__.py
@@ -1,1 +1,1 @@
-""" input/output helping """
+""" input/output helping """
--- a/third_party/python/py/py/_io/capture.py
+++ b/third_party/python/py/py/_io/capture.py
@@ -1,371 +1,371 @@
-import os
-import sys
-import py
-import tempfile
-
-try:
-    from io import StringIO
-except ImportError:
-    from StringIO import StringIO
-
-if sys.version_info < (3,0):
-    class TextIO(StringIO):
-        def write(self, data):
-            if not isinstance(data, unicode):
-                data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
-            StringIO.write(self, data)
-else:
-    TextIO = StringIO
-
-try:
-    from io import BytesIO
-except ImportError:
-    class BytesIO(StringIO):
-        def write(self, data):
-            if isinstance(data, unicode):
-                raise TypeError("not a byte value: %r" %(data,))
-            StringIO.write(self, data)
-
-patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
-
-class FDCapture:
-    """ Capture IO to/from a given os-level filedescriptor. """
-
-    def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False):
-        """ save targetfd descriptor, and open a new
-            temporary file there.  If no tmpfile is
-            specified a tempfile.Tempfile() will be opened
-            in text mode.
-        """
-        self.targetfd = targetfd
-        if tmpfile is None and targetfd != 0:
-            f = tempfile.TemporaryFile('wb+')
-            tmpfile = dupfile(f, encoding="UTF-8")
-            f.close()
-        self.tmpfile = tmpfile
-        self._savefd = os.dup(self.targetfd)
-        if patchsys:
-            self._oldsys = getattr(sys, patchsysdict[targetfd])
-        if now:
-            self.start()
-
-    def start(self):
-        try:
-            os.fstat(self._savefd)
-        except OSError:
-            raise ValueError("saved filedescriptor not valid, "
-                "did you call start() twice?")
-        if self.targetfd == 0 and not self.tmpfile:
-            fd = os.open(devnullpath, os.O_RDONLY)
-            os.dup2(fd, 0)
-            os.close(fd)
-            if hasattr(self, '_oldsys'):
-                setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
-        else:
-            os.dup2(self.tmpfile.fileno(), self.targetfd)
-            if hasattr(self, '_oldsys'):
-                setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
-
-    def done(self):
-        """ unpatch and clean up, returns the self.tmpfile (file object)
-        """
-        os.dup2(self._savefd, self.targetfd)
-        os.close(self._savefd)
-        if self.targetfd != 0:
-            self.tmpfile.seek(0)
-        if hasattr(self, '_oldsys'):
-            setattr(sys, patchsysdict[self.targetfd], self._oldsys)
-        return self.tmpfile
-
-    def writeorg(self, data):
-        """ write a string to the original file descriptor
-        """
-        tempfp = tempfile.TemporaryFile()
-        try:
-            os.dup2(self._savefd, tempfp.fileno())
-            tempfp.write(data)
-        finally:
-            tempfp.close()
-
-
-def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
-    """ return a new open file object that's a duplicate of f
-
-        mode is duplicated if not given, 'buffering' controls
-        buffer size (defaulting to no buffering) and 'raising'
-        defines whether an exception is raised when an incompatible
-        file object is passed in (if raising is False, the file
-        object itself will be returned)
-    """
-    try:
-        fd = f.fileno()
-        mode = mode or f.mode
-    except AttributeError:
-        if raising:
-            raise
-        return f
-    newfd = os.dup(fd)
-    if sys.version_info >= (3,0):
-        if encoding is not None:
-            mode = mode.replace("b", "")
-            buffering = True
-        return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
-    else:
-        f = os.fdopen(newfd, mode, buffering)
-        if encoding is not None:
-            return EncodedFile(f, encoding)
-        return f
-
-class EncodedFile(object):
-    def __init__(self, _stream, encoding):
-        self._stream = _stream
-        self.encoding = encoding
-
-    def write(self, obj):
-        if isinstance(obj, unicode):
-            obj = obj.encode(self.encoding)
-        elif isinstance(obj, str):
-            pass
-        else:
-            obj = str(obj)
-        self._stream.write(obj)
-
-    def writelines(self, linelist):
-        data = ''.join(linelist)
-        self.write(data)
-
-    def __getattr__(self, name):
-        return getattr(self._stream, name)
-
-class Capture(object):
-    def call(cls, func, *args, **kwargs):
-        """ return a (res, out, err) tuple where
-            out and err represent the output/error output
-            during function execution.
-            call the given function with args/kwargs
-            and capture output/error during its execution.
-        """
-        so = cls()
-        try:
-            res = func(*args, **kwargs)
-        finally:
-            out, err = so.reset()
-        return res, out, err
-    call = classmethod(call)
-
-    def reset(self):
-        """ reset sys.stdout/stderr and return captured output as strings. """
-        if hasattr(self, '_reset'):
-            raise ValueError("was already reset")
-        self._reset = True
-        outfile, errfile = self.done(save=False)
-        out, err = "", ""
-        if outfile and not outfile.closed:
-            out = outfile.read()
-            outfile.close()
-        if errfile and errfile != outfile and not errfile.closed:
-            err = errfile.read()
-            errfile.close()
-        return out, err
-
-    def suspend(self):
-        """ return current snapshot captures, memorize tempfiles. """
-        outerr = self.readouterr()
-        outfile, errfile = self.done()
-        return outerr
-
-
-class StdCaptureFD(Capture):
-    """ This class allows to capture writes to FD1 and FD2
-        and may connect a NULL file to FD0 (and prevent
-        reads from sys.stdin).  If any of the 0,1,2 file descriptors
-        is invalid it will not be captured.
-    """
-    def __init__(self, out=True, err=True, mixed=False,
-        in_=True, patchsys=True, now=True):
-        self._options = {
-            "out": out,
-            "err": err,
-            "mixed": mixed,
-            "in_": in_,
-            "patchsys": patchsys,
-            "now": now,
-        }
-        self._save()
-        if now:
-            self.startall()
-
-    def _save(self):
-        in_ = self._options['in_']
-        out = self._options['out']
-        err = self._options['err']
-        mixed = self._options['mixed']
-        patchsys = self._options['patchsys']
-        if in_:
-            try:
-                self.in_ = FDCapture(0, tmpfile=None, now=False,
-                    patchsys=patchsys)
-            except OSError:
-                pass
-        if out:
-            tmpfile = None
-            if hasattr(out, 'write'):
-                tmpfile = out
-            try:
-                self.out = FDCapture(1, tmpfile=tmpfile,
-                           now=False, patchsys=patchsys)
-                self._options['out'] = self.out.tmpfile
-            except OSError:
-                pass
-        if err:
-            if out and mixed:
-                tmpfile = self.out.tmpfile
-            elif hasattr(err, 'write'):
-                tmpfile = err
-            else:
-                tmpfile = None
-            try:
-                self.err = FDCapture(2, tmpfile=tmpfile,
-                           now=False, patchsys=patchsys)
-                self._options['err'] = self.err.tmpfile
-            except OSError:
-                pass
-
-    def startall(self):
-        if hasattr(self, 'in_'):
-            self.in_.start()
-        if hasattr(self, 'out'):
-            self.out.start()
-        if hasattr(self, 'err'):
-            self.err.start()
-
-    def resume(self):
-        """ resume capturing with original temp files. """
-        self.startall()
-
-    def done(self, save=True):
-        """ return (outfile, errfile) and stop capturing. """
-        outfile = errfile = None
-        if hasattr(self, 'out') and not self.out.tmpfile.closed:
-            outfile = self.out.done()
-        if hasattr(self, 'err') and not self.err.tmpfile.closed:
-            errfile = self.err.done()
-        if hasattr(self, 'in_'):
-            tmpfile = self.in_.done()
-        if save:
-            self._save()
-        return outfile, errfile
-
-    def readouterr(self):
-        """ return snapshot value of stdout/stderr capturings. """
-        if hasattr(self, "out"):
-            out = self._readsnapshot(self.out.tmpfile)
-        else:
-            out = ""
-        if hasattr(self, "err"):
-            err = self._readsnapshot(self.err.tmpfile)
-        else:
-            err = ""
-        return [out, err]
-
-    def _readsnapshot(self, f):
-        f.seek(0)
-        res = f.read()
-        enc = getattr(f, "encoding", None)
-        if enc:
-            res = py.builtin._totext(res, enc, "replace")
-        f.truncate(0)
-        f.seek(0)
-        return res
-
-
-class StdCapture(Capture):
-    """ This class allows to capture writes to sys.stdout|stderr "in-memory"
-        and will raise errors on tries to read from sys.stdin. It only
-        modifies sys.stdout|stderr|stdin attributes and does not
-        touch underlying File Descriptors (use StdCaptureFD for that).
-    """
-    def __init__(self, out=True, err=True, in_=True, mixed=False, now=True):
-        self._oldout = sys.stdout
-        self._olderr = sys.stderr
-        self._oldin  = sys.stdin
-        if out and not hasattr(out, 'file'):
-            out = TextIO()
-        self.out = out
-        if err:
-            if mixed:
-                err = out
-            elif not hasattr(err, 'write'):
-                err = TextIO()
-        self.err = err
-        self.in_ = in_
-        if now:
-            self.startall()
-
-    def startall(self):
-        if self.out:
-            sys.stdout = self.out
-        if self.err:
-            sys.stderr = self.err
-        if self.in_:
-            sys.stdin  = self.in_  = DontReadFromInput()
-
-    def done(self, save=True):
-        """ return (outfile, errfile) and stop capturing. """
-        outfile = errfile = None
-        if self.out and not self.out.closed:
-            sys.stdout = self._oldout
-            outfile = self.out
-            outfile.seek(0)
-        if self.err and not self.err.closed:
-            sys.stderr = self._olderr
-            errfile = self.err
-            errfile.seek(0)
-        if self.in_:
-            sys.stdin = self._oldin
-        return outfile, errfile
-
-    def resume(self):
-        """ resume capturing with original temp files. """
-        self.startall()
-
-    def readouterr(self):
-        """ return snapshot value of stdout/stderr capturings. """
-        out = err = ""
-        if self.out:
-            out = self.out.getvalue()
-            self.out.truncate(0)
-            self.out.seek(0)
-        if self.err:
-            err = self.err.getvalue()
-            self.err.truncate(0)
-            self.err.seek(0)
-        return out, err
-
-class DontReadFromInput:
-    """Temporary stub class.  Ideally when stdin is accessed, the
-    capturing should be turned off, with possibly all data captured
-    so far sent to the screen.  This should be configurable, though,
-    because in automated test runs it is better to crash than
-    hang indefinitely.
-    """
-    def read(self, *args):
-        raise IOError("reading from stdin while output is captured")
-    readline = read
-    readlines = read
-    __iter__ = read
-
-    def fileno(self):
-        raise ValueError("redirected Stdin is pseudofile, has no fileno()")
-    def isatty(self):
-        return False
-    def close(self):
-        pass
-
-try:
-    devnullpath = os.devnull
-except AttributeError:
-    if os.name == 'nt':
-        devnullpath = 'NUL'
-    else:
-        devnullpath = '/dev/null'
+import os
+import sys
+import py
+import tempfile
+
+try:
+    from io import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+if sys.version_info < (3,0):
+    class TextIO(StringIO):
+        def write(self, data):
+            if not isinstance(data, unicode):
+                data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
+            StringIO.write(self, data)
+else:
+    TextIO = StringIO
+
+try:
+    from io import BytesIO
+except ImportError:
+    class BytesIO(StringIO):
+        def write(self, data):
+            if isinstance(data, unicode):
+                raise TypeError("not a byte value: %r" %(data,))
+            StringIO.write(self, data)
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
+class FDCapture:
+    """ Capture IO to/from a given os-level filedescriptor. """
+
+    def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False):
+        """ save targetfd descriptor, and open a new
+            temporary file there.  If no tmpfile is
+            specified a tempfile.Tempfile() will be opened
+            in text mode.
+        """
+        self.targetfd = targetfd
+        if tmpfile is None and targetfd != 0:
+            f = tempfile.TemporaryFile('wb+')
+            tmpfile = dupfile(f, encoding="UTF-8")
+            f.close()
+        self.tmpfile = tmpfile
+        self._savefd = os.dup(self.targetfd)
+        if patchsys:
+            self._oldsys = getattr(sys, patchsysdict[targetfd])
+        if now:
+            self.start()
+
+    def start(self):
+        try:
+            os.fstat(self._savefd)
+        except OSError:
+            raise ValueError("saved filedescriptor not valid, "
+                "did you call start() twice?")
+        if self.targetfd == 0 and not self.tmpfile:
+            fd = os.open(devnullpath, os.O_RDONLY)
+            os.dup2(fd, 0)
+            os.close(fd)
+            if hasattr(self, '_oldsys'):
+                setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+        else:
+            os.dup2(self.tmpfile.fileno(), self.targetfd)
+            if hasattr(self, '_oldsys'):
+                setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+    def done(self):
+        """ unpatch and clean up, returns the self.tmpfile (file object)
+        """
+        os.dup2(self._savefd, self.targetfd)
+        os.close(self._savefd)
+        if self.targetfd != 0:
+            self.tmpfile.seek(0)
+        if hasattr(self, '_oldsys'):
+            setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+        return self.tmpfile
+
+    def writeorg(self, data):
+        """ write a string to the original file descriptor
+        """
+        tempfp = tempfile.TemporaryFile()
+        try:
+            os.dup2(self._savefd, tempfp.fileno())
+            tempfp.write(data)
+        finally:
+            tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+    """ return a new open file object that's a duplicate of f
+
+        mode is duplicated if not given, 'buffering' controls
+        buffer size (defaulting to no buffering) and 'raising'
+        defines whether an exception is raised when an incompatible
+        file object is passed in (if raising is False, the file
+        object itself will be returned)
+    """
+    try:
+        fd = f.fileno()
+        mode = mode or f.mode
+    except AttributeError:
+        if raising:
+            raise
+        return f
+    newfd = os.dup(fd)
+    if sys.version_info >= (3,0):
+        if encoding is not None:
+            mode = mode.replace("b", "")
+            buffering = True
+        return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+    else:
+        f = os.fdopen(newfd, mode, buffering)
+        if encoding is not None:
+            return EncodedFile(f, encoding)
+        return f
+
+class EncodedFile(object):
+    def __init__(self, _stream, encoding):
+        self._stream = _stream
+        self.encoding = encoding
+
+    def write(self, obj):
+        if isinstance(obj, unicode):
+            obj = obj.encode(self.encoding)
+        elif isinstance(obj, str):
+            pass
+        else:
+            obj = str(obj)
+        self._stream.write(obj)
+
+    def writelines(self, linelist):
+        data = ''.join(linelist)
+        self.write(data)
+
+    def __getattr__(self, name):
+        return getattr(self._stream, name)
+
+class Capture(object):
+    def call(cls, func, *args, **kwargs):
+        """ return a (res, out, err) tuple where
+            out and err represent the output/error output
+            during function execution.
+            call the given function with args/kwargs
+            and capture output/error during its execution.
+        """
+        so = cls()
+        try:
+            res = func(*args, **kwargs)
+        finally:
+            out, err = so.reset()
+        return res, out, err
+    call = classmethod(call)
+
+    def reset(self):
+        """ reset sys.stdout/stderr and return captured output as strings. """
+        if hasattr(self, '_reset'):
+            raise ValueError("was already reset")
+        self._reset = True
+        outfile, errfile = self.done(save=False)
+        out, err = "", ""
+        if outfile and not outfile.closed:
+            out = outfile.read()
+            outfile.close()
+        if errfile and errfile != outfile and not errfile.closed:
+            err = errfile.read()
+            errfile.close()
+        return out, err
+
+    def suspend(self):
+        """ return current snapshot captures, memorize tempfiles. """
+        outerr = self.readouterr()
+        outfile, errfile = self.done()
+        return outerr
+
+
+class StdCaptureFD(Capture):
+    """ This class allows to capture writes to FD1 and FD2
+        and may connect a NULL file to FD0 (and prevent
+        reads from sys.stdin).  If any of the 0,1,2 file descriptors
+        is invalid it will not be captured.
+    """
+    def __init__(self, out=True, err=True, mixed=False,
+        in_=True, patchsys=True, now=True):
+        self._options = {
+            "out": out,
+            "err": err,
+            "mixed": mixed,
+            "in_": in_,
+            "patchsys": patchsys,
+            "now": now,
+        }
+        self._save()
+        if now:
+            self.startall()
+
+    def _save(self):
+        in_ = self._options['in_']
+        out = self._options['out']
+        err = self._options['err']
+        mixed = self._options['mixed']
+        patchsys = self._options['patchsys']
+        if in_:
+            try:
+                self.in_ = FDCapture(0, tmpfile=None, now=False,
+                    patchsys=patchsys)
+            except OSError:
+                pass
+        if out:
+            tmpfile = None
+            if hasattr(out, 'write'):
+                tmpfile = out
+            try:
+                self.out = FDCapture(1, tmpfile=tmpfile,
+                           now=False, patchsys=patchsys)
+                self._options['out'] = self.out.tmpfile
+            except OSError:
+                pass
+        if err:
+            if out and mixed:
+                tmpfile = self.out.tmpfile
+            elif hasattr(err, 'write'):
+                tmpfile = err
+            else:
+                tmpfile = None
+            try:
+                self.err = FDCapture(2, tmpfile=tmpfile,
+                           now=False, patchsys=patchsys)
+                self._options['err'] = self.err.tmpfile
+            except OSError:
+                pass
+
+    def startall(self):
+        if hasattr(self, 'in_'):
+            self.in_.start()
+        if hasattr(self, 'out'):
+            self.out.start()
+        if hasattr(self, 'err'):
+            self.err.start()
+
+    def resume(self):
+        """ resume capturing with original temp files. """
+        self.startall()
+
+    def done(self, save=True):
+        """ return (outfile, errfile) and stop capturing. """
+        outfile = errfile = None
+        if hasattr(self, 'out') and not self.out.tmpfile.closed:
+            outfile = self.out.done()
+        if hasattr(self, 'err') and not self.err.tmpfile.closed:
+            errfile = self.err.done()
+        if hasattr(self, 'in_'):
+            tmpfile = self.in_.done()
+        if save:
+            self._save()
+        return outfile, errfile
+
+    def readouterr(self):
+        """ return snapshot value of stdout/stderr capturings. """
+        if hasattr(self, "out"):
+            out = self._readsnapshot(self.out.tmpfile)
+        else:
+            out = ""
+        if hasattr(self, "err"):
+            err = self._readsnapshot(self.err.tmpfile)
+        else:
+            err = ""
+        return [out, err]
+
+    def _readsnapshot(self, f):
+        f.seek(0)
+        res = f.read()
+        enc = getattr(f, "encoding", None)
+        if enc:
+            res = py.builtin._totext(res, enc, "replace")
+        f.truncate(0)
+        f.seek(0)
+        return res
+
+
+class StdCapture(Capture):
+    """ This class allows to capture writes to sys.stdout|stderr "in-memory"
+        and will raise errors on tries to read from sys.stdin. It only
+        modifies sys.stdout|stderr|stdin attributes and does not
+        touch underlying File Descriptors (use StdCaptureFD for that).
+    """
+    def __init__(self, out=True, err=True, in_=True, mixed=False, now=True):
+        self._oldout = sys.stdout
+        self._olderr = sys.stderr
+        self._oldin  = sys.stdin
+        if out and not hasattr(out, 'file'):
+            out = TextIO()
+        self.out = out
+        if err:
+            if mixed:
+                err = out
+            elif not hasattr(err, 'write'):
+                err = TextIO()
+        self.err = err
+        self.in_ = in_
+        if now:
+            self.startall()
+
+    def startall(self):
+        if self.out:
+            sys.stdout = self.out
+        if self.err:
+            sys.stderr = self.err
+        if self.in_:
+            sys.stdin  = self.in_  = DontReadFromInput()
+
+    def done(self, save=True):
+        """ return (outfile, errfile) and stop capturing. """
+        outfile = errfile = None
+        if self.out and not self.out.closed:
+            sys.stdout = self._oldout
+            outfile = self.out
+            outfile.seek(0)
+        if self.err and not self.err.closed:
+            sys.stderr = self._olderr
+            errfile = self.err
+            errfile.seek(0)
+        if self.in_:
+            sys.stdin = self._oldin
+        return outfile, errfile
+
+    def resume(self):
+        """ resume capturing with original temp files. """
+        self.startall()
+
+    def readouterr(self):
+        """ return snapshot value of stdout/stderr capturings. """
+        out = err = ""
+        if self.out:
+            out = self.out.getvalue()
+            self.out.truncate(0)
+            self.out.seek(0)
+        if self.err:
+            err = self.err.getvalue()
+            self.err.truncate(0)
+            self.err.seek(0)
+        return out, err
+
+class DontReadFromInput:
+    """Temporary stub class.  Ideally when stdin is accessed, the
+    capturing should be turned off, with possibly all data captured
+    so far sent to the screen.  This should be configurable, though,
+    because in automated test runs it is better to crash than
+    hang indefinitely.
+    """
+    def read(self, *args):
+        raise IOError("reading from stdin while output is captured")
+    readline = read
+    readlines = read
+    __iter__ = read
+
+    def fileno(self):
+        raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+    def isatty(self):
+        return False
+    def close(self):
+        pass
+
+try:
+    devnullpath = os.devnull
+except AttributeError:
+    if os.name == 'nt':
+        devnullpath = 'NUL'
+    else:
+        devnullpath = '/dev/null'
--- a/third_party/python/py/py/_io/saferepr.py
+++ b/third_party/python/py/py/_io/saferepr.py
@@ -1,71 +1,71 @@
-import py
-import sys
-
-builtin_repr = repr
-
-reprlib = py.builtin._tryimport('repr', 'reprlib')
-
-class SafeRepr(reprlib.Repr):
-    """ subclass of repr.Repr that limits the resulting size of repr()
-        and includes information on exceptions raised during the call.
-    """
-    def repr(self, x):
-        return self._callhelper(reprlib.Repr.repr, self, x)
-
-    def repr_unicode(self, x, level):
-        # Strictly speaking wrong on narrow builds
-        def repr(u):
-            if "'" not in u:
-                return py.builtin._totext("'%s'") % u
-            elif '"' not in u:
-                return py.builtin._totext('"%s"') % u
-            else:
-                return py.builtin._totext("'%s'") % u.replace("'", r"\'")
-        s = repr(x[:self.maxstring])
-        if len(s) > self.maxstring:
-            i = max(0, (self.maxstring-3)//2)
-            j = max(0, self.maxstring-3-i)
-            s = repr(x[:i] + x[len(x)-j:])
-            s = s[:i] + '...' + s[len(s)-j:]
-        return s
-
-    def repr_instance(self, x, level):
-        return self._callhelper(builtin_repr, x)
-
-    def _callhelper(self, call, x, *args):
-        try:
-            # Try the vanilla repr and make sure that the result is a string
-            s = call(x, *args)
-        except py.builtin._sysex:
-            raise
-        except:
-            cls, e, tb = sys.exc_info()
-            exc_name = getattr(cls, '__name__', 'unknown')
-            try:
-                exc_info = str(e)
-            except py.builtin._sysex:
-                raise
-            except:
-                exc_info = 'unknown'
-            return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
-                exc_name, exc_info, x.__class__.__name__, id(x))
-        else:
-            if len(s) > self.maxsize:
-                i = max(0, (self.maxsize-3)//2)
-                j = max(0, self.maxsize-3-i)
-                s = s[:i] + '...' + s[len(s)-j:]
-            return s
-
-def saferepr(obj, maxsize=240):
-    """ return a size-limited safe repr-string for the given object.
-    Failing __repr__ functions of user instances will be represented
-    with a short exception info and 'saferepr' generally takes
-    care to never raise exceptions itself.  This function is a wrapper
-    around the Repr/reprlib functionality of the standard 2.6 lib.
-    """
-    # review exception handling
-    srepr = SafeRepr()
-    srepr.maxstring = maxsize
-    srepr.maxsize = maxsize
-    srepr.maxother = 160
-    return srepr.repr(obj)
+import py
+import sys
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+class SafeRepr(reprlib.Repr):
+    """ subclass of repr.Repr that limits the resulting size of repr()
+        and includes information on exceptions raised during the call.
+    """
+    def repr(self, x):
+        return self._callhelper(reprlib.Repr.repr, self, x)
+
+    def repr_unicode(self, x, level):
+        # Strictly speaking wrong on narrow builds
+        def repr(u):
+            if "'" not in u:
+                return py.builtin._totext("'%s'") % u
+            elif '"' not in u:
+                return py.builtin._totext('"%s"') % u
+            else:
+                return py.builtin._totext("'%s'") % u.replace("'", r"\'")
+        s = repr(x[:self.maxstring])
+        if len(s) > self.maxstring:
+            i = max(0, (self.maxstring-3)//2)
+            j = max(0, self.maxstring-3-i)
+            s = repr(x[:i] + x[len(x)-j:])
+            s = s[:i] + '...' + s[len(s)-j:]
+        return s
+
+    def repr_instance(self, x, level):
+        return self._callhelper(builtin_repr, x)
+
+    def _callhelper(self, call, x, *args):
+        try:
+            # Try the vanilla repr and make sure that the result is a string
+            s = call(x, *args)
+        except py.builtin._sysex:
+            raise
+        except:
+            cls, e, tb = sys.exc_info()
+            exc_name = getattr(cls, '__name__', 'unknown')
+            try:
+                exc_info = str(e)
+            except py.builtin._sysex:
+                raise
+            except:
+                exc_info = 'unknown'
+            return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
+                exc_name, exc_info, x.__class__.__name__, id(x))
+        else:
+            if len(s) > self.maxsize:
+                i = max(0, (self.maxsize-3)//2)
+                j = max(0, self.maxsize-3-i)
+                s = s[:i] + '...' + s[len(s)-j:]
+            return s
+
+def saferepr(obj, maxsize=240):
+    """ return a size-limited safe repr-string for the given object.
+    Failing __repr__ functions of user instances will be represented
+    with a short exception info and 'saferepr' generally takes
+    care to never raise exceptions itself.  This function is a wrapper
+    around the Repr/reprlib functionality of the standard 2.6 lib.
+    """
+    # review exception handling
+    srepr = SafeRepr()
+    srepr.maxstring = maxsize
+    srepr.maxsize = maxsize
+    srepr.maxother = 160
+    return srepr.repr(obj)
--- a/third_party/python/py/py/_io/terminalwriter.py
+++ b/third_party/python/py/py/_io/terminalwriter.py
@@ -1,348 +1,357 @@
-"""
-
-Helper functions for writing to terminals and files.
-
-"""
-
-
-import sys, os
-import py
-py3k = sys.version_info[0] >= 3
-from py.builtin import text, bytes
-
-win32_and_ctypes = False
-colorama = None
-if sys.platform == "win32":
-    try:
-        import colorama
-    except ImportError:
-        try:
-            import ctypes
-            win32_and_ctypes = True
-        except ImportError:
-            pass
-
-
-def _getdimensions():
-    import termios,fcntl,struct
-    call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8)
-    height,width = struct.unpack( "hhhh", call ) [:2]
-    return height, width
-
-
-def get_terminal_width():
-    height = width = 0
-    try:
-        height, width = _getdimensions()
-    except py.builtin._sysex:
-        raise
-    except:
-        # pass to fallback below
-        pass
-
-    if width == 0:
-        # FALLBACK:
-        # * some exception happened
-        # * or this is emacs terminal which reports (0,0)
-        width = int(os.environ.get('COLUMNS', 80))
-
-    # XXX the windows getdimensions may be bogus, let's sanify a bit
-    if width < 40:
-        width = 80
-    return width
-
-terminal_width = get_terminal_width()
-
-# XXX unify with _escaped func below
-def ansi_print(text, esc, file=None, newline=True, flush=False):
-    if file is None:
-        file = sys.stderr
-    text = text.rstrip()
-    if esc and not isinstance(esc, tuple):
-        esc = (esc,)
-    if esc and sys.platform != "win32" and file.isatty():
-        text = (''.join(['\x1b[%sm' % cod for cod in esc])  +
-                text +
-                '\x1b[0m')     # ANSI color code "reset"
-    if newline:
-        text += '\n'
-
-    if esc and win32_and_ctypes and file.isatty():
-        if 1 in esc:
-            bold = True
-            esc = tuple([x for x in esc if x != 1])
-        else:
-            bold = False
-        esctable = {()   : FOREGROUND_WHITE,                 # normal
-                    (31,): FOREGROUND_RED,                   # red
-                    (32,): FOREGROUND_GREEN,                 # green
-                    (33,): FOREGROUND_GREEN|FOREGROUND_RED,  # yellow
-                    (34,): FOREGROUND_BLUE,                  # blue
-                    (35,): FOREGROUND_BLUE|FOREGROUND_RED,   # purple
-                    (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
-                    (37,): FOREGROUND_WHITE,                 # white
-                    (39,): FOREGROUND_WHITE,                 # reset
-                    }
-        attr = esctable.get(esc, FOREGROUND_WHITE)
-        if bold:
-            attr |= FOREGROUND_INTENSITY
-        STD_OUTPUT_HANDLE = -11
-        STD_ERROR_HANDLE = -12
-        if file is sys.stderr:
-            handle = GetStdHandle(STD_ERROR_HANDLE)
-        else:
-            handle = GetStdHandle(STD_OUTPUT_HANDLE)
-        oldcolors = GetConsoleInfo(handle).wAttributes
-        attr |= (oldcolors & 0x0f0)
-        SetConsoleTextAttribute(handle, attr)
-        while len(text) > 32768:
-            file.write(text[:32768])
-            text = text[32768:]
-        if text:
-            file.write(text)
-        SetConsoleTextAttribute(handle, oldcolors)
-    else:
-        file.write(text)
-
-    if flush:
-        file.flush()
-
-def should_do_markup(file):
-    if os.environ.get('PY_COLORS') == '1':
-        return True
-    if os.environ.get('PY_COLORS') == '0':
-        return False
-    return hasattr(file, 'isatty') and file.isatty() \
-           and os.environ.get('TERM') != 'dumb' \
-           and not (sys.platform.startswith('java') and os._name == 'nt')
-
-class TerminalWriter(object):
-    _esctable = dict(black=30, red=31, green=32, yellow=33,
-                     blue=34, purple=35, cyan=36, white=37,
-                     Black=40, Red=41, Green=42, Yellow=43,
-                     Blue=44, Purple=45, Cyan=46, White=47,
-                     bold=1, light=2, blink=5, invert=7)
-
-    # XXX deprecate stringio argument
-    def __init__(self, file=None, stringio=False, encoding=None):
-        if file is None:
-            if stringio:
-                self.stringio = file = py.io.TextIO()
-            else:
-                file = py.std.sys.stdout
-        elif py.builtin.callable(file) and not (
-             hasattr(file, "write") and hasattr(file, "flush")):
-            file = WriteFile(file, encoding=encoding)
-        if hasattr(file, "isatty") and file.isatty() and colorama:
-            file = colorama.AnsiToWin32(file).stream
-        self.encoding = encoding or getattr(file, 'encoding', "utf-8")
-        self._file = file
-        self.fullwidth = get_terminal_width()
-        self.hasmarkup = should_do_markup(file)
-        self._lastlen = 0
-
-    def _escaped(self, text, esc):
-        if esc and self.hasmarkup:
-            text = (''.join(['\x1b[%sm' % cod for cod in esc])  +
-                text +'\x1b[0m')
-        return text
-
-    def markup(self, text, **kw):
-        esc = []
-        for name in kw:
-            if name not in self._esctable:
-                raise ValueError("unknown markup: %r" %(name,))
-            if kw[name]:
-                esc.append(self._esctable[name])
-        return self._escaped(text, tuple(esc))
-
-    def sep(self, sepchar, title=None, fullwidth=None, **kw):
-        if fullwidth is None:
-            fullwidth = self.fullwidth
-        # the goal is to have the line be as long as possible
-        # under the condition that len(line) <= fullwidth
-        if sys.platform == "win32":
-            # if we print in the last column on windows we are on a
-            # new line but there is no way to verify/neutralize this
-            # (we may not know the exact line width)
-            # so let's be defensive to avoid empty lines in the output
-            fullwidth -= 1
-        if title is not None:
-            # we want 2 + 2*len(fill) + len(title) <= fullwidth
-            # i.e.    2 + 2*len(sepchar)*N + len(title) <= fullwidth
-            #         2*len(sepchar)*N <= fullwidth - len(title) - 2
-            #         N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
-            N = (fullwidth - len(title) - 2) // (2*len(sepchar))
-            fill = sepchar * N
-            line = "%s %s %s" % (fill, title, fill)
-        else:
-            # we want len(sepchar)*N <= fullwidth
-            # i.e.    N <= fullwidth // len(sepchar)
-            line = sepchar * (fullwidth // len(sepchar))
-        # in some situations there is room for an extra sepchar at the right,
-        # in particular if we consider that with a sepchar like "_ " the
-        # trailing space is not important at the end of the line
-        if len(line) + len(sepchar.rstrip()) <= fullwidth:
-            line += sepchar.rstrip()
-
-        self.line(line, **kw)
-
-    def write(self, msg, **kw):
-        if msg:
-            if not isinstance(msg, (bytes, text)):
-                msg = text(msg)
-            if self.hasmarkup and kw:
-                markupmsg = self.markup(msg, **kw)
-            else:
-                markupmsg = msg
-            write_out(self._file, markupmsg)
-
-    def line(self, s='', **kw):
-        self.write(s, **kw)
-        self._checkfill(s)
-        self.write('\n')
-
-    def reline(self, line, **kw):
-        if not self.hasmarkup:
-            raise ValueError("cannot use rewrite-line without terminal")
-        self.write(line, **kw)
-        self._checkfill(line)
-        self.write('\r')
-        self._lastlen = len(line)
-
-    def _checkfill(self, line):
-        diff2last = self._lastlen - len(line)
-        if diff2last > 0:
-            self.write(" " * diff2last)
-
-class Win32ConsoleWriter(TerminalWriter):
-    def write(self, msg, **kw):
-        if msg:
-            if not isinstance(msg, (bytes, text)):
-                msg = text(msg)
-            oldcolors = None
-            if self.hasmarkup and kw:
-                handle = GetStdHandle(STD_OUTPUT_HANDLE)
-                oldcolors = GetConsoleInfo(handle).wAttributes
-                default_bg = oldcolors & 0x00F0
-                attr = default_bg
-                if kw.pop('bold', False):
-                    attr |= FOREGROUND_INTENSITY
-
-                if kw.pop('red', False):
-                    attr |= FOREGROUND_RED
-                elif kw.pop('blue', False):
-                    attr |= FOREGROUND_BLUE
-                elif kw.pop('green', False):
-                    attr |= FOREGROUND_GREEN
-                elif kw.pop('yellow', False):
-                    attr |= FOREGROUND_GREEN|FOREGROUND_RED
-                else:
-                    attr |= oldcolors & 0x0007
-
-                SetConsoleTextAttribute(handle, attr)
-            write_out(self._file, msg)
-            if oldcolors:
-                SetConsoleTextAttribute(handle, oldcolors)
-
-class WriteFile(object):
-    def __init__(self, writemethod, encoding=None):
-        self.encoding = encoding
-        self._writemethod = writemethod
-
-    def write(self, data):
-        if self.encoding:
-            data = data.encode(self.encoding, "replace")
-        self._writemethod(data)
-
-    def flush(self):
-        return
-
-
-if win32_and_ctypes:
-    TerminalWriter = Win32ConsoleWriter
-    import ctypes
-    from ctypes import wintypes
-
-    # ctypes access to the Windows console
-    STD_OUTPUT_HANDLE = -11
-    STD_ERROR_HANDLE  = -12
-    FOREGROUND_BLACK     = 0x0000 # black text
-    FOREGROUND_BLUE      = 0x0001 # text color contains blue.
-    FOREGROUND_GREEN     = 0x0002 # text color contains green.
-    FOREGROUND_RED       = 0x0004 # text color contains red.
-    FOREGROUND_WHITE     = 0x0007
-    FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
-    BACKGROUND_BLACK     = 0x0000 # background color black
-    BACKGROUND_BLUE      = 0x0010 # background color contains blue.
-    BACKGROUND_GREEN     = 0x0020 # background color contains green.
-    BACKGROUND_RED       = 0x0040 # background color contains red.
-    BACKGROUND_WHITE     = 0x0070
-    BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
-
-    SHORT = ctypes.c_short
-    class COORD(ctypes.Structure):
-        _fields_ = [('X', SHORT),
-                    ('Y', SHORT)]
-    class SMALL_RECT(ctypes.Structure):
-        _fields_ = [('Left', SHORT),
-                    ('Top', SHORT),
-                    ('Right', SHORT),
-                    ('Bottom', SHORT)]
-    class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
-        _fields_ = [('dwSize', COORD),
-                    ('dwCursorPosition', COORD),
-                    ('wAttributes', wintypes.WORD),
-                    ('srWindow', SMALL_RECT),
-                    ('dwMaximumWindowSize', COORD)]
-
-    _GetStdHandle = ctypes.windll.kernel32.GetStdHandle
-    _GetStdHandle.argtypes = [wintypes.DWORD]
-    _GetStdHandle.restype = wintypes.HANDLE
-    def GetStdHandle(kind):
-        return _GetStdHandle(kind)
-
-    SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
-    SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
-    SetConsoleTextAttribute.restype = wintypes.BOOL
-
-    _GetConsoleScreenBufferInfo = \
-        ctypes.windll.kernel32.GetConsoleScreenBufferInfo
-    _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
-                                ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
-    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
-    def GetConsoleInfo(handle):
-        info = CONSOLE_SCREEN_BUFFER_INFO()
-        _GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
-        return info
-
-    def _getdimensions():
-        handle = GetStdHandle(STD_OUTPUT_HANDLE)
-        info = GetConsoleInfo(handle)
-        # Substract one from the width, otherwise the cursor wraps
-        # and the ending \n causes an empty line to display.
-        return info.dwSize.Y, info.dwSize.X - 1
-
-def write_out(fil, msg):
-    # XXX sometimes "msg" is of type bytes, sometimes text which
-    # complicates the situation.  Should we try to enforce unicode?
-    try:
-        # on py27 and above writing out to sys.stdout with an encoding
-        # should usually work for unicode messages (if the encoding is
-        # capable of it)
-        fil.write(msg)
-    except UnicodeEncodeError:
-        # on py26 it might not work because stdout expects bytes
-        if fil.encoding:
-            try:
-                fil.write(msg.encode(fil.encoding))
-            except UnicodeEncodeError:
-                # it might still fail if the encoding is not capable
-                pass
-            else:
-                fil.flush()
-                return
-        # fallback: escape all unicode characters
-        msg = msg.encode("unicode-escape").decode("ascii")
-        fil.write(msg)
-    fil.flush()
+"""
+
+Helper functions for writing to terminals and files.
+
+"""
+
+
+import sys, os
+import py
+py3k = sys.version_info[0] >= 3
+from py.builtin import text, bytes
+
# Optional Windows console support: prefer colorama (pure-python ANSI
# translation) when importable; otherwise fall back to raw ctypes
# console-attribute calls (win32_and_ctypes flags that fallback).
win32_and_ctypes = False
colorama = None
if sys.platform == "win32":
    try:
        import colorama
    except ImportError:
        try:
            import ctypes
            win32_and_ctypes = True
        except ImportError:
            pass
+
+
def _getdimensions():
    """Query the controlling terminal for its (height, width) via ioctl."""
    import termios, fcntl, struct
    raw = fcntl.ioctl(1, termios.TIOCGWINSZ, "\000" * 8)
    rows, cols = struct.unpack("hhhh", raw)[:2]
    return rows, cols
+
+
def get_terminal_width():
    """Return a usable terminal width, falling back to $COLUMNS or 80."""
    width = 0
    try:
        width = _getdimensions()[1]
    except py.builtin._sysex:
        raise
    except:
        # any other failure: use the fallbacks below
        pass

    if width == 0:
        # either an exception happened or this is an emacs-style
        # terminal which reports (0, 0)
        width = int(os.environ.get('COLUMNS', 80))

    # the windows dimensions may be bogus -- sanity-check the result
    if width < 40:
        width = 80
    return width

terminal_width = get_terminal_width()
+
# XXX unify with _escaped func below
def ansi_print(text, esc, file=None, newline=True, flush=False):
    """Print *text* decorated with ANSI (or win32 console) codes *esc*.

    Defaults to sys.stderr; on non-windows ttys the codes are emitted as
    escape sequences, on windows (ctypes fallback) they are translated to
    console text attributes.
    """
    if file is None:
        file = sys.stderr
    text = text.rstrip()
    if esc and not isinstance(esc, tuple):
        esc = (esc,)
    if esc and sys.platform != "win32" and file.isatty():
        text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
                text +
                '\x1b[0m')     # ANSI color code "reset"
    if newline:
        text += '\n'

    if esc and win32_and_ctypes and file.isatty():
        # code 1 means bold; strip it and map the remaining code tuple
        # to a win32 foreground attribute
        bold = 1 in esc
        if bold:
            esc = tuple([x for x in esc if x != 1])
        color_map = {()   : FOREGROUND_WHITE,                 # normal
                     (31,): FOREGROUND_RED,                   # red
                     (32,): FOREGROUND_GREEN,                 # green
                     (33,): FOREGROUND_GREEN|FOREGROUND_RED,  # yellow
                     (34,): FOREGROUND_BLUE,                  # blue
                     (35,): FOREGROUND_BLUE|FOREGROUND_RED,   # purple
                     (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
                     (37,): FOREGROUND_WHITE,                 # white
                     (39,): FOREGROUND_WHITE,                 # reset
                     }
        attr = color_map.get(esc, FOREGROUND_WHITE)
        if bold:
            attr |= FOREGROUND_INTENSITY
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        if file is sys.stderr:
            handle = GetStdHandle(STD_ERROR_HANDLE)
        else:
            handle = GetStdHandle(STD_OUTPUT_HANDLE)
        oldcolors = GetConsoleInfo(handle).wAttributes
        attr |= (oldcolors & 0x0f0)   # keep the current background colour
        SetConsoleTextAttribute(handle, attr)
        # write in chunks of at most 32768 characters
        while len(text) > 32768:
            file.write(text[:32768])
            text = text[32768:]
        if text:
            file.write(text)
        SetConsoleTextAttribute(handle, oldcolors)
    else:
        file.write(text)

    if flush:
        file.flush()
+
def should_do_markup(file):
    """Decide whether ANSI markup should be emitted for *file*.

    The PY_COLORS environment variable ('1'/'0') overrides everything;
    otherwise markup is used only for real, non-dumb terminals.
    """
    py_colors = os.environ.get('PY_COLORS')
    if py_colors == '1':
        return True
    if py_colors == '0':
        return False
    if not (hasattr(file, 'isatty') and file.isatty()):
        return False
    if os.environ.get('TERM') == 'dumb':
        return False
    # jython on windows reports a tty but cannot render ANSI codes
    return not (sys.platform.startswith('java') and os._name == 'nt')
+
class TerminalWriter(object):
    """Write text to a terminal or file, optionally with ANSI markup.

    Markup is only produced when the underlying stream looks like a real
    terminal (see should_do_markup); otherwise text passes through plain.
    """

    # ANSI SGR codes keyed by markup keyword; capitalised names select
    # background colours, lowercase foreground, plus text attributes.
    _esctable = dict(black=30, red=31, green=32, yellow=33,
                     blue=34, purple=35, cyan=36, white=37,
                     Black=40, Red=41, Green=42, Yellow=43,
                     Blue=44, Purple=45, Cyan=46, White=47,
                     bold=1, light=2, blink=5, invert=7)

    # XXX deprecate stringio argument
    def __init__(self, file=None, stringio=False, encoding=None):
        if file is None:
            if stringio:
                self.stringio = file = py.io.TextIO()
            else:
                file = py.std.sys.stdout
        elif py.builtin.callable(file) and not (
             hasattr(file, "write") and hasattr(file, "flush")):
            # a bare callable: wrap it so it quacks like a file
            file = WriteFile(file, encoding=encoding)
        if hasattr(file, "isatty") and file.isatty() and colorama:
            # let colorama translate ANSI sequences for the windows console
            file = colorama.AnsiToWin32(file).stream
        self.encoding = encoding or getattr(file, 'encoding', "utf-8")
        self._file = file
        self.hasmarkup = should_do_markup(file)
        self._lastlen = 0

    @property
    def fullwidth(self):
        # honour an explicit override; otherwise query the terminal anew
        try:
            return self._terminal_width
        except AttributeError:
            return get_terminal_width()

    @fullwidth.setter
    def fullwidth(self, value):
        self._terminal_width = value

    def _escaped(self, text, esc):
        """Wrap *text* in the given ANSI codes when markup is enabled."""
        if not (esc and self.hasmarkup):
            return text
        prefix = ''.join(['\x1b[%sm' % cod for cod in esc])
        return prefix + text + '\x1b[0m'

    def markup(self, text, **kw):
        """Return *text* decorated per the truthy markup keywords in *kw*."""
        codes = []
        for name, active in kw.items():
            if name not in self._esctable:
                raise ValueError("unknown markup: %r" % (name,))
            if active:
                codes.append(self._esctable[name])
        return self._escaped(text, tuple(codes))

    def sep(self, sepchar, title=None, fullwidth=None, **kw):
        """Write a separator line, optionally centring *title* in it.

        The line is made as long as possible while staying <= fullwidth.
        """
        if fullwidth is None:
            fullwidth = self.fullwidth
        if sys.platform == "win32":
            # printing into the last column on windows wraps to a new line
            # and there is no way to verify/neutralize this (we may not
            # know the exact line width), so stay one column short
            fullwidth -= 1
        if title is not None:
            # choose N such that 2 + 2*len(sepchar)*N + len(title) <= fullwidth
            N = (fullwidth - len(title) - 2) // (2*len(sepchar))
            fill = sepchar * N
            line = "%s %s %s" % (fill, title, fill)
        else:
            # largest N with len(sepchar)*N <= fullwidth
            line = sepchar * (fullwidth // len(sepchar))
        # there may be room for one more (rstripped) sepchar on the right;
        # for a sepchar like "_ " the trailing space does not matter
        if len(line) + len(sepchar.rstrip()) <= fullwidth:
            line += sepchar.rstrip()

        self.line(line, **kw)

    def write(self, msg, **kw):
        """Write *msg*, applying markup keywords when markup is enabled."""
        if not msg:
            return
        if not isinstance(msg, (bytes, text)):
            msg = text(msg)
        if self.hasmarkup and kw:
            msg = self.markup(msg, **kw)
        write_out(self._file, msg)

    def line(self, s='', **kw):
        """Write *s* followed by a newline, padding over any re-line residue."""
        self.write(s, **kw)
        self._checkfill(s)
        self.write('\n')

    def reline(self, line, **kw):
        """Overwrite the current line in place (requires markup support)."""
        if not self.hasmarkup:
            raise ValueError("cannot use rewrite-line without terminal")
        self.write(line, **kw)
        self._checkfill(line)
        self.write('\r')
        self._lastlen = len(line)

    def _checkfill(self, line):
        # pad with spaces so leftovers of a longer, rewritten line vanish
        pad = self._lastlen - len(line)
        if pad > 0:
            self.write(" " * pad)
+
class Win32ConsoleWriter(TerminalWriter):
    """TerminalWriter variant that drives the windows console via ctypes."""

    def write(self, msg, **kw):
        if not msg:
            return
        if not isinstance(msg, (bytes, text)):
            msg = text(msg)
        oldcolors = None
        if self.hasmarkup and kw:
            handle = GetStdHandle(STD_OUTPUT_HANDLE)
            oldcolors = GetConsoleInfo(handle).wAttributes
            # keep the current background bits, rebuild the foreground
            attr = oldcolors & 0x00F0
            if kw.pop('bold', False):
                attr |= FOREGROUND_INTENSITY

            if kw.pop('red', False):
                attr |= FOREGROUND_RED
            elif kw.pop('blue', False):
                attr |= FOREGROUND_BLUE
            elif kw.pop('green', False):
                attr |= FOREGROUND_GREEN
            elif kw.pop('yellow', False):
                attr |= FOREGROUND_GREEN|FOREGROUND_RED
            else:
                # no colour requested: keep the current foreground bits
                attr |= oldcolors & 0x0007

            SetConsoleTextAttribute(handle, attr)
        write_out(self._file, msg)
        if oldcolors:
            # restore the attributes that were active before this write
            SetConsoleTextAttribute(handle, oldcolors)
+
class WriteFile(object):
    """Minimal file-like adapter around a bare write callable."""

    def __init__(self, writemethod, encoding=None):
        self.encoding = encoding
        self._writemethod = writemethod

    def write(self, data):
        """Forward *data* to the callable, encoding it first if configured."""
        if self.encoding:
            data = data.encode(self.encoding, "replace")
        self._writemethod(data)

    def flush(self):
        # nothing is buffered here; exists only to satisfy the file API
        return
+
+
# ctypes fallback: colorama was unavailable on win32, so talk to the
# console API directly and swap in the attribute-based writer.
if win32_and_ctypes:
    TerminalWriter = Win32ConsoleWriter
    import ctypes
    from ctypes import wintypes

    # ctypes access to the Windows console
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE  = -12
    FOREGROUND_BLACK     = 0x0000 # black text
    FOREGROUND_BLUE      = 0x0001 # text color contains blue.
    FOREGROUND_GREEN     = 0x0002 # text color contains green.
    FOREGROUND_RED       = 0x0004 # text color contains red.
    FOREGROUND_WHITE     = 0x0007
    FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
    BACKGROUND_BLACK     = 0x0000 # background color black
    BACKGROUND_BLUE      = 0x0010 # background color contains blue.
    BACKGROUND_GREEN     = 0x0020 # background color contains green.
    BACKGROUND_RED       = 0x0040 # background color contains red.
    BACKGROUND_WHITE     = 0x0070
    BACKGROUND_INTENSITY = 0x0080 # background color is intensified.

    SHORT = ctypes.c_short
    # ctypes mirrors of the Win32 console structures (field order and
    # types must match the native layout exactly)
    class COORD(ctypes.Structure):
        _fields_ = [('X', SHORT),
                    ('Y', SHORT)]
    class SMALL_RECT(ctypes.Structure):
        _fields_ = [('Left', SHORT),
                    ('Top', SHORT),
                    ('Right', SHORT),
                    ('Bottom', SHORT)]
    class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
        _fields_ = [('dwSize', COORD),
                    ('dwCursorPosition', COORD),
                    ('wAttributes', wintypes.WORD),
                    ('srWindow', SMALL_RECT),
                    ('dwMaximumWindowSize', COORD)]

    _GetStdHandle = ctypes.windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [wintypes.DWORD]
    _GetStdHandle.restype = wintypes.HANDLE
    def GetStdHandle(kind):
        # thin wrapper so callers never touch the raw ctypes binding
        return _GetStdHandle(kind)

    SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
    SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
    SetConsoleTextAttribute.restype = wintypes.BOOL

    _GetConsoleScreenBufferInfo = \
        ctypes.windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
                                ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
    def GetConsoleInfo(handle):
        # returns the filled CONSOLE_SCREEN_BUFFER_INFO for *handle*
        info = CONSOLE_SCREEN_BUFFER_INFO()
        _GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
        return info

    # shadows the termios-based _getdimensions defined above
    def _getdimensions():
        handle = GetStdHandle(STD_OUTPUT_HANDLE)
        info = GetConsoleInfo(handle)
        # Substract one from the width, otherwise the cursor wraps
        # and the ending \n causes an empty line to display.
        return info.dwSize.Y, info.dwSize.X - 1
+
def write_out(fil, msg):
    """Write *msg* to *fil* and flush, degrading gracefully on encode errors.

    XXX sometimes "msg" is of type bytes, sometimes text, which
    complicates matters; should unicode be enforced at the call sites?
    """
    try:
        # on py27 and above, streams with an encoding usually accept
        # unicode messages directly
        fil.write(msg)
    except UnicodeEncodeError:
        # on py26 the stream may want bytes instead
        if fil.encoding:
            try:
                fil.write(msg.encode(fil.encoding))
            except UnicodeEncodeError:
                # the stream encoding cannot represent the message either
                pass
            else:
                fil.flush()
                return
        # last resort: escape every non-ascii character
        msg = msg.encode("unicode-escape").decode("ascii")
        fil.write(msg)
    fil.flush()
--- a/third_party/python/py/py/_log/__init__.py
+++ b/third_party/python/py/py/_log/__init__.py
@@ -1,2 +1,2 @@
-""" logging API ('producers' and 'consumers' connected via keywords) """
-
+""" logging API ('producers' and 'consumers' connected via keywords) """
+
--- a/third_party/python/py/py/_log/log.py
+++ b/third_party/python/py/py/_log/log.py
@@ -1,186 +1,186 @@
-"""
-basic logging functionality based on a producer/consumer scheme.
-
-XXX implement this API: (maybe put it into slogger.py?)
-
-        log = Logger(
-                    info=py.log.STDOUT,
-                    debug=py.log.STDOUT,
-                    command=None)
-        log.info("hello", "world")
-        log.command("hello", "world")
-
-        log = Logger(info=Logger(something=...),
-                     debug=py.log.STDOUT,
-                     command=None)
-"""
-import py, sys
-
-class Message(object):
-    def __init__(self, keywords, args):
-        self.keywords = keywords
-        self.args = args
-
-    def content(self):
-        return " ".join(map(str, self.args))
-
-    def prefix(self):
-        return "[%s] " % (":".join(self.keywords))
-
-    def __str__(self):
-        return self.prefix() + self.content()
-
-
-class Producer(object):
-    """ (deprecated) Log producer API which sends messages to be logged
-        to a 'consumer' object, which then prints them to stdout,
-        stderr, files, etc. Used extensively by PyPy-1.1.
-    """
-
-    Message = Message  # to allow later customization
-    keywords2consumer = {}
-
-    def __init__(self, keywords, keywordmapper=None, **kw):
-        if hasattr(keywords, 'split'):
-            keywords = tuple(keywords.split())
-        self._keywords = keywords
-        if keywordmapper is None:
-            keywordmapper = default_keywordmapper
-        self._keywordmapper = keywordmapper
-
-    def __repr__(self):
-        return "<py.log.Producer %s>" % ":".join(self._keywords)
-
-    def __getattr__(self, name):
-        if '_' in name:
-            raise AttributeError(name)
-        producer = self.__class__(self._keywords + (name,))
-        setattr(self, name, producer)
-        return producer
-
-    def __call__(self, *args):
-        """ write a message to the appropriate consumer(s) """
-        func = self._keywordmapper.getconsumer(self._keywords)
-        if func is not None:
-            func(self.Message(self._keywords, args))
-
-class KeywordMapper:
-    def __init__(self):
-        self.keywords2consumer = {}
-
-    def getstate(self):
-        return self.keywords2consumer.copy()
-    def setstate(self, state):
-        self.keywords2consumer.clear()
-        self.keywords2consumer.update(state)
-
-    def getconsumer(self, keywords):
-        """ return a consumer matching the given keywords.
-
-            tries to find the most suitable consumer by walking, starting from
-            the back, the list of keywords, the first consumer matching a
-            keyword is returned (falling back to py.log.default)
-        """
-        for i in range(len(keywords), 0, -1):
-            try:
-                return self.keywords2consumer[keywords[:i]]
-            except KeyError:
-                continue
-        return self.keywords2consumer.get('default', default_consumer)
-
-    def setconsumer(self, keywords, consumer):
-        """ set a consumer for a set of keywords. """
-        # normalize to tuples
-        if isinstance(keywords, str):
-            keywords = tuple(filter(None, keywords.split()))
-        elif hasattr(keywords, '_keywords'):
-            keywords = keywords._keywords
-        elif not isinstance(keywords, tuple):
-            raise TypeError("key %r is not a string or tuple" % (keywords,))
-        if consumer is not None and not py.builtin.callable(consumer):
-            if not hasattr(consumer, 'write'):
-                raise TypeError(
-                    "%r should be None, callable or file-like" % (consumer,))
-            consumer = File(consumer)
-        self.keywords2consumer[keywords] = consumer
-
-def default_consumer(msg):
-    """ the default consumer, prints the message to stdout (using 'print') """
-    sys.stderr.write(str(msg)+"\n")
-
-default_keywordmapper = KeywordMapper()
-
-def setconsumer(keywords, consumer):
-    default_keywordmapper.setconsumer(keywords, consumer)
-
-def setstate(state):
-    default_keywordmapper.setstate(state)
-def getstate():
-    return default_keywordmapper.getstate()
-
-#
-# Consumers
-#
-
-class File(object):
-    """ log consumer wrapping a file(-like) object """
-    def __init__(self, f):
-        assert hasattr(f, 'write')
-        #assert isinstance(f, file) or not hasattr(f, 'open')
-        self._file = f
-
-    def __call__(self, msg):
-        """ write a message to the log """
-        self._file.write(str(msg) + "\n")
-        if hasattr(self._file, 'flush'):
-            self._file.flush()
-
-class Path(object):
-    """ log consumer that opens and writes to a Path """
-    def __init__(self, filename, append=False,
-                 delayed_create=False, buffering=False):
-        self._append = append
-        self._filename = str(filename)
-        self._buffering = buffering
-        if not delayed_create:
-            self._openfile()
-
-    def _openfile(self):
-        mode = self._append and 'a' or 'w'
-        f = open(self._filename, mode)
-        self._file = f
-
-    def __call__(self, msg):
-        """ write a message to the log """
-        if not hasattr(self, "_file"):
-            self._openfile()
-        self._file.write(str(msg) + "\n")
-        if not self._buffering:
-            self._file.flush()
-
-def STDOUT(msg):
-    """ consumer that writes to sys.stdout """
-    sys.stdout.write(str(msg)+"\n")
-
-def STDERR(msg):
-    """ consumer that writes to sys.stderr """
-    sys.stderr.write(str(msg)+"\n")
-
-class Syslog:
-    """ consumer that writes to the syslog daemon """
-
-    def __init__(self, priority = None):
-        if priority is None:
-            priority = self.LOG_INFO
-        self.priority = priority
-
-    def __call__(self, msg):
-        """ write a message to the log """
-        py.std.syslog.syslog(self.priority, str(msg))
-
-for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
-    _prio = "LOG_" + _prio
-    try:
-        setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
-    except AttributeError:
-        pass
+"""
+basic logging functionality based on a producer/consumer scheme.
+
+XXX implement this API: (maybe put it into slogger.py?)
+
+        log = Logger(
+                    info=py.log.STDOUT,
+                    debug=py.log.STDOUT,
+                    command=None)
+        log.info("hello", "world")
+        log.command("hello", "world")
+
+        log = Logger(info=Logger(something=...),
+                     debug=py.log.STDOUT,
+                     command=None)
+"""
+import py, sys
+
+class Message(object):
+    def __init__(self, keywords, args):
+        self.keywords = keywords
+        self.args = args
+
+    def content(self):
+        return " ".join(map(str, self.args))
+
+    def prefix(self):
+        return "[%s] " % (":".join(self.keywords))
+
+    def __str__(self):
+        return self.prefix() + self.content()
+
+
+class Producer(object):
+    """ (deprecated) Log producer API which sends messages to be logged
+        to a 'consumer' object, which then prints them to stdout,
+        stderr, files, etc. Used extensively by PyPy-1.1.
+    """
+
+    Message = Message  # to allow later customization
+    keywords2consumer = {}
+
+    def __init__(self, keywords, keywordmapper=None, **kw):
+        if hasattr(keywords, 'split'):
+            keywords = tuple(keywords.split())
+        self._keywords = keywords
+        if keywordmapper is None:
+            keywordmapper = default_keywordmapper
+        self._keywordmapper = keywordmapper
+
+    def __repr__(self):
+        return "<py.log.Producer %s>" % ":".join(self._keywords)
+
+    def __getattr__(self, name):
+        if '_' in name:
+            raise AttributeError(name)
+        producer = self.__class__(self._keywords + (name,))
+        setattr(self, name, producer)
+        return producer
+
+    def __call__(self, *args):
+        """ write a message to the appropriate consumer(s) """
+        func = self._keywordmapper.getconsumer(self._keywords)
+        if func is not None:
+            func(self.Message(self._keywords, args))
+
+class KeywordMapper:
+    def __init__(self):
+        self.keywords2consumer = {}
+
+    def getstate(self):
+        return self.keywords2consumer.copy()
+    def setstate(self, state):
+        self.keywords2consumer.clear()
+        self.keywords2consumer.update(state)
+
+    def getconsumer(self, keywords):
+        """ return a consumer matching the given keywords.
+
+            tries to find the most suitable consumer by walking, starting from
+            the back, the list of keywords, the first consumer matching a
+            keyword is returned (falling back to py.log.default)
+        """
+        for i in range(len(keywords), 0, -1):
+            try:
+                return self.keywords2consumer[keywords[:i]]
+            except KeyError:
+                continue
+        return self.keywords2consumer.get('default', default_consumer)
+
+    def setconsumer(self, keywords, consumer):
+        """ set a consumer for a set of keywords. """
+        # normalize to tuples
+        if isinstance(keywords, str):
+            keywords = tuple(filter(None, keywords.split()))
+        elif hasattr(keywords, '_keywords'):
+            keywords = keywords._keywords
+        elif not isinstance(keywords, tuple):
+            raise TypeError("key %r is not a string or tuple" % (keywords,))
+        if consumer is not None and not py.builtin.callable(consumer):
+            if not hasattr(consumer, 'write'):
+                raise TypeError(
+                    "%r should be None, callable or file-like" % (consumer,))
+            consumer = File(consumer)
+        self.keywords2consumer[keywords] = consumer
+
+def default_consumer(msg):
+    """ the default consumer, prints the message to stdout (using 'print') """
+    sys.stderr.write(str(msg)+"\n")
+
+default_keywordmapper = KeywordMapper()
+
+def setconsumer(keywords, consumer):
+    default_keywordmapper.setconsumer(keywords, consumer)
+
+def setstate(state):
+    default_keywordmapper.setstate(state)
+def getstate():
+    return default_keywordmapper.getstate()
+
+#
+# Consumers
+#
+
+class File(object):
+    """ log consumer wrapping a file(-like) object """
+    def __init__(self, f):
+        assert hasattr(f, 'write')
+        #assert isinstance(f, file) or not hasattr(f, 'open')
+        self._file = f
+
+    def __call__(self, msg):
+        """ write a message to the log """
+        self._file.write(str(msg) + "\n")
+        if hasattr(self._file, 'flush'):
+            self._file.flush()
+
+class Path(object):
+    """ log consumer that opens and writes to a Path """
+    def __init__(self, filename, append=False,
+                 delayed_create=False, buffering=False):
+        self._append = append
+        self._filename = str(filename)
+        self._buffering = buffering
+        if not delayed_create:
+            self._openfile()
+
+    def _openfile(self):
+        mode = self._append and 'a' or 'w'
+        f = open(self._filename, mode)
+        self._file = f
+
+    def __call__(self, msg):
+        """ write a message to the log """
+        if not hasattr(self, "_file"):
+            self._openfile()
+        self._file.write(str(msg) + "\n")
+        if not self._buffering:
+            self._file.flush()
+
+def STDOUT(msg):
+    """ consumer that writes to sys.stdout """
+    sys.stdout.write(str(msg)+"\n")
+
+def STDERR(msg):
+    """ consumer that writes to sys.stderr """
+    sys.stderr.write(str(msg)+"\n")
+
+class Syslog:
+    """ consumer that writes to the syslog daemon """
+
+    def __init__(self, priority = None):
+        if priority is None:
+            priority = self.LOG_INFO
+        self.priority = priority
+
+    def __call__(self, msg):
+        """ write a message to the log """
+        py.std.syslog.syslog(self.priority, str(msg))
+
+for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
+    _prio = "LOG_" + _prio
+    try:
+        setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
+    except AttributeError:
+        pass
--- a/third_party/python/py/py/_log/warning.py
+++ b/third_party/python/py/py/_log/warning.py
@@ -1,76 +1,76 @@
-import py, sys
-
-class DeprecationWarning(DeprecationWarning):
-    def __init__(self, msg, path, lineno):
-        self.msg = msg
-        self.path = path
-        self.lineno = lineno
-    def __repr__(self):
-        return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
-    def __str__(self):
-        return self.msg
-
-def _apiwarn(startversion, msg, stacklevel=2, function=None):
-    # below is mostly COPIED from python2.4/warnings.py's def warn()
-    # Get context information
-    if isinstance(stacklevel, str):
-        frame = sys._getframe(1)
-        level = 1
-        found = frame.f_code.co_filename.find(stacklevel) != -1
-        while frame:
-            co = frame.f_code
-            if co.co_filename.find(stacklevel) == -1:
-                if found:
-                    stacklevel = level
-                    break
-            else:
-                found = True
-            level += 1
-            frame = frame.f_back
-        else:
-            stacklevel = 1
-    msg = "%s (since version %s)" %(msg, startversion)
-    warn(msg, stacklevel=stacklevel+1, function=function)
-
-def warn(msg, stacklevel=1, function=None):
-    if function is not None:
-        filename = py.std.inspect.getfile(function)
-        lineno = py.code.getrawcode(function).co_firstlineno
-    else:
-        try:
-            caller = sys._getframe(stacklevel)
-        except ValueError:
-            globals = sys.__dict__
-            lineno = 1
-        else:
-            globals = caller.f_globals
-            lineno = caller.f_lineno
-        if '__name__' in globals:
-            module = globals['__name__']
-        else:
-            module = "<string>"
-        filename = globals.get('__file__')
-    if filename:
-        fnl = filename.lower()
-        if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
-            filename = filename[:-1]
-        elif fnl.endswith("$py.class"):
-            filename = filename.replace('$py.class', '.py')
-    else:
-        if module == "__main__":
-            try:
-                filename = sys.argv[0]
-            except AttributeError:
-                # embedded interpreters don't have sys.argv, see bug #839151
-                filename = '__main__'
-        if not filename:
-            filename = module
-    path = py.path.local(filename)
-    warning = DeprecationWarning(msg, path, lineno)
-    py.std.warnings.warn_explicit(warning, category=Warning,
-        filename=str(warning.path),
-        lineno=warning.lineno,
-        registry=py.std.warnings.__dict__.setdefault(
-            "__warningsregistry__", {})
-    )
-
+import py, sys
+
+class DeprecationWarning(DeprecationWarning):
+    def __init__(self, msg, path, lineno):
+        self.msg = msg
+        self.path = path
+        self.lineno = lineno
+    def __repr__(self):
+        return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
+    def __str__(self):
+        return self.msg
+
+def _apiwarn(startversion, msg, stacklevel=2, function=None):
+    # below is mostly COPIED from python2.4/warnings.py's def warn()
+    # Get context information
+    if isinstance(stacklevel, str):
+        frame = sys._getframe(1)
+        level = 1
+        found = frame.f_code.co_filename.find(stacklevel) != -1
+        while frame:
+            co = frame.f_code
+            if co.co_filename.find(stacklevel) == -1:
+                if found:
+                    stacklevel = level
+                    break
+            else:
+                found = True
+            level += 1
+            frame = frame.f_back
+        else:
+            stacklevel = 1
+    msg = "%s (since version %s)" %(msg, startversion)
+    warn(msg, stacklevel=stacklevel+1, function=function)
+
+def warn(msg, stacklevel=1, function=None):
+    if function is not None:
+        filename = py.std.inspect.getfile(function)
+        lineno = py.code.getrawcode(function).co_firstlineno
+    else:
+        try:
+            caller = sys._getframe(stacklevel)
+        except ValueError:
+            globals = sys.__dict__
+            lineno = 1
+        else:
+            globals = caller.f_globals
+            lineno = caller.f_lineno
+        if '__name__' in globals:
+            module = globals['__name__']
+        else:
+            module = "<string>"
+        filename = globals.get('__file__')
+    if filename:
+        fnl = filename.lower()
+        if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
+            filename = filename[:-1]
+        elif fnl.endswith("$py.class"):
+            filename = filename.replace('$py.class', '.py')
+    else:
+        if module == "__main__":
+            try:
+                filename = sys.argv[0]
+            except AttributeError:
+                # embedded interpreters don't have sys.argv, see bug #839151
+                filename = '__main__'
+        if not filename:
+            filename = module
+    path = py.path.local(filename)
+    warning = DeprecationWarning(msg, path, lineno)
+    py.std.warnings.warn_explicit(warning, category=Warning,
+        filename=str(warning.path),
+        lineno=warning.lineno,
+        registry=py.std.warnings.__dict__.setdefault(
+            "__warningsregistry__", {})
+    )
+
--- a/third_party/python/py/py/_path/__init__.py
+++ b/third_party/python/py/py/_path/__init__.py
@@ -1,1 +1,1 @@
-""" unified file system api """
+""" unified file system api """
--- a/third_party/python/py/py/_path/cacheutil.py
+++ b/third_party/python/py/py/_path/cacheutil.py
@@ -1,114 +1,114 @@
-"""
-This module contains multithread-safe cache implementations.
-
-All Caches have
-
-    getorbuild(key, builder)
-    delentry(key)
-
-methods and allow configuration when instantiating the cache class.
-"""
-from time import time as gettime
-
-class BasicCache(object):
-    def __init__(self, maxentries=128):
-        self.maxentries = maxentries
-        self.prunenum = int(maxentries - maxentries/8)
-        self._dict = {}
-
-    def clear(self):
-        self._dict.clear()
-
-    def _getentry(self, key):
-        return self._dict[key]
-
-    def _putentry(self, key, entry):
-        self._prunelowestweight()
-        self._dict[key] = entry
-
-    def delentry(self, key, raising=False):
-        try:
-            del self._dict[key]
-        except KeyError:
-            if raising:
-                raise
-
-    def getorbuild(self, key, builder):
-        try:
-            entry = self._getentry(key)
-        except KeyError:
-            entry = self._build(key, builder)
-            self._putentry(key, entry)
-        return entry.value
-
-    def _prunelowestweight(self):
-        """ prune out entries with lowest weight. """
-        numentries = len(self._dict)
-        if numentries >= self.maxentries:
-            # evict according to entry's weight
-            items = [(entry.weight, key)
-                        for key, entry in self._dict.items()]
-            items.sort()
-            index = numentries - self.prunenum
-            if index > 0:
-                for weight, key in items[:index]:
-                    # in MT situations the element might be gone
-                    self.delentry(key, raising=False)
-
-class BuildcostAccessCache(BasicCache):
-    """ A BuildTime/Access-counting cache implementation.
-        the weight of a value is computed as the product of
-
-            num-accesses-of-a-value * time-to-build-the-value
-
-        The values with the least such weights are evicted
-        if the cache maxentries threshold is superceded.
-        For implementation flexibility more than one object
-        might be evicted at a time.
-    """
-    # time function to use for measuring build-times
-
-    def _build(self, key, builder):
-        start = gettime()
-        val = builder()
-        end = gettime()
-        return WeightedCountingEntry(val, end-start)
-
-
-class WeightedCountingEntry(object):
-    def __init__(self, value, oneweight):
-        self._value = value
-        self.weight = self._oneweight = oneweight
-
-    def value(self):
-        self.weight += self._oneweight
-        return self._value
-    value = property(value)
-
-class AgingCache(BasicCache):
-    """ This cache prunes out cache entries that are too old.
-    """
-    def __init__(self, maxentries=128, maxseconds=10.0):
-        super(AgingCache, self).__init__(maxentries)
-        self.maxseconds = maxseconds
-
-    def _getentry(self, key):
-        entry = self._dict[key]
-        if entry.isexpired():
-            self.delentry(key)
-            raise KeyError(key)
-        return entry
-
-    def _build(self, key, builder):
-        val = builder()
-        entry = AgingEntry(val, gettime() + self.maxseconds)
-        return entry
-
-class AgingEntry(object):
-    def __init__(self, value, expirationtime):
-        self.value = value
-        self.weight = expirationtime
-
-    def isexpired(self):
-        t = gettime()
-        return t >= self.weight
+"""
+This module contains multithread-safe cache implementations.
+
+All Caches have
+
+    getorbuild(key, builder)
+    delentry(key)
+
+methods and allow configuration when instantiating the cache class.
+"""
+from time import time as gettime
+
+class BasicCache(object):
+    def __init__(self, maxentries=128):
+        self.maxentries = maxentries
+        self.prunenum = int(maxentries - maxentries/8)
+        self._dict = {}
+
+    def clear(self):
+        self._dict.clear()
+
+    def _getentry(self, key):
+        return self._dict[key]
+
+    def _putentry(self, key, entry):
+        self._prunelowestweight()
+        self._dict[key] = entry
+
+    def delentry(self, key, raising=False):
+        try:
+            del self._dict[key]
+        except KeyError:
+            if raising:
+                raise
+
+    def getorbuild(self, key, builder):
+        try:
+            entry = self._getentry(key)
+        except KeyError:
+            entry = self._build(key, builder)
+            self._putentry(key, entry)
+        return entry.value
+
+    def _prunelowestweight(self):
+        """ prune out entries with lowest weight. """
+        numentries = len(self._dict)
+        if numentries >= self.maxentries:
+            # evict according to entry's weight
+            items = [(entry.weight, key)
+                        for key, entry in self._dict.items()]
+            items.sort()
+            index = numentries - self.prunenum
+            if index > 0:
+                for weight, key in items[:index]:
+                    # in MT situations the element might be gone
+                    self.delentry(key, raising=False)
+
+class BuildcostAccessCache(BasicCache):
+    """ A BuildTime/Access-counting cache implementation.
+        the weight of a value is computed as the product of
+
+            num-accesses-of-a-value * time-to-build-the-value
+
+        The values with the least such weights are evicted
+        if the cache maxentries threshold is superceded.
+        For implementation flexibility more than one object
+        might be evicted at a time.
+    """
+    # time function to use for measuring build-times
+
+    def _build(self, key, builder):
+        start = gettime()
+        val = builder()
+        end = gettime()
+        return WeightedCountingEntry(val, end-start)
+
+
+class WeightedCountingEntry(object):
+    def __init__(self, value, oneweight):
+        self._value = value
+        self.weight = self._oneweight = oneweight
+
+    def value(self):
+        self.weight += self._oneweight
+        return self._value
+    value = property(value)
+
+class AgingCache(BasicCache):
+    """ This cache prunes out cache entries that are too old.
+    """
+    def __init__(self, maxentries=128, maxseconds=10.0):
+        super(AgingCache, self).__init__(maxentries)
+        self.maxseconds = maxseconds
+
+    def _getentry(self, key):
+        entry = self._dict[key]
+        if entry.isexpired():
+            self.delentry(key)
+            raise KeyError(key)
+        return entry
+
+    def _build(self, key, builder):
+        val = builder()
+        entry = AgingEntry(val, gettime() + self.maxseconds)
+        return entry
+
+class AgingEntry(object):
+    def __init__(self, value, expirationtime):
+        self.value = value
+        self.weight = expirationtime
+
+    def isexpired(self):
+        t = gettime()
+        return t >= self.weight
--- a/third_party/python/py/py/_path/common.py
+++ b/third_party/python/py/py/_path/common.py
@@ -1,403 +1,445 @@
-"""
-"""
-import os, sys, posixpath
-import py
-
-# Moved from local.py.
-iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
-
-class Checkers:
-    _depend_on_existence = 'exists', 'link', 'dir', 'file'
-
-    def __init__(self, path):
-        self.path = path
-
-    def dir(self):
-        raise NotImplementedError
-
-    def file(self):
-        raise NotImplementedError
-
-    def dotfile(self):
-        return self.path.basename.startswith('.')
-
-    def ext(self, arg):
-        if not arg.startswith('.'):
-            arg = '.' + arg
-        return self.path.ext == arg
-
-    def exists(self):
-        raise NotImplementedError
-
-    def basename(self, arg):
-        return self.path.basename == arg
-
-    def basestarts(self, arg):
-        return self.path.basename.startswith(arg)
-
-    def relto(self, arg):
-        return self.path.relto(arg)
-
-    def fnmatch(self, arg):
-        return self.path.fnmatch(arg)
-
-    def endswith(self, arg):
-        return str(self.path).endswith(arg)
-
-    def _evaluate(self, kw):
-        for name, value in kw.items():
-            invert = False
-            meth = None
-            try:
-                meth = getattr(self, name)
-            except AttributeError:
-                if name[:3] == 'not':
-                    invert = True
-                    try:
-                        meth = getattr(self, name[3:])
-                    except AttributeError:
-                        pass
-            if meth is None:
-                raise TypeError(
-                    "no %r checker available for %r" % (name, self.path))
-            try:
-                if py.code.getrawcode(meth).co_argcount > 1:
-                    if (not meth(value)) ^ invert:
-                        return False
-                else:
-                    if bool(value) ^ bool(meth()) ^ invert:
-                        return False
-            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
-                # EBUSY feels not entirely correct,
-                # but its kind of necessary since ENOMEDIUM
-                # is not accessible in python
-                for name in self._depend_on_existence:
-                    if name in kw:
-                        if kw.get(name):
-                            return False
-                    name = 'not' + name
-                    if name in kw:
-                        if not kw.get(name):
-                            return False
-        return True
-
-class NeverRaised(Exception):
-    pass
-
-class PathBase(object):
-    """ shared implementation for filesystem path objects."""
-    Checkers = Checkers
-
-    def __div__(self, other):
-        return self.join(str(other))
-    __truediv__ = __div__ # py3k
-
-    def basename(self):
-        """ basename part of path. """
-        return self._getbyspec('basename')[0]
-    basename = property(basename, None, None, basename.__doc__)
-
-    def dirname(self):
-        """ dirname part of path. """
-        return self._getbyspec('dirname')[0]
-    dirname = property(dirname, None, None, dirname.__doc__)
-
-    def purebasename(self):
-        """ pure base name of the path."""
-        return self._getbyspec('purebasename')[0]
-    purebasename = property(purebasename, None, None, purebasename.__doc__)
-
-    def ext(self):
-        """ extension of the path (including the '.')."""
-        return self._getbyspec('ext')[0]
-    ext = property(ext, None, None, ext.__doc__)
-
-    def dirpath(self, *args, **kwargs):
-        """ return the directory path joined with any given path arguments.  """
-        return self.new(basename='').join(*args, **kwargs)
-
-    def read_binary(self):
-        """ read and return a bytestring from reading the path. """
-        with self.open('rb') as f:
-            return f.read()
-
-    def read_text(self, encoding):
-        """ read and return a Unicode string from reading the path. """
-        with self.open("r", encoding=encoding) as f:
-            return f.read()
-
-
-    def read(self, mode='r'):
-        """ read and return a bytestring from reading the path. """
-        with self.open(mode) as f:
-            return f.read()
-
-    def readlines(self, cr=1):
-        """ read and return a list of lines from the path. if cr is False, the
-newline will be removed from the end of each line. """
-        if not cr:
-            content = self.read('rU')
-            return content.split('\n')
-        else:
-            f = self.open('rU')
-            try:
-                return f.readlines()
-            finally:
-                f.close()
-
-    def load(self):
-        """ (deprecated) return object unpickled from self.read() """
-        f = self.open('rb')
-        try:
-            return py.error.checked_call(py.std.pickle.load, f)
-        finally:
-            f.close()
-
-    def move(self, target):
-        """ move this path to target. """
-        if target.relto(self):
-            raise py.error.EINVAL(target,
-                "cannot move path into a subdirectory of itself")
-        try:
-            self.rename(target)
-        except py.error.EXDEV:  # invalid cross-device link
-            self.copy(target)
-            self.remove()
-
-    def __repr__(self):
-        """ return a string representation of this path. """
-        return repr(str(self))
-
-    def check(self, **kw):
-        """ check a path for existence and properties.
-
-            Without arguments, return True if the path exists, otherwise False.
-
-            valid checkers::
-
-                file=1    # is a file
-                file=0    # is not a file (may not even exist)
-                dir=1     # is a dir
-                link=1    # is a link
-                exists=1  # exists
-
-            You can specify multiple checker definitions, for example::
-
-                path.check(file=1, link=1)  # a link pointing to a file
-        """
-        if not kw:
-            kw = {'exists' : 1}
-        return self.Checkers(self)._evaluate(kw)
-
-    def fnmatch(self, pattern):
-        """return true if the basename/fullname matches the glob-'pattern'.
-
-        valid pattern characters::
-
-            *       matches everything
-            ?       matches any single character
-            [seq]   matches any character in seq
-            [!seq]  matches any char not in seq
-
-        If the pattern contains a path-separator then the full path
-        is used for pattern matching and a '*' is prepended to the
-        pattern.
-
-        if the pattern doesn't contain a path-separator the pattern
-        is only matched against the basename.
-        """
-        return FNMatcher(pattern)(self)
-
-    def relto(self, relpath):
-        """ return a string which is the relative part of the path
-        to the given 'relpath'.
-        """
-        if not isinstance(relpath, (str, PathBase)):
-            raise TypeError("%r: not a string or path object" %(relpath,))
-        strrelpath = str(relpath)
-        if strrelpath and strrelpath[-1] != self.sep:
-            strrelpath += self.sep
-        #assert strrelpath[-1] == self.sep
-        #assert strrelpath[-2] != self.sep
-        strself = self.strpath
-        if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
-            if os.path.normcase(strself).startswith(
-               os.path.normcase(strrelpath)):
-                return strself[len(strrelpath):]
-        elif strself.startswith(strrelpath):
-            return strself[len(strrelpath):]
-        return ""
-
-    def ensure_dir(self, *args):
-        """ ensure the path joined with args is a directory. """
-        return self.ensure(*args, **{"dir": True})
-
-    def bestrelpath(self, dest):
-        """ return a string which is a relative path from self
-            (assumed to be a directory) to dest such that
-            self.join(bestrelpath) == dest and if not such
-            path can be determined return dest.
-        """
-        try:
-            if self == dest:
-                return os.curdir
-            base = self.common(dest)
-            if not base:  # can be the case on windows
-                return str(dest)
-            self2base = self.relto(base)
-            reldest = dest.relto(base)
-            if self2base:
-                n = self2base.count(self.sep) + 1
-            else:
-                n = 0
-            l = [os.pardir] * n
-            if reldest:
-                l.append(reldest)
-            target = dest.sep.join(l)
-            return target
-        except AttributeError:
-            return str(dest)
-
-    def exists(self):
-        return self.check()
-
-    def isdir(self):
-        return self.check(dir=1)
-
-    def isfile(self):
-        return self.check(file=1)
-
-    def parts(self, reverse=False):
-        """ return a root-first list of all ancestor directories
-            plus the path itself.
-        """
-        current = self
-        l = [self]
-        while 1:
-            last = current
-            current = current.dirpath()
-            if last == current:
-                break
-            l.append(current)
-        if not reverse:
-            l.reverse()
-        return l
-
-    def common(self, other):
-        """ return the common part shared with the other path
-            or None if there is no common part.
-        """
-        last = None
-        for x, y in zip(self.parts(), other.parts()):
-            if x != y:
-                return last
-            last = x
-        return last
-
-    def __add__(self, other):
-        """ return new path object with 'other' added to the basename"""
-        return self.new(basename=self.basename+str(other))
-
-    def __cmp__(self, other):
-        """ return sort value (-1, 0, +1). """
-        try:
-            return cmp(self.strpath, other.strpath)
-        except AttributeError:
-            return cmp(str(self), str(other)) # self.path, other.path)
-
-    def __lt__(self, other):
-        try:
-            return self.strpath < other.strpath
-        except AttributeError:
-            return str(self) < str(other)
-
-    def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
-        """ yields all paths below the current one
-
-            fil is a filter (glob pattern or callable), if not matching the
-            path will not be yielded, defaulting to None (everything is
-            returned)
-
-            rec is a filter (glob pattern or callable) that controls whether
-            a node is descended, defaulting to None
-
-            ignore is an Exception class that is ignoredwhen calling dirlist()
-            on any of the paths (by default, all exceptions are reported)
-
-            bf if True will cause a breadthfirst search instead of the
-            default depthfirst. Default: False
-
-            sort if True will sort entries within each directory level.
-        """
-        for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
-            yield x
-
-    def _sortlist(self, res, sort):
-        if sort:
-            if hasattr(sort, '__call__'):
-                res.sort(sort)
-            else:
-                res.sort()
-
-    def samefile(self, other):
-        """ return True if other refers to the same stat object as self. """
-        return self.strpath == str(other)
-
-class Visitor:
-    def __init__(self, fil, rec, ignore, bf, sort):
-        if isinstance(fil, str):
-            fil = FNMatcher(fil)
-        if isinstance(rec, str):
-            self.rec = FNMatcher(rec)
-        elif not hasattr(rec, '__call__') and rec:
-            self.rec = lambda path: True
-        else:
-            self.rec = rec
-        self.fil = fil
-        self.ignore = ignore
-        self.breadthfirst = bf
-        self.optsort = sort and sorted or (lambda x: x)
-
-    def gen(self, path):
-        try:
-            entries = path.listdir()
-        except self.ignore:
-            return
-        rec = self.rec
-        dirs = self.optsort([p for p in entries
-                    if p.check(dir=1) and (rec is None or rec(p))])
-        if not self.breadthfirst:
-            for subdir in dirs:
-                for p in self.gen(subdir):
-                    yield p
-        for p in self.optsort(entries):
-            if self.fil is None or self.fil(p):
-                yield p
-        if self.breadthfirst:
-            for subdir in dirs:
-                for p in self.gen(subdir):
-                    yield p
-
-class FNMatcher:
-    def __init__(self, pattern):
-        self.pattern = pattern
-
-    def __call__(self, path):
-        pattern = self.pattern
-
-        if (pattern.find(path.sep) == -1 and
-        iswin32 and
-        pattern.find(posixpath.sep) != -1):
-            # Running on Windows, the pattern has no Windows path separators,
-            # and the pattern has one or more Posix path separators. Replace
-            # the Posix path separators with the Windows path separator.
-            pattern = pattern.replace(posixpath.sep, path.sep)
-
-        if pattern.find(path.sep) == -1:
-            name = path.basename
-        else:
-            name = str(path) # path.strpath # XXX svn?
-            if not os.path.isabs(pattern):
-                pattern = '*' + path.sep + pattern
-        return py.std.fnmatch.fnmatch(name, pattern)
-
+"""
+"""
+import os, sys, posixpath
+import fnmatch
+import py
+
+# Moved from local.py.
+iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
+
+try:
+    from os import fspath
+except ImportError:
+    def fspath(path):
+        """
+        Return the string representation of the path.
+        If str or bytes is passed in, it is returned unchanged.
+        This code comes from PEP 519, modified to support earlier versions of
+        python.
+
+        This is required for python < 3.6.
+        """
+        if isinstance(path, (py.builtin.text, py.builtin.bytes)):
+            return path
+
+        # Work from the object's type to match method resolution of other magic
+        # methods.
+        path_type = type(path)
+        try:
+            return path_type.__fspath__(path)
+        except AttributeError:
+            if hasattr(path_type, '__fspath__'):
+                raise
+            try:
+                import pathlib
+            except ImportError:
+                pass
+            else:
+                if isinstance(path, pathlib.PurePath):
+                    return py.builtin.text(path)
+
+            raise TypeError("expected str, bytes or os.PathLike object, not "
+                            + path_type.__name__)
+
+class Checkers:
+    _depend_on_existence = 'exists', 'link', 'dir', 'file'
+
+    def __init__(self, path):
+        self.path = path
+
+    def dir(self):
+        raise NotImplementedError
+
+    def file(self):
+        raise NotImplementedError
+
+    def dotfile(self):
+        return self.path.basename.startswith('.')
+
+    def ext(self, arg):
+        if not arg.startswith('.'):
+            arg = '.' + arg
+        return self.path.ext == arg
+
+    def exists(self):
+        raise NotImplementedError
+
+    def basename(self, arg):
+        return self.path.basename == arg
+
+    def basestarts(self, arg):
+        return self.path.basename.startswith(arg)
+
+    def relto(self, arg):
+        return self.path.relto(arg)
+
+    def fnmatch(self, arg):
+        return self.path.fnmatch(arg)
+
+    def endswith(self, arg):
+        return str(self.path).endswith(arg)
+
+    def _evaluate(self, kw):
+        for name, value in kw.items():
+            invert = False
+            meth = None
+            try:
+                meth = getattr(self, name)
+            except AttributeError:
+                if name[:3] == 'not':
+                    invert = True
+                    try:
+                        meth = getattr(self, name[3:])
+                    except AttributeError:
+                        pass
+            if meth is None:
+                raise TypeError(
+                    "no %r checker available for %r" % (name, self.path))
+            try:
+                if py.code.getrawcode(meth).co_argcount > 1:
+                    if (not meth(value)) ^ invert:
+                        return False
+                else:
+                    if bool(value) ^ bool(meth()) ^ invert:
+                        return False
+            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
+                # EBUSY feels not entirely correct,
+                # but its kind of necessary since ENOMEDIUM
+                # is not accessible in python
+                for name in self._depend_on_existence:
+                    if name in kw:
+                        if kw.get(name):
+                            return False
+                    name = 'not' + name
+                    if name in kw:
+                        if not kw.get(name):
+                            return False
+        return True
+
+class NeverRaised(Exception):
+    pass
+
+class PathBase(object):
+    """ shared implementation for filesystem path objects."""
+    Checkers = Checkers
+
+    def __div__(self, other):
+        return self.join(fspath(other))
+    __truediv__ = __div__ # py3k
+
+    def basename(self):
+        """ basename part of path. """
+        return self._getbyspec('basename')[0]
+    basename = property(basename, None, None, basename.__doc__)
+
+    def dirname(self):
+        """ dirname part of path. """
+        return self._getbyspec('dirname')[0]
+    dirname = property(dirname, None, None, dirname.__doc__)
+
+    def purebasename(self):
+        """ pure base name of the path."""
+        return self._getbyspec('purebasename')[0]
+    purebasename = property(purebasename, None, None, purebasename.__doc__)
+
+    def ext(self):
+        """ extension of the path (including the '.')."""
+        return self._getbyspec('ext')[0]
+    ext = property(ext, None, None, ext.__doc__)
+
+    def dirpath(self, *args, **kwargs):
+        """ return the directory path joined with any given path arguments.  """
+        return self.new(basename='').join(*args, **kwargs)
+
+    def read_binary(self):
+        """ read and return a bytestring from reading the path. """
+        with self.open('rb') as f:
+            return f.read()
+
+    def read_text(self, encoding):
+        """ read and return a Unicode string from reading the path. """
+        with self.open("r", encoding=encoding) as f:
+            return f.read()
+
+
+    def read(self, mode='r'):
+        """ read and return a bytestring from reading the path. """
+        with self.open(mode) as f:
+            return f.read()
+
+    def readlines(self, cr=1):
+        """ read and return a list of lines from the path. if cr is False, the
+newline will be removed from the end of each line. """
+        if sys.version_info < (3, ):
+            mode = 'rU'
+        else:  # python 3 deprecates mode "U" in favor of "newline" option
+            mode = 'r'
+
+        if not cr:
+            content = self.read(mode)
+            return content.split('\n')
+        else:
+            f = self.open(mode)
+            try:
+                return f.readlines()
+            finally:
+                f.close()
+
+    def load(self):
+        """ (deprecated) return object unpickled from self.read() """
+        f = self.open('rb')
+        try:
+            return py.error.checked_call(py.std.pickle.load, f)
+        finally:
+            f.close()
+
+    def move(self, target):
+        """ move this path to target. """
+        if target.relto(self):
+            raise py.error.EINVAL(target,
+                "cannot move path into a subdirectory of itself")
+        try:
+            self.rename(target)
+        except py.error.EXDEV:  # invalid cross-device link
+            self.copy(target)
+            self.remove()
+
+    def __repr__(self):
+        """ return a string representation of this path. """
+        return repr(str(self))
+
+    def check(self, **kw):
+        """ check a path for existence and properties.
+
+            Without arguments, return True if the path exists, otherwise False.
+
+            valid checkers::
+
+                file=1    # is a file
+                file=0    # is not a file (may not even exist)
+                dir=1     # is a dir
+                link=1    # is a link
+                exists=1  # exists
+
+            You can specify multiple checker definitions, for example::
+
+                path.check(file=1, link=1)  # a link pointing to a file
+        """
+        if not kw:
+            kw = {'exists' : 1}
+        return self.Checkers(self)._evaluate(kw)
+
+    def fnmatch(self, pattern):
+        """return true if the basename/fullname matches the glob-'pattern'.
+
+        valid pattern characters::
+
+            *       matches everything
+            ?       matches any single character
+            [seq]   matches any character in seq
+            [!seq]  matches any char not in seq
+
+        If the pattern contains a path-separator then the full path
+        is used for pattern matching and a '*' is prepended to the
+        pattern.
+
+        if the pattern doesn't contain a path-separator the pattern
+        is only matched against the basename.
+        """
+        return FNMatcher(pattern)(self)
+
+    def relto(self, relpath):
+        """ return a string which is the relative part of the path
+        to the given 'relpath'.
+        """
+        if not isinstance(relpath, (str, PathBase)):
+            raise TypeError("%r: not a string or path object" %(relpath,))
+        strrelpath = str(relpath)
+        if strrelpath and strrelpath[-1] != self.sep:
+            strrelpath += self.sep
+        #assert strrelpath[-1] == self.sep
+        #assert strrelpath[-2] != self.sep
+        strself = self.strpath
+        if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
+            if os.path.normcase(strself).startswith(
+               os.path.normcase(strrelpath)):
+                return strself[len(strrelpath):]
+        elif strself.startswith(strrelpath):
+            return strself[len(strrelpath):]
+        return ""
+
+    def ensure_dir(self, *args):
+        """ ensure the path joined with args is a directory. """
+        return self.ensure(*args, **{"dir": True})
+
+    def bestrelpath(self, dest):
+        """ return a string which is a relative path from self
+            (assumed to be a directory) to dest such that
+            self.join(bestrelpath) == dest and if not such
+            path can be determined return dest.
+        """
+        try:
+            if self == dest:
+                return os.curdir
+            base = self.common(dest)
+            if not base:  # can be the case on windows
+                return str(dest)
+            self2base = self.relto(base)
+            reldest = dest.relto(base)
+            if self2base:
+                n = self2base.count(self.sep) + 1
+            else:
+                n = 0
+            l = [os.pardir] * n
+            if reldest:
+                l.append(reldest)
+            target = dest.sep.join(l)
+            return target
+        except AttributeError:
+            return str(dest)
+
+    def exists(self):
+        return self.check()
+
+    def isdir(self):
+        return self.check(dir=1)
+
+    def isfile(self):
+        return self.check(file=1)
+
+    def parts(self, reverse=False):
+        """ return a root-first list of all ancestor directories
+            plus the path itself.
+        """
+        current = self
+        l = [self]
+        while 1:
+            last = current
+            current = current.dirpath()
+            if last == current:
+                break
+            l.append(current)
+        if not reverse:
+            l.reverse()
+        return l
+
+    def common(self, other):
+        """ return the common part shared with the other path
+            or None if there is no common part.
+        """
+        last = None
+        for x, y in zip(self.parts(), other.parts()):
+            if x != y:
+                return last
+            last = x
+        return last
+
+    def __add__(self, other):
+        """ return new path object with 'other' added to the basename"""
+        return self.new(basename=self.basename+str(other))
+
+    def __cmp__(self, other):
+        """ return sort value (-1, 0, +1). """
+        try:
+            return cmp(self.strpath, other.strpath)
+        except AttributeError:
+            return cmp(str(self), str(other)) # self.path, other.path)
+
+    def __lt__(self, other):
+        try:
+            return self.strpath < other.strpath
+        except AttributeError:
+            return str(self) < str(other)
+
+    def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
+        """ yields all paths below the current one
+
+            fil is a filter (glob pattern or callable), if not matching the
+            path will not be yielded, defaulting to None (everything is
+            returned)
+
+            rec is a filter (glob pattern or callable) that controls whether
+            a node is descended, defaulting to None
+
+            ignore is an Exception class that is ignoredwhen calling dirlist()
+            on any of the paths (by default, all exceptions are reported)
+
+            bf if True will cause a breadthfirst search instead of the
+            default depthfirst. Default: False
+
+            sort if True will sort entries within each directory level.
+        """
+        for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
+            yield x
+
+    def _sortlist(self, res, sort):
+        if sort:
+            if hasattr(sort, '__call__'):
+                res.sort(sort)
+            else:
+                res.sort()
+
+    def samefile(self, other):
+        """ return True if other refers to the same stat object as self. """
+        return self.strpath == str(other)
+
+    def __fspath__(self):
+        return self.strpath
+
+class Visitor:
+    def __init__(self, fil, rec, ignore, bf, sort):
+        if isinstance(fil, py.builtin._basestring):
+            fil = FNMatcher(fil)
+        if isinstance(rec, py.builtin._basestring):
+            self.rec = FNMatcher(rec)
+        elif not hasattr(rec, '__call__') and rec:
+            self.rec = lambda path: True
+        else:
+            self.rec = rec
+        self.fil = fil
+        self.ignore = ignore
+        self.breadthfirst = bf
+        self.optsort = sort and sorted or (lambda x: x)
+
+    def gen(self, path):
+        try:
+            entries = path.listdir()
+        except self.ignore:
+            return
+        rec = self.rec
+        dirs = self.optsort([p for p in entries
+                    if p.check(dir=1) and (rec is None or rec(p))])
+        if not self.breadthfirst:
+            for subdir in dirs:
+                for p in self.gen(subdir):
+                    yield p
+        for p in self.optsort(entries):
+            if self.fil is None or self.fil(p):
+                yield p
+        if self.breadthfirst:
+            for subdir in dirs:
+                for p in self.gen(subdir):
+                    yield p
+
+class FNMatcher:
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __call__(self, path):
+        pattern = self.pattern
+
+        if (pattern.find(path.sep) == -1 and
+        iswin32 and
+        pattern.find(posixpath.sep) != -1):
+            # Running on Windows, the pattern has no Windows path separators,
+            # and the pattern has one or more Posix path separators. Replace
+            # the Posix path separators with the Windows path separator.
+            pattern = pattern.replace(posixpath.sep, path.sep)
+
+        if pattern.find(path.sep) == -1:
+            name = path.basename
+        else:
+            name = str(path) # path.strpath # XXX svn?
+            if not os.path.isabs(pattern):
+                pattern = '*' + path.sep + pattern
+        return fnmatch.fnmatch(name, pattern)
--- a/third_party/python/py/py/_path/local.py
+++ b/third_party/python/py/py/_path/local.py
@@ -1,911 +1,930 @@
-"""
-local path implementation.
-"""
-from __future__ import with_statement
-
-from contextlib import contextmanager
-import sys, os, re, atexit, io
-import py
-from py._path import common
-from py._path.common import iswin32
-from stat import S_ISLNK, S_ISDIR, S_ISREG
-
-from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
-
-if sys.version_info > (3,0):
-    def map_as_list(func, iter):
-        return list(map(func, iter))
-else:
-    map_as_list = map
-
-class Stat(object):
-    def __getattr__(self, name):
-        return getattr(self._osstatresult, "st_" + name)
-
-    def __init__(self, path, osstatresult):
-        self.path = path
-        self._osstatresult = osstatresult
-
-    @property
-    def owner(self):
-        if iswin32:
-            raise NotImplementedError("XXX win32")
-        import pwd
-        entry = py.error.checked_call(pwd.getpwuid, self.uid)
-        return entry[0]
-
-    @property
-    def group(self):
-        """ return group name of file. """
-        if iswin32:
-            raise NotImplementedError("XXX win32")
-        import grp
-        entry = py.error.checked_call(grp.getgrgid, self.gid)
-        return entry[0]
-
-    def isdir(self):
-        return S_ISDIR(self._osstatresult.st_mode)
-
-    def isfile(self):
-        return S_ISREG(self._osstatresult.st_mode)
-
-    def islink(self):
-        st = self.path.lstat()
-        return S_ISLNK(self._osstatresult.st_mode)
-
-class PosixPath(common.PathBase):
-    def chown(self, user, group, rec=0):
-        """ change ownership to the given user and group.
-            user and group may be specified by a number or
-            by a name.  if rec is True change ownership
-            recursively.
-        """
-        uid = getuserid(user)
-        gid = getgroupid(group)
-        if rec:
-            for x in self.visit(rec=lambda x: x.check(link=0)):
-                if x.check(link=0):
-                    py.error.checked_call(os.chown, str(x), uid, gid)
-        py.error.checked_call(os.chown, str(self), uid, gid)
-
-    def readlink(self):
-        """ return value of a symbolic link. """
-        return py.error.checked_call(os.readlink, self.strpath)
-
-    def mklinkto(self, oldname):
-        """ posix style hard link to another name. """
-        py.error.checked_call(os.link, str(oldname), str(self))
-
-    def mksymlinkto(self, value, absolute=1):
-        """ create a symbolic link with the given value (pointing to another name). """
-        if absolute:
-            py.error.checked_call(os.symlink, str(value), self.strpath)
-        else:
-            base = self.common(value)
-            # with posix local paths '/' is always a common base
-            relsource = self.__class__(value).relto(base)
-            reldest = self.relto(base)
-            n = reldest.count(self.sep)
-            target = self.sep.join(('..', )*n + (relsource, ))
-            py.error.checked_call(os.symlink, target, self.strpath)
-
-def getuserid(user):
-    import pwd
-    if not isinstance(user, int):
-        user = pwd.getpwnam(user)[2]
-    return user
-
-def getgroupid(group):
-    import grp
-    if not isinstance(group, int):
-        group = grp.getgrnam(group)[2]
-    return group
-
-FSBase = not iswin32 and PosixPath or common.PathBase
-
-class LocalPath(FSBase):
-    """ object oriented interface to os.path and other local filesystem
-        related information.
-    """
-    class ImportMismatchError(ImportError):
-        """ raised on pyimport() if there is a mismatch of __file__'s"""
-
-    sep = os.sep
-    class Checkers(common.Checkers):
-        def _stat(self):
-            try:
-                return self._statcache
-            except AttributeError:
-                try:
-                    self._statcache = self.path.stat()
-                except py.error.ELOOP:
-                    self._statcache = self.path.lstat()
-                return self._statcache
-
-        def dir(self):
-            return S_ISDIR(self._stat().mode)
-
-        def file(self):
-            return S_ISREG(self._stat().mode)
-
-        def exists(self):
-            return self._stat()
-
-        def link(self):
-            st = self.path.lstat()
-            return S_ISLNK(st.mode)
-
-    def __init__(self, path=None, expanduser=False):
-        """ Initialize and return a local Path instance.
-
-        Path can be relative to the current directory.
-        If path is None it defaults to the current working directory.
-        If expanduser is True, tilde-expansion is performed.
-        Note that Path instances always carry an absolute path.
-        Note also that passing in a local path object will simply return
-        the exact same path object. Use new() to get a new copy.
-        """
-        if path is None:
-            self.strpath = py.error.checked_call(os.getcwd)
-        elif isinstance(path, common.PathBase):
-            self.strpath = path.strpath
-        elif isinstance(path, py.builtin._basestring):
-            if expanduser:
-                path = os.path.expanduser(path)
-            self.strpath = abspath(path)
-        else:
-            raise ValueError("can only pass None, Path instances "
-                             "or non-empty strings to LocalPath")
-
-    def __hash__(self):
-        return hash(self.strpath)
-
-    def __eq__(self, other):
-        s1 = self.strpath
-        s2 = getattr(other, "strpath", other)
-        if iswin32:
-            s1 = s1.lower()
-            try:
-                s2 = s2.lower()
-            except AttributeError:
-                return False
-        return s1 == s2
-
-    def __ne__(self, other):
-        return not (self == other)
-
-    def __lt__(self, other):
-        return self.strpath < getattr(other, "strpath", other)
-
-    def __gt__(self, other):
-        return self.strpath > getattr(other, "strpath", other)
-
-    def samefile(self, other):
-        """ return True if 'other' references the same file as 'self'.
-        """
-        other = getattr(other, "strpath", other)
-        if not isabs(other):
-            other = abspath(other)
-        if self == other:
-            return True
-        if iswin32:
-            return False # there is no samefile
-        return py.error.checked_call(
-                os.path.samefile, self.strpath, other)
-
-    def remove(self, rec=1, ignore_errors=False):
-        """ remove a file or directory (or a directory tree if rec=1).
-        if ignore_errors is True, errors while removing directories will
-        be ignored.
-        """
-        if self.check(dir=1, link=0):
-            if rec:
-                # force remove of readonly files on windows
-                if iswin32:
-                    self.chmod(448, rec=1) # octcal 0700
-                py.error.checked_call(py.std.shutil.rmtree, self.strpath,
-                    ignore_errors=ignore_errors)
-            else:
-                py.error.checked_call(os.rmdir, self.strpath)
-        else:
-            if iswin32:
-                self.chmod(448) # octcal 0700
-            py.error.checked_call(os.remove, self.strpath)
-
-    def computehash(self, hashtype="md5", chunksize=524288):
-        """ return hexdigest of hashvalue for this file. """
-        try:
-            try:
-                import hashlib as mod
-            except ImportError:
-                if hashtype == "sha1":
-                    hashtype = "sha"
-                mod = __import__(hashtype)
-            hash = getattr(mod, hashtype)()
-        except (AttributeError, ImportError):
-            raise ValueError("Don't know how to compute %r hash" %(hashtype,))
-        f = self.open('rb')
-        try:
-            while 1:
-                buf = f.read(chunksize)
-                if not buf:
-                    return hash.hexdigest()
-                hash.update(buf)
-        finally:
-            f.close()
-
-    def new(self, **kw):
-        """ create a modified version of this path.
-            the following keyword arguments modify various path parts::
-
-              a:/some/path/to/a/file.ext
-              xx                           drive
-              xxxxxxxxxxxxxxxxx            dirname
-                                xxxxxxxx   basename
-                                xxxx       purebasename
-                                     xxx   ext
-        """
-        obj = object.__new__(self.__class__)
-        if not kw:
-            obj.strpath = self.strpath
-            return obj
-        drive, dirname, basename, purebasename,ext = self._getbyspec(
-             "drive,dirname,basename,purebasename,ext")
-        if 'basename' in kw:
-            if 'purebasename' in kw or 'ext' in kw:
-                raise ValueError("invalid specification %r" % kw)
-        else:
-            pb = kw.setdefault('purebasename', purebasename)
-            try:
-                ext = kw['ext']
-            except KeyError:
-                pass
-            else:
-                if ext and not ext.startswith('.'):
-                    ext = '.' + ext
-            kw['basename'] = pb + ext
-
-        if ('dirname' in kw and not kw['dirname']):
-            kw['dirname'] = drive
-        else:
-            kw.setdefault('dirname', dirname)
-        kw.setdefault('sep', self.sep)
-        obj.strpath = normpath(
-            "%(dirname)s%(sep)s%(basename)s" % kw)
-        return obj
-
-    def _getbyspec(self, spec):
-        """ see new for what 'spec' can be. """
-        res = []
-        parts = self.strpath.split(self.sep)
-
-        args = filter(None, spec.split(',') )
-        append = res.append
-        for name in args:
-            if name == 'drive':
-                append(parts[0])
-            elif name == 'dirname':
-                append(self.sep.join(parts[:-1]))
-            else:
-                basename = parts[-1]
-                if name == 'basename':
-                    append(basename)
-                else:
-                    i = basename.rfind('.')
-                    if i == -1:
-                        purebasename, ext = basename, ''
-                    else:
-                        purebasename, ext = basename[:i], basename[i:]
-                    if name == 'purebasename':
-                        append(purebasename)
-                    elif name == 'ext':
-                        append(ext)
-                    else:
-                        raise ValueError("invalid part specification %r" % name)
-        return res
-
-    def dirpath(self, *args, **kwargs):
-        """ return the directory path joined with any given path arguments.  """
-        if not kwargs:
-            path = object.__new__(self.__class__)
-            path.strpath = dirname(self.strpath)
-            if args:
-                path = path.join(*args)
-            return path
-        return super(LocalPath, self).dirpath(*args, **kwargs)
-
-    def join(self, *args, **kwargs):
-        """ return a new path by appending all 'args' as path
-        components.  if abs=1 is used restart from root if any
-        of the args is an absolute path.
-        """
-        sep = self.sep
-        strargs = [getattr(arg, "strpath", arg) for arg in args]
-        strpath = self.strpath
-        if kwargs.get('abs'):
-            newargs = []
-            for arg in reversed(strargs):
-                if isabs(arg):
-                    strpath = arg
-                    strargs = newargs
-                    break
-                newargs.insert(0, arg)
-        for arg in strargs:
-            arg = arg.strip(sep)
-            if iswin32:
-                # allow unix style paths even on windows.
-                arg = arg.strip('/')
-                arg = arg.replace('/', sep)
-            strpath = strpath + sep + arg
-        obj = object.__new__(self.__class__)
-        obj.strpath = normpath(strpath)
-        return obj
-
-    def open(self, mode='r', ensure=False, encoding=None):
-        """ return an opened file with the given mode.
-
-        If ensure is True, create parent directories if needed.
-        """
-        if ensure:
-            self.dirpath().ensure(dir=1)
-        if encoding:
-            return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
-        return py.error.checked_call(open, self.strpath, mode)
-
-    def _fastjoin(self, name):
-        child = object.__new__(self.__class__)
-        child.strpath = self.strpath + self.sep + name
-        return child
-
-    def islink(self):
-        return islink(self.strpath)
-
-    def check(self, **kw):
-        if not kw:
-            return exists(self.strpath)
-        if len(kw) == 1:
-            if "dir" in kw:
-                return not kw["dir"] ^ isdir(self.strpath)
-            if "file" in kw:
-                return not kw["file"] ^ isfile(self.strpath)
-        return super(LocalPath, self).check(**kw)
-
-    _patternchars = set("*?[" + os.path.sep)
-    def listdir(self, fil=None, sort=None):
-        """ list directory contents, possibly filter by the given fil func
-            and possibly sorted.
-        """
-        if fil is None and sort is None:
-            names = py.error.checked_call(os.listdir, self.strpath)
-            return map_as_list(self._fastjoin, names)
-        if isinstance(fil, py.builtin._basestring):
-            if not self._patternchars.intersection(fil):
-                child = self._fastjoin(fil)
-                if exists(child.strpath):
-                    return [child]
-                return []
-            fil = common.FNMatcher(fil)
-        names = py.error.checked_call(os.listdir, self.strpath)
-        res = []
-        for name in names:
-            child = self._fastjoin(name)
-            if fil is None or fil(child):
-                res.append(child)
-        self._sortlist(res, sort)
-        return res
-
-    def size(self):
-        """ return size of the underlying file object """
-        return self.stat().size
-
-    def mtime(self):
-        """ return last modification time of the path. """
-        return self.stat().mtime
-
-    def copy(self, target, mode=False):
-        """ copy path to target."""
-        if self.check(file=1):
-            if target.check(dir=1):
-                target = target.join(self.basename)
-            assert self!=target
-            copychunked(self, target)
-            if mode:
-                copymode(self.strpath, target.strpath)
-        else:
-            def rec(p):
-                return p.check(link=0)
-            for x in self.visit(rec=rec):
-                relpath = x.relto(self)
-                newx = target.join(relpath)
-                newx.dirpath().ensure(dir=1)
-                if x.check(link=1):
-                    newx.mksymlinkto(x.readlink())
-                    continue
-                elif x.check(file=1):
-                    copychunked(x, newx)
-                elif x.check(dir=1):
-                    newx.ensure(dir=1)
-                if mode:
-                    copymode(x.strpath, newx.strpath)
-
-    def rename(self, target):
-        """ rename this path to target. """
-        target = getattr(target, "strpath", target)
-        return py.error.checked_call(os.rename, self.strpath, target)
-
-    def dump(self, obj, bin=1):
-        """ pickle object into path location"""
-        f = self.open('wb')
-        try:
-            py.error.checked_call(py.std.pickle.dump, obj, f, bin)
-        finally:
-            f.close()
-
-    def mkdir(self, *args):
-        """ create & return the directory joined with args. """
-        p = self.join(*args)
-        py.error.checked_call(os.mkdir, getattr(p, "strpath", p))
-        return p
-
-    def write_binary(self, data, ensure=False):
-        """ write binary data into path.   If ensure is True create
-        missing parent directories.
-        """
-        if ensure:
-            self.dirpath().ensure(dir=1)
-        with self.open('wb') as f:
-            f.write(data)
-
-    def write_text(self, data, encoding, ensure=False):
-        """ write text data into path using the specified encoding.
-        If ensure is True create missing parent directories.
-        """
-        if ensure:
-            self.dirpath().ensure(dir=1)
-        with self.open('w', encoding=encoding) as f:
-            f.write(data)
-
-    def write(self, data, mode='w', ensure=False):
-        """ write data into path.   If ensure is True create
-        missing parent directories.
-        """
-        if ensure:
-            self.dirpath().ensure(dir=1)
-        if 'b' in mode:
-            if not py.builtin._isbytes(data):
-                raise ValueError("can only process bytes")
-        else:
-            if not py.builtin._istext(data):
-                if not py.builtin._isbytes(data):
-                    data = str(data)
-                else:
-                    data = py.builtin._totext(data, sys.getdefaultencoding())
-        f = self.open(mode)
-        try:
-            f.write(data)
-        finally:
-            f.close()
-
-    def _ensuredirs(self):
-        parent = self.dirpath()
-        if parent == self:
-            return self
-        if parent.check(dir=0):
-            parent._ensuredirs()
-        if self.check(dir=0):
-            try:
-                self.mkdir()
-            except py.error.EEXIST:
-                # race condition: file/dir created by another thread/process.
-                # complain if it is not a dir
-                if self.check(dir=0):
-                    raise
-        return self
-
-    def ensure(self, *args, **kwargs):
-        """ ensure that an args-joined path exists (by default as
-            a file). if you specify a keyword argument 'dir=True'
-            then the path is forced to be a directory path.
-        """
-        p = self.join(*args)
-        if kwargs.get('dir', 0):
-            return p._ensuredirs()
-        else:
-            p.dirpath()._ensuredirs()
-            if not p.check(file=1):
-                p.open('w').close()
-            return p
-
-    def stat(self, raising=True):
-        """ Return an os.stat() tuple. """
-        if raising == True:
-            return Stat(self, py.error.checked_call(os.stat, self.strpath))
-        try:
-            return Stat(self, os.stat(self.strpath))
-        except KeyboardInterrupt:
-            raise
-        except Exception:
-            return None
-
-    def lstat(self):
-        """ Return an os.lstat() tuple. """
-        return Stat(self, py.error.checked_call(os.lstat, self.strpath))
-
-    def setmtime(self, mtime=None):
-        """ set modification time for the given path.  if 'mtime' is None
-        (the default) then the file's mtime is set to current time.
-
-        Note that the resolution for 'mtime' is platform dependent.
-        """
-        if mtime is None:
-            return py.error.checked_call(os.utime, self.strpath, mtime)
-        try:
-            return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
-        except py.error.EINVAL:
-            return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
-
-    def chdir(self):
-        """ change directory to self and return old current directory """
-        try:
-            old = self.__class__()
-        except py.error.ENOENT:
-            old = None
-        py.error.checked_call(os.chdir, self.strpath)
-        return old
-
-
-    @contextmanager
-    def as_cwd(self):
-        """ return context manager which changes to current dir during the
-        managed "with" context. On __enter__ it returns the old dir.
-        """
-        old = self.chdir()
-        try:
-            yield old
-        finally:
-            old.chdir()
-
-    def realpath(self):
-        """ return a new path which contains no symbolic links."""
-        return self.__class__(os.path.realpath(self.strpath))
-
-    def atime(self):
-        """ return last access time of the path. """
-        return self.stat().atime
-
-    def __repr__(self):
-        return 'local(%r)' % self.strpath
-
-    def __str__(self):
-        """ return string representation of the Path. """
-        return self.strpath
-
-    def chmod(self, mode, rec=0):
-        """ change permissions to the given mode. If mode is an
-            integer it directly encodes the os-specific modes.
-            if rec is True perform recursively.
-        """
-        if not isinstance(mode, int):
-            raise TypeError("mode %r must be an integer" % (mode,))
-        if rec:
-            for x in self.visit(rec=rec):
-                py.error.checked_call(os.chmod, str(x), mode)
-        py.error.checked_call(os.chmod, self.strpath, mode)
-
-    def pypkgpath(self):
-        """ return the Python package path by looking for the last
-        directory upwards which still contains an __init__.py.
-        Return None if a pkgpath can not be determined.
-        """
-        pkgpath = None
-        for parent in self.parts(reverse=True):
-            if parent.isdir():
-                if not parent.join('__init__.py').exists():
-                    break
-                if not isimportable(parent.basename):
-                    break
-                pkgpath = parent
-        return pkgpath
-
-    def _ensuresyspath(self, ensuremode, path):
-        if ensuremode:
-            s = str(path)
-            if ensuremode == "append":
-                if s not in sys.path:
-                    sys.path.append(s)
-            else:
-                if s != sys.path[0]:
-                    sys.path.insert(0, s)
-
-    def pyimport(self, modname=None, ensuresyspath=True):
-        """ return path as an imported python module.
-
-        If modname is None, look for the containing package
-        and construct an according module name.
-        The module will be put/looked up in sys.modules.
-        if ensuresyspath is True then the root dir for importing
-        the file (taking __init__.py files into account) will
-        be prepended to sys.path if it isn't there already.
-        If ensuresyspath=="append" the root dir will be appended
-        if it isn't already contained in sys.path.
-        if ensuresyspath is False no modification of syspath happens.
-        """
-        if not self.check():
-            raise py.error.ENOENT(self)
-
-        pkgpath = None
-        if modname is None:
-            pkgpath = self.pypkgpath()
-            if pkgpath is not None:
-                pkgroot = pkgpath.dirpath()
-                names = self.new(ext="").relto(pkgroot).split(self.sep)
-                if names[-1] == "__init__":
-                    names.pop()
-                modname = ".".join(names)
-            else:
-                pkgroot = self.dirpath()
-                modname = self.purebasename
-
-            self._ensuresyspath(ensuresyspath, pkgroot)
-            __import__(modname)
-            mod = sys.modules[modname]
-            if self.basename == "__init__.py":
-                return mod # we don't check anything as we might
-                       # we in a namespace package ... too icky to check
-            modfile = mod.__file__
-            if modfile[-4:] in ('.pyc', '.pyo'):
-                modfile = modfile[:-1]
-            elif modfile.endswith('$py.class'):
-                modfile = modfile[:-9] + '.py'
-            if modfile.endswith(os.path.sep + "__init__.py"):
-                if self.basename != "__init__.py":
-                    modfile = modfile[:-12]
-            try:
-                issame = self.samefile(modfile)
-            except py.error.ENOENT:
-                issame = False
-            if not issame:
-                raise self.ImportMismatchError(modname, modfile, self)
-            return mod
-        else:
-            try:
-                return sys.modules[modname]
-            except KeyError:
-                # we have a custom modname, do a pseudo-import
-                mod = py.std.types.ModuleType(modname)
-                mod.__file__ = str(self)
-                sys.modules[modname] = mod
-                try:
-                    py.builtin.execfile(str(self), mod.__dict__)
-                except:
-                    del sys.modules[modname]
-                    raise
-                return mod
-
-    def sysexec(self, *argv, **popen_opts):
-        """ return stdout text from executing a system child process,
-            where the 'self' path points to executable.
-            The process is directly invoked and not through a system shell.
-        """
-        from subprocess import Popen, PIPE
-        argv = map_as_list(str, argv)
-        popen_opts['stdout'] = popen_opts['stderr'] = PIPE
-        proc = Popen([str(self)] + argv, **popen_opts)
-        stdout, stderr = proc.communicate()
-        ret = proc.wait()
-        if py.builtin._isbytes(stdout):
-            stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
-        if ret != 0:
-            if py.builtin._isbytes(stderr):
-                stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
-            raise py.process.cmdexec.Error(ret, ret, str(self),
-                                           stdout, stderr,)
-        return stdout
-
-    def sysfind(cls, name, checker=None, paths=None):
-        """ return a path object found by looking at the systems
-            underlying PATH specification. If the checker is not None
-            it will be invoked to filter matching paths.  If a binary
-            cannot be found, None is returned
-            Note: This is probably not working on plain win32 systems
-            but may work on cygwin.
-        """
-        if isabs(name):
-            p = py.path.local(name)
-            if p.check(file=1):
-                return p
-        else:
-            if paths is None:
-                if iswin32:
-                    paths = py.std.os.environ['Path'].split(';')
-                    if '' not in paths and '.' not in paths:
-                        paths.append('.')
-                    try:
-                        systemroot = os.environ['SYSTEMROOT']
-                    except KeyError:
-                        pass
-                    else:
-                        paths = [re.sub('%SystemRoot%', systemroot, path)
-                                 for path in paths]
-                else:
-                    paths = py.std.os.environ['PATH'].split(':')
-            tryadd = []
-            if iswin32:
-                tryadd += os.environ['PATHEXT'].split(os.pathsep)
-            tryadd.append("")
-
-            for x in paths:
-                for addext in tryadd:
-                    p = py.path.local(x).join(name, abs=True) + addext
-                    try:
-                        if p.check(file=1):
-                            if checker:
-                                if not checker(p):
-                                    continue
-                            return p
-                    except py.error.EACCES:
-                        pass
-        return None
-    sysfind = classmethod(sysfind)
-
-    def _gethomedir(cls):
-        try:
-            x = os.environ['HOME']
-        except KeyError:
-            try:
-                x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
-            except KeyError:
-                return None
-        return cls(x)
-    _gethomedir = classmethod(_gethomedir)
-
-    #"""
-    #special class constructors for local filesystem paths
-    #"""
-    def get_temproot(cls):
-        """ return the system's temporary directory
-            (where tempfiles are usually created in)
-        """
-        return py.path.local(py.std.tempfile.gettempdir())
-    get_temproot = classmethod(get_temproot)
-
-    def mkdtemp(cls, rootdir=None):
-        """ return a Path object pointing to a fresh new temporary directory
-            (which we created ourself).
-        """
-        import tempfile
-        if rootdir is None:
-            rootdir = cls.get_temproot()
-        return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
-    mkdtemp = classmethod(mkdtemp)
-
-    def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
-                          lock_timeout = 172800):   # two days
-        """ return unique directory with a number greater than the current
-            maximum one.  The number is assumed to start directly after prefix.
-            if keep is true directories with a number less than (maxnum-keep)
-            will be removed.
-        """
-        if rootdir is None:
-            rootdir = cls.get_temproot()
-
-        def parse_num(path):
-            """ parse the number out of a path (if it matches the prefix) """
-            bn = path.basename
-            if bn.startswith(prefix):
-                try:
-                    return int(bn[len(prefix):])
-                except ValueError:
-                    pass
-
-        # compute the maximum number currently in use with the
-        # prefix
-        lastmax = None
-        while True:
-            maxnum = -1
-            for path in rootdir.listdir():
-                num = parse_num(path)
-                if num is not None:
-                    maxnum = max(maxnum, num)
-
-            # make the new directory
-            try:
-                udir = rootdir.mkdir(prefix + str(maxnum+1))
-            except py.error.EEXIST:
-                # race condition: another thread/process created the dir
-                # in the meantime.  Try counting again
-                if lastmax == maxnum:
-                    raise
-                lastmax = maxnum
-                continue
-            break
-
-        # put a .lock file in the new directory that will be removed at
-        # process exit
-        if lock_timeout:
-            lockfile = udir.join('.lock')
-            mypid = os.getpid()
-            if hasattr(lockfile, 'mksymlinkto'):
-                lockfile.mksymlinkto(str(mypid))
-            else:
-                lockfile.write(str(mypid))
-            def try_remove_lockfile():
-                # in a fork() situation, only the last process should
-                # remove the .lock, otherwise the other processes run the
-                # risk of seeing their temporary dir disappear.  For now
-                # we remove the .lock in the parent only (i.e. we assume
-                # that the children finish before the parent).
-                if os.getpid() != mypid:
-                    return
-                try:
-                    lockfile.remove()
-                except py.error.Error:
-                    pass
-            atexit.register(try_remove_lockfile)
-
-        # prune old directories
-        if keep:
-            for path in rootdir.listdir():
-                num = parse_num(path)
-                if num is not None and num <= (maxnum - keep):
-                    lf = path.join('.lock')
-                    try:
-                        t1 = lf.lstat().mtime
-                        t2 = lockfile.lstat().mtime
-                        if not lock_timeout or abs(t2-t1) < lock_timeout:
-                            continue   # skip directories still locked
-                    except py.error.Error:
-                        pass   # assume that it means that there is no 'lf'
-                    try:
-                        path.remove(rec=1)
-                    except KeyboardInterrupt:
-                        raise
-                    except: # this might be py.error.Error, WindowsError ...
-                        pass
-
-        # make link...
-        try:
-            username = os.environ['USER']           #linux, et al
-        except KeyError:
-            try:
-                username = os.environ['USERNAME']   #windows
-            except KeyError:
-                username = 'current'
-
-        src  = str(udir)
-        dest = src[:src.rfind('-')] + '-' + username
-        try:
-            os.unlink(dest)
-        except OSError:
-            pass
-        try:
-            os.symlink(src, dest)
-        except (OSError, AttributeError, NotImplementedError):
-            pass
-
-        return udir
-    make_numbered_dir = classmethod(make_numbered_dir)
-
-def copymode(src, dest):
-    py.std.shutil.copymode(src, dest)
-
-def copychunked(src, dest):
-    chunksize = 524288 # half a meg of bytes
-    fsrc = src.open('rb')
-    try:
-        fdest = dest.open('wb')
-        try:
-            while 1:
-                buf = fsrc.read(chunksize)
-                if not buf:
-                    break
-                fdest.write(buf)
-        finally:
-            fdest.close()
-    finally:
-        fsrc.close()
-
-def isimportable(name):
-    if name and (name[0].isalpha() or name[0] == '_'):
-        name = name.replace("_", '')
-        return not name or name.isalnum()
+"""
+local path implementation.
+"""
+from __future__ import with_statement
+
+from contextlib import contextmanager
+import sys, os, re, atexit, io
+import py
+from py._path import common
+from py._path.common import iswin32, fspath
+from stat import S_ISLNK, S_ISDIR, S_ISREG
+
+from os.path import abspath, normcase, normpath, isabs, exists, isdir, isfile, islink, dirname
+
+if sys.version_info > (3,0):
+    def map_as_list(func, iter):
+        return list(map(func, iter))
+else:
+    map_as_list = map
+
+class Stat(object):
+    def __getattr__(self, name):
+        return getattr(self._osstatresult, "st_" + name)
+
+    def __init__(self, path, osstatresult):
+        self.path = path
+        self._osstatresult = osstatresult
+
+    @property
+    def owner(self):
+        if iswin32:
+            raise NotImplementedError("XXX win32")
+        import pwd
+        entry = py.error.checked_call(pwd.getpwuid, self.uid)
+        return entry[0]
+
+    @property
+    def group(self):
+        """ return group name of file. """
+        if iswin32:
+            raise NotImplementedError("XXX win32")
+        import grp
+        entry = py.error.checked_call(grp.getgrgid, self.gid)
+        return entry[0]
+
+    def isdir(self):
+        return S_ISDIR(self._osstatresult.st_mode)
+
+    def isfile(self):
+        return S_ISREG(self._osstatresult.st_mode)
+
+    def islink(self):
+        st = self.path.lstat()
+        return S_ISLNK(self._osstatresult.st_mode)
+
+class PosixPath(common.PathBase):
+    def chown(self, user, group, rec=0):
+        """ change ownership to the given user and group.
+            user and group may be specified by a number or
+            by a name.  if rec is True change ownership
+            recursively.
+        """
+        uid = getuserid(user)
+        gid = getgroupid(group)
+        if rec:
+            for x in self.visit(rec=lambda x: x.check(link=0)):
+                if x.check(link=0):
+                    py.error.checked_call(os.chown, str(x), uid, gid)
+        py.error.checked_call(os.chown, str(self), uid, gid)
+
+    def readlink(self):
+        """ return value of a symbolic link. """
+        return py.error.checked_call(os.readlink, self.strpath)
+
+    def mklinkto(self, oldname):
+        """ posix style hard link to another name. """
+        py.error.checked_call(os.link, str(oldname), str(self))
+
+    def mksymlinkto(self, value, absolute=1):
+        """ create a symbolic link with the given value (pointing to another name). """
+        if absolute:
+            py.error.checked_call(os.symlink, str(value), self.strpath)
+        else:
+            base = self.common(value)
+            # with posix local paths '/' is always a common base
+            relsource = self.__class__(value).relto(base)
+            reldest = self.relto(base)
+            n = reldest.count(self.sep)
+            target = self.sep.join(('..', )*n + (relsource, ))
+            py.error.checked_call(os.symlink, target, self.strpath)
+
+def getuserid(user):
+    import pwd
+    if not isinstance(user, int):
+        user = pwd.getpwnam(user)[2]
+    return user
+
+def getgroupid(group):
+    import grp
+    if not isinstance(group, int):
+        group = grp.getgrnam(group)[2]
+    return group
+
+FSBase = not iswin32 and PosixPath or common.PathBase
+
+class LocalPath(FSBase):
+    """ object oriented interface to os.path and other local filesystem
+        related information.
+    """
+    class ImportMismatchError(ImportError):
+        """ raised on pyimport() if there is a mismatch of __file__'s"""
+
+    sep = os.sep
+    class Checkers(common.Checkers):
+        def _stat(self):
+            try:
+                return self._statcache
+            except AttributeError:
+                try:
+                    self._statcache = self.path.stat()
+                except py.error.ELOOP:
+                    self._statcache = self.path.lstat()
+                return self._statcache
+
+        def dir(self):
+            return S_ISDIR(self._stat().mode)
+
+        def file(self):
+            return S_ISREG(self._stat().mode)
+
+        def exists(self):
+            return self._stat()
+
+        def link(self):
+            st = self.path.lstat()
+            return S_ISLNK(st.mode)
+
+    def __init__(self, path=None, expanduser=False):
+        """ Initialize and return a local Path instance.
+
+        Path can be relative to the current directory.
+        If path is None it defaults to the current working directory.
+        If expanduser is True, tilde-expansion is performed.
+        Note that Path instances always carry an absolute path.
+        Note also that passing in a local path object will simply return
+        the exact same path object. Use new() to get a new copy.
+        """
+        if path is None:
+            self.strpath = py.error.checked_call(os.getcwd)
+        else:
+            try:
+                path = fspath(path)
+            except TypeError:
+                raise ValueError("can only pass None, Path instances "
+                                 "or non-empty strings to LocalPath")
+            if expanduser:
+                path = os.path.expanduser(path)
+            self.strpath = abspath(path)
+
+    def __hash__(self):
+        return hash(self.strpath)
+
+    def __eq__(self, other):
+        s1 = fspath(self)
+        try:
+            s2 = fspath(other)
+        except TypeError:
+            return False
+        if iswin32:
+            s1 = s1.lower()
+            try:
+                s2 = s2.lower()
+            except AttributeError:
+                return False
+        return s1 == s2
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __lt__(self, other):
+        return fspath(self) < fspath(other)
+
+    def __gt__(self, other):
+        return fspath(self) > fspath(other)
+
+    def samefile(self, other):
+        """ return True if 'other' references the same file as 'self'.
+        """
+        other = fspath(other)
+        if not isabs(other):
+            other = abspath(other)
+        if self == other:
+            return True
+        if iswin32:
+            return False # there is no samefile
+        return py.error.checked_call(
+                os.path.samefile, self.strpath, other)
+
+    def remove(self, rec=1, ignore_errors=False):
+        """ remove a file or directory (or a directory tree if rec=1).
+        if ignore_errors is True, errors while removing directories will
+        be ignored.
+        """
+        if self.check(dir=1, link=0):
+            if rec:
+                # force remove of readonly files on windows
+                if iswin32:
+                    self.chmod(0o700, rec=1)
+                py.error.checked_call(py.std.shutil.rmtree, self.strpath,
+                    ignore_errors=ignore_errors)
+            else:
+                py.error.checked_call(os.rmdir, self.strpath)
+        else:
+            if iswin32:
+                self.chmod(0o700)
+            py.error.checked_call(os.remove, self.strpath)
+
+    def computehash(self, hashtype="md5", chunksize=524288):
+        """ return hexdigest of hashvalue for this file. """
+        try:
+            try:
+                import hashlib as mod
+            except ImportError:
+                if hashtype == "sha1":
+                    hashtype = "sha"
+                mod = __import__(hashtype)
+            hash = getattr(mod, hashtype)()
+        except (AttributeError, ImportError):
+            raise ValueError("Don't know how to compute %r hash" %(hashtype,))
+        f = self.open('rb')
+        try:
+            while 1:
+                buf = f.read(chunksize)
+                if not buf:
+                    return hash.hexdigest()
+                hash.update(buf)
+        finally:
+            f.close()
+
+    def new(self, **kw):
+        """ create a modified version of this path.
+            the following keyword arguments modify various path parts::
+
+              a:/some/path/to/a/file.ext
+              xx                           drive
+              xxxxxxxxxxxxxxxxx            dirname
+                                xxxxxxxx   basename
+                                xxxx       purebasename
+                                     xxx   ext
+        """
+        obj = object.__new__(self.__class__)
+        if not kw:
+            obj.strpath = self.strpath
+            return obj
+        drive, dirname, basename, purebasename,ext = self._getbyspec(
+             "drive,dirname,basename,purebasename,ext")
+        if 'basename' in kw:
+            if 'purebasename' in kw or 'ext' in kw:
+                raise ValueError("invalid specification %r" % kw)
+        else:
+            pb = kw.setdefault('purebasename', purebasename)
+            try:
+                ext = kw['ext']
+            except KeyError:
+                pass
+            else:
+                if ext and not ext.startswith('.'):
+                    ext = '.' + ext
+            kw['basename'] = pb + ext
+
+        if ('dirname' in kw and not kw['dirname']):
+            kw['dirname'] = drive
+        else:
+            kw.setdefault('dirname', dirname)
+        kw.setdefault('sep', self.sep)
+        obj.strpath = normpath(
+            "%(dirname)s%(sep)s%(basename)s" % kw)
+        return obj
+
+    def _getbyspec(self, spec):
+        """ see new for what 'spec' can be.
+
+        'spec' is a comma-separated list of part names
+        (drive, dirname, basename, purebasename, ext); return the
+        corresponding values in the same order.
+        """
+        res = []
+        parts = self.strpath.split(self.sep)
+
+        # filter(None, ...) drops empty names from e.g. trailing commas
+        args = filter(None, spec.split(',') )
+        append = res.append
+        for name in args:
+            if name == 'drive':
+                append(parts[0])
+            elif name == 'dirname':
+                append(self.sep.join(parts[:-1]))
+            else:
+                basename = parts[-1]
+                if name == 'basename':
+                    append(basename)
+                else:
+                    # split basename on the *last* dot into stem + ext
+                    i = basename.rfind('.')
+                    if i == -1:
+                        purebasename, ext = basename, ''
+                    else:
+                        purebasename, ext = basename[:i], basename[i:]
+                    if name == 'purebasename':
+                        append(purebasename)
+                    elif name == 'ext':
+                        append(ext)
+                    else:
+                        raise ValueError("invalid part specification %r" % name)
+        return res
+
+    def dirpath(self, *args, **kwargs):
+        """ return the directory path joined with any given path arguments.  """
+        # Fast path when no keyword arguments are given; the generic
+        # base-class implementation is only needed for kwargs.
+        if not kwargs:
+            path = object.__new__(self.__class__)
+            path.strpath = dirname(self.strpath)
+            if args:
+                path = path.join(*args)
+            return path
+        return super(LocalPath, self).dirpath(*args, **kwargs)
+
+    def join(self, *args, **kwargs):
+        """ return a new path by appending all 'args' as path
+        components.  if abs=1 is used restart from root if any
+        of the args is an absolute path.
+        """
+        sep = self.sep
+        strargs = [fspath(arg) for arg in args]
+        strpath = self.strpath
+        if kwargs.get('abs'):
+            # scan from the right: the *last* absolute arg becomes the
+            # new base and everything before it is discarded.
+            newargs = []
+            for arg in reversed(strargs):
+                if isabs(arg):
+                    strpath = arg
+                    strargs = newargs
+                    break
+                newargs.insert(0, arg)
+        for arg in strargs:
+            arg = arg.strip(sep)
+            if iswin32:
+                # allow unix style paths even on windows.
+                arg = arg.strip('/')
+                arg = arg.replace('/', sep)
+            strpath = strpath + sep + arg
+        obj = object.__new__(self.__class__)
+        obj.strpath = normpath(strpath)
+        return obj
+
+    def open(self, mode='r', ensure=False, encoding=None):
+        """ return an opened file with the given mode.
+
+        If ensure is True, create parent directories if needed.
+        If encoding is given, open via io.open with that encoding.
+        """
+        if ensure:
+            self.dirpath().ensure(dir=1)
+        if encoding:
+            return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
+        return py.error.checked_call(open, self.strpath, mode)
+
+    def _fastjoin(self, name):
+        # internal helper: join a single child name without the
+        # normalization done by join() (used by listdir for speed)
+        child = object.__new__(self.__class__)
+        child.strpath = self.strpath + self.sep + name
+        return child
+
+    def islink(self):
+        """ return True if this path is a symbolic link. """
+        return islink(self.strpath)
+
+    def check(self, **kw):
+        """ check path properties; with no keywords return existence.
+
+        Fast paths for the common single-key 'dir'/'file' queries;
+        everything else is delegated to the base class.
+        """
+        if not kw:
+            return exists(self.strpath)
+        if len(kw) == 1:
+            # "not value ^ predicate" handles both the positive (dir=1)
+            # and the negated (dir=0) form in one expression
+            if "dir" in kw:
+                return not kw["dir"] ^ isdir(self.strpath)
+            if "file" in kw:
+                return not kw["file"] ^ isfile(self.strpath)
+        return super(LocalPath, self).check(**kw)
+
+    # characters that mark a string filter as an fnmatch-style pattern
+    _patternchars = set("*?[" + os.path.sep)
+    def listdir(self, fil=None, sort=None):
+        """ list directory contents, possibly filter by the given fil func
+            and possibly sorted.
+        """
+        if fil is None and sort is None:
+            names = py.error.checked_call(os.listdir, self.strpath)
+            return map_as_list(self._fastjoin, names)
+        if isinstance(fil, py.builtin._basestring):
+            # a plain string without pattern characters is a literal
+            # child name: check it directly instead of scanning the dir
+            if not self._patternchars.intersection(fil):
+                child = self._fastjoin(fil)
+                if exists(child.strpath):
+                    return [child]
+                return []
+            fil = common.FNMatcher(fil)
+        names = py.error.checked_call(os.listdir, self.strpath)
+        res = []
+        for name in names:
+            child = self._fastjoin(name)
+            if fil is None or fil(child):
+                res.append(child)
+        self._sortlist(res, sort)
+        return res
+
+    def size(self):
+        """ return size of the underlying file object """
+        return self.stat().size
+
+    def mtime(self):
+        """ return last modification time of the path. """
+        return self.stat().mtime
+
+    def copy(self, target, mode=False, stat=False):
+        """ copy path to target.
+
+            If mode is True, will copy copy permission from path to target.
+            If stat is True, copy permission, last modification
+            time, last access time, and flags from path to target.
+        """
+        if self.check(file=1):
+            # single-file copy; copying into a directory appends our
+            # basename to the target
+            if target.check(dir=1):
+                target = target.join(self.basename)
+            assert self!=target
+            copychunked(self, target)
+            if mode:
+                copymode(self.strpath, target.strpath)
+            if stat:
+                copystat(self, target)
+        else:
+            # recursive directory copy; symlinks are not descended into
+            # (rec returns False for links) but are recreated below
+            def rec(p):
+                return p.check(link=0)
+            for x in self.visit(rec=rec):
+                relpath = x.relto(self)
+                newx = target.join(relpath)
+                newx.dirpath().ensure(dir=1)
+                if x.check(link=1):
+                    newx.mksymlinkto(x.readlink())
+                    continue
+                elif x.check(file=1):
+                    copychunked(x, newx)
+                elif x.check(dir=1):
+                    newx.ensure(dir=1)
+                if mode:
+                    copymode(x.strpath, newx.strpath)
+                if stat:
+                    copystat(x, newx)
+
+    def rename(self, target):
+        """ rename this path to target. """
+        target = fspath(target)
+        return py.error.checked_call(os.rename, self.strpath, target)
+
+    def dump(self, obj, bin=1):
+        """ pickle object into path location
+
+        'bin' is passed on as the pickle protocol argument.
+        """
+        f = self.open('wb')
+        try:
+            py.error.checked_call(py.std.pickle.dump, obj, f, bin)
+        finally:
+            f.close()
+
+    def mkdir(self, *args):
+        """ create & return the directory joined with args. """
+        p = self.join(*args)
+        py.error.checked_call(os.mkdir, fspath(p))
+        return p
+
+    def write_binary(self, data, ensure=False):
+        """ write binary data into path.   If ensure is True create
+        missing parent directories.
+        """
+        if ensure:
+            self.dirpath().ensure(dir=1)
+        with self.open('wb') as f:
+            f.write(data)
+
+    def write_text(self, data, encoding, ensure=False):
+        """ write text data into path using the specified encoding.
+        If ensure is True create missing parent directories.
+        """
+        if ensure:
+            self.dirpath().ensure(dir=1)
+        with self.open('w', encoding=encoding) as f:
+            f.write(data)
+
+    def write(self, data, mode='w', ensure=False):
+        """ write data into path.   If ensure is True create
+        missing parent directories.
+
+        In binary mode only bytes are accepted; in text mode bytes are
+        decoded with the default encoding and other objects are
+        stringified.
+        """
+        if ensure:
+            self.dirpath().ensure(dir=1)
+        if 'b' in mode:
+            if not py.builtin._isbytes(data):
+                raise ValueError("can only process bytes")
+        else:
+            if not py.builtin._istext(data):
+                if not py.builtin._isbytes(data):
+                    data = str(data)
+                else:
+                    data = py.builtin._totext(data, sys.getdefaultencoding())
+        f = self.open(mode)
+        try:
+            f.write(data)
+        finally:
+            f.close()
+
+    def _ensuredirs(self):
+        # recursively create this directory and all missing parents
+        # (like os.makedirs, but race-tolerant); returns self
+        parent = self.dirpath()
+        if parent == self:
+            # reached the filesystem root
+            return self
+        if parent.check(dir=0):
+            parent._ensuredirs()
+        if self.check(dir=0):
+            try:
+                self.mkdir()
+            except py.error.EEXIST:
+                # race condition: file/dir created by another thread/process.
+                # complain if it is not a dir
+                if self.check(dir=0):
+                    raise
+        return self
+
+    def ensure(self, *args, **kwargs):
+        """ ensure that an args-joined path exists (by default as
+            a file). if you specify a keyword argument 'dir=True'
+            then the path is forced to be a directory path.
+        """
+        p = self.join(*args)
+        if kwargs.get('dir', 0):
+            return p._ensuredirs()
+        else:
+            p.dirpath()._ensuredirs()
+            # create an empty file if it does not exist yet
+            if not p.check(file=1):
+                p.open('w').close()
+            return p
+
+    def stat(self, raising=True):
+        """ Return an os.stat() tuple.
+
+        With raising=False return None on any stat error instead of
+        raising (KeyboardInterrupt is always propagated).
+        """
+        if raising == True:
+            return Stat(self, py.error.checked_call(os.stat, self.strpath))
+        try:
+            return Stat(self, os.stat(self.strpath))
+        except KeyboardInterrupt:
+            raise
+        except Exception:
+            return None
+
+    def lstat(self):
+        """ Return an os.lstat() tuple. """
+        return Stat(self, py.error.checked_call(os.lstat, self.strpath))
+
+    def setmtime(self, mtime=None):
+        """ set modification time for the given path.  if 'mtime' is None
+        (the default) then the file's mtime is set to current time.
+
+        Note that the resolution for 'mtime' is platform dependent.
+        """
+        if mtime is None:
+            # os.utime(path, None) sets atime and mtime to "now"
+            return py.error.checked_call(os.utime, self.strpath, mtime)
+        try:
+            # atime of -1 is rejected on some platforms (EINVAL);
+            # fall back to passing the current atime explicitly
+            return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
+        except py.error.EINVAL:
+            return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
+
+    def chdir(self):
+        """ change directory to self and return old current directory """
+        try:
+            old = self.__class__()
+        except py.error.ENOENT:
+            # the current working directory no longer exists
+            old = None
+        py.error.checked_call(os.chdir, self.strpath)
+        return old
+
+
+    @contextmanager
+    def as_cwd(self):
+        """ return context manager which changes to current dir during the
+        managed "with" context. On __enter__ it returns the old dir.
+        """
+        old = self.chdir()
+        try:
+            yield old
+        finally:
+            old.chdir()
+
+    def realpath(self):
+        """ return a new path which contains no symbolic links."""
+        return self.__class__(os.path.realpath(self.strpath))
+
+    def atime(self):
+        """ return last access time of the path. """
+        return self.stat().atime
+
+    def __repr__(self):
+        return 'local(%r)' % self.strpath
+
+    def __str__(self):
+        """ return string representation of the Path. """
+        return self.strpath
+
+    def chmod(self, mode, rec=0):
+        """ change permissions to the given mode. If mode is an
+            integer it directly encodes the os-specific modes.
+            if rec is True perform recursively.
+        """
+        if not isinstance(mode, int):
+            raise TypeError("mode %r must be an integer" % (mode,))
+        if rec:
+            for x in self.visit(rec=rec):
+                py.error.checked_call(os.chmod, str(x), mode)
+        # self is always chmod'ed, recursively or not
+        py.error.checked_call(os.chmod, self.strpath, mode)
+
+    def pypkgpath(self):
+        """ return the Python package path by looking for the last
+        directory upwards which still contains an __init__.py.
+        Return None if a pkgpath can not be determined.
+        """
+        pkgpath = None
+        for parent in self.parts(reverse=True):
+            if parent.isdir():
+                # stop at the first ancestor that is not a package or
+                # whose name is not a valid python identifier
+                if not parent.join('__init__.py').exists():
+                    break
+                if not isimportable(parent.basename):
+                    break
+                pkgpath = parent
+        return pkgpath
+
+    def _ensuresyspath(self, ensuremode, path):
+        # make sure 'path' is importable: append it to sys.path when
+        # ensuremode == "append", otherwise prepend it; do nothing for
+        # a falsy ensuremode
+        if ensuremode:
+            s = str(path)
+            if ensuremode == "append":
+                if s not in sys.path:
+                    sys.path.append(s)
+            else:
+                if s != sys.path[0]:
+                    sys.path.insert(0, s)
+
+    def pyimport(self, modname=None, ensuresyspath=True):
+        """ return path as an imported python module.
+
+        If modname is None, look for the containing package
+        and construct an according module name.
+        The module will be put/looked up in sys.modules.
+        if ensuresyspath is True then the root dir for importing
+        the file (taking __init__.py files into account) will
+        be prepended to sys.path if it isn't there already.
+        If ensuresyspath=="append" the root dir will be appended
+        if it isn't already contained in sys.path.
+        if ensuresyspath is False no modification of syspath happens.
+        """
+        if not self.check():
+            raise py.error.ENOENT(self)
+
+        pkgpath = None
+        if modname is None:
+            # derive the dotted module name from the package layout
+            pkgpath = self.pypkgpath()
+            if pkgpath is not None:
+                pkgroot = pkgpath.dirpath()
+                names = self.new(ext="").relto(pkgroot).split(self.sep)
+                if names[-1] == "__init__":
+                    names.pop()
+                modname = ".".join(names)
+            else:
+                # not inside a package: import as a top-level module
+                pkgroot = self.dirpath()
+                modname = self.purebasename
+
+            self._ensuresyspath(ensuresyspath, pkgroot)
+            __import__(modname)
+            mod = sys.modules[modname]
+            if self.basename == "__init__.py":
+                return mod # we don't check anything as we might
+                       # we in a namespace package ... too icky to check
+            modfile = mod.__file__
+            # map compiled (.pyc/.pyo) and jython ($py.class) file names
+            # back to the .py source file for the comparison below
+            if modfile[-4:] in ('.pyc', '.pyo'):
+                modfile = modfile[:-1]
+            elif modfile.endswith('$py.class'):
+                modfile = modfile[:-9] + '.py'
+            if modfile.endswith(os.path.sep + "__init__.py"):
+                if self.basename != "__init__.py":
+                    modfile = modfile[:-12]
+            try:
+                issame = self.samefile(modfile)
+            except py.error.ENOENT:
+                issame = False
+            if not issame:
+                # a module of this name was already imported from a
+                # different file
+                raise self.ImportMismatchError(modname, modfile, self)
+            return mod
+        else:
+            try:
+                return sys.modules[modname]
+            except KeyError:
+                # we have a custom modname, do a pseudo-import
+                mod = py.std.types.ModuleType(modname)
+                mod.__file__ = str(self)
+                sys.modules[modname] = mod
+                try:
+                    py.builtin.execfile(str(self), mod.__dict__)
+                except:
+                    # execution failed: don't leave a half-initialized
+                    # module behind in sys.modules
+                    del sys.modules[modname]
+                    raise
+                return mod
+
+    def sysexec(self, *argv, **popen_opts):
+        """ return stdout text from executing a system child process,
+            where the 'self' path points to executable.
+            The process is directly invoked and not through a system shell.
+
+            Raises py.process.cmdexec.Error on a non-zero exit status.
+        """
+        from subprocess import Popen, PIPE
+        argv = map_as_list(str, argv)
+        popen_opts['stdout'] = popen_opts['stderr'] = PIPE
+        proc = Popen([str(self)] + argv, **popen_opts)
+        stdout, stderr = proc.communicate()
+        ret = proc.wait()
+        # decode captured output to text on python3
+        if py.builtin._isbytes(stdout):
+            stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
+        if ret != 0:
+            if py.builtin._isbytes(stderr):
+                stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
+            raise py.process.cmdexec.Error(ret, ret, str(self),
+                                           stdout, stderr,)
+        return stdout
+
+    def sysfind(cls, name, checker=None, paths=None):
+        """ return a path object found by looking at the systems
+            underlying PATH specification. If the checker is not None
+            it will be invoked to filter matching paths.  If a binary
+            cannot be found, None is returned
+            Note: This is probably not working on plain win32 systems
+            but may work on cygwin.
+        """
+        if isabs(name):
+            # absolute names are checked directly, ignoring PATH
+            p = py.path.local(name)
+            if p.check(file=1):
+                return p
+        else:
+            if paths is None:
+                if iswin32:
+                    paths = py.std.os.environ['Path'].split(';')
+                    if '' not in paths and '.' not in paths:
+                        paths.append('.')
+                    try:
+                        systemroot = os.environ['SYSTEMROOT']
+                    except KeyError:
+                        pass
+                    else:
+                        # expand literal %SystemRoot% entries in Path
+                        paths = [re.sub('%SystemRoot%', systemroot, path)
+                                 for path in paths]
+                else:
+                    paths = py.std.os.environ['PATH'].split(':')
+            # on windows also try each PATHEXT extension; "" covers the
+            # name as given
+            tryadd = []
+            if iswin32:
+                tryadd += os.environ['PATHEXT'].split(os.pathsep)
+            tryadd.append("")
+
+            for x in paths:
+                for addext in tryadd:
+                    p = py.path.local(x).join(name, abs=True) + addext
+                    try:
+                        if p.check(file=1):
+                            if checker:
+                                if not checker(p):
+                                    continue
+                            return p
+                    except py.error.EACCES:
+                        # unreadable PATH entries are silently skipped
+                        pass
+        return None
+    sysfind = classmethod(sysfind)
+
+    def _gethomedir(cls):
+        """ return the user's home directory as a path object, or None
+        if it cannot be determined from the environment.
+        """
+        try:
+            x = os.environ['HOME']
+        except KeyError:
+            try:
+                # windows fallback
+                x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
+            except KeyError:
+                return None
+        return cls(x)
+    _gethomedir = classmethod(_gethomedir)
+
+    #"""
+    #special class constructors for local filesystem paths
+    #"""
+    def get_temproot(cls):
+        """ return the system's temporary directory
+            (where tempfiles are usually created in)
+        """
+        return py.path.local(py.std.tempfile.gettempdir())
+    get_temproot = classmethod(get_temproot)
+
+    def mkdtemp(cls, rootdir=None):
+        """ return a Path object pointing to a fresh new temporary directory
+            (which we created ourself).
+        """
+        import tempfile
+        if rootdir is None:
+            rootdir = cls.get_temproot()
+        return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
+    mkdtemp = classmethod(mkdtemp)
+
+    def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
+                          lock_timeout = 172800):   # two days
+        """ return unique directory with a number greater than the current
+            maximum one.  The number is assumed to start directly after prefix.
+            if keep is true directories with a number less than (maxnum-keep)
+            will be removed.
+        """
+        if rootdir is None:
+            rootdir = cls.get_temproot()
+
+        # normcase so prefix matching is case-insensitive on windows
+        nprefix = normcase(prefix)
+        def parse_num(path):
+            """ parse the number out of a path (if it matches the prefix) """
+            nbasename = normcase(path.basename)
+            if nbasename.startswith(nprefix):
+                try:
+                    return int(nbasename[len(nprefix):])
+                except ValueError:
+                    pass
+
+        # compute the maximum number currently in use with the
+        # prefix
+        lastmax = None
+        while True:
+            maxnum = -1
+            for path in rootdir.listdir():
+                num = parse_num(path)
+                if num is not None:
+                    maxnum = max(maxnum, num)
+
+            # make the new directory
+            try:
+                udir = rootdir.mkdir(prefix + str(maxnum+1))
+            except py.error.EEXIST:
+                # race condition: another thread/process created the dir
+                # in the meantime.  Try counting again
+                if lastmax == maxnum:
+                    raise
+                lastmax = maxnum
+                continue
+            break
+
+        # put a .lock file in the new directory that will be removed at
+        # process exit
+        if lock_timeout:
+            lockfile = udir.join('.lock')
+            mypid = os.getpid()
+            # a symlink is used where available as it can be created
+            # atomically with content (the pid)
+            if hasattr(lockfile, 'mksymlinkto'):
+                lockfile.mksymlinkto(str(mypid))
+            else:
+                lockfile.write(str(mypid))
+            def try_remove_lockfile():
+                # in a fork() situation, only the last process should
+                # remove the .lock, otherwise the other processes run the
+                # risk of seeing their temporary dir disappear.  For now
+                # we remove the .lock in the parent only (i.e. we assume
+                # that the children finish before the parent).
+                if os.getpid() != mypid:
+                    return
+                try:
+                    lockfile.remove()
+                except py.error.Error:
+                    pass
+            atexit.register(try_remove_lockfile)
+
+        # prune old directories
+        if keep:
+            for path in rootdir.listdir():
+                num = parse_num(path)
+                if num is not None and num <= (maxnum - keep):
+                    lf = path.join('.lock')
+                    try:
+                        t1 = lf.lstat().mtime
+                        t2 = lockfile.lstat().mtime
+                        if not lock_timeout or abs(t2-t1) < lock_timeout:
+                            continue   # skip directories still locked
+                    except py.error.Error:
+                        pass   # assume that it means that there is no 'lf'
+                    try:
+                        path.remove(rec=1)
+                    except KeyboardInterrupt:
+                        raise
+                    except: # this might be py.error.Error, WindowsError ...
+                        pass
+
+        # make link...
+        try:
+            username = os.environ['USER']           #linux, et al
+        except KeyError:
+            try:
+                username = os.environ['USERNAME']   #windows
+            except KeyError:
+                username = 'current'
+
+        # maintain a convenience symlink "<prefix>-<username>" pointing
+        # at the newest numbered directory; best-effort only
+        src  = str(udir)
+        dest = src[:src.rfind('-')] + '-' + username
+        try:
+            os.unlink(dest)
+        except OSError:
+            pass
+        try:
+            os.symlink(src, dest)
+        except (OSError, AttributeError, NotImplementedError):
+            pass
+
+        return udir
+    make_numbered_dir = classmethod(make_numbered_dir)
+
+
+def copymode(src, dest):
+    """ copy permission from src to dst. """
+    py.std.shutil.copymode(src, dest)
+
+def copystat(src, dest):
+    """ copy permission,  last modification time, last access time, and flags from src to dst."""
+    py.std.shutil.copystat(str(src), str(dest))
+
+def copychunked(src, dest):
+    """ copy file contents from path 'src' to path 'dest' in
+    fixed-size chunks (avoids loading the whole file into memory).
+    """
+    chunksize = 524288 # half a meg of bytes
+    fsrc = src.open('rb')
+    try:
+        fdest = dest.open('wb')
+        try:
+            while 1:
+                buf = fsrc.read(chunksize)
+                if not buf:
+                    break
+                fdest.write(buf)
+        finally:
+            fdest.close()
+    finally:
+        fsrc.close()
+
+def isimportable(name):
+    """ return True if 'name' is usable as a python identifier
+    (letter or underscore first, then alphanumerics/underscores).
+    """
+    if name and (name[0].isalpha() or name[0] == '_'):
+        name = name.replace("_", '')
+        return not name or name.isalnum()
--- a/third_party/python/py/py/_path/svnurl.py
+++ b/third_party/python/py/py/_path/svnurl.py
@@ -1,380 +1,380 @@
-"""
-module defining a subversion path object based on the external
-command 'svn'. This modules aims to work with svn 1.3 and higher
-but might also interact well with earlier versions.
-"""
-
-import os, sys, time, re
-import py
-from py import path, process
-from py._path import common
-from py._path import svnwc as svncommon
-from py._path.cacheutil import BuildcostAccessCache, AgingCache
-
-DEBUG=False
-
-class SvnCommandPath(svncommon.SvnPathBase):
-    """ path implementation that offers access to (possibly remote) subversion
-    repositories. """
-
-    _lsrevcache = BuildcostAccessCache(maxentries=128)
-    _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0)
-
-    def __new__(cls, path, rev=None, auth=None):
-        self = object.__new__(cls)
-        if isinstance(path, cls):
-            rev = path.rev
-            auth = path.auth
-            path = path.strpath
-        svncommon.checkbadchars(path)
-        path = path.rstrip('/')
-        self.strpath = path
-        self.rev = rev
-        self.auth = auth
-        return self
-
-    def __repr__(self):
-        if self.rev == -1:
-            return 'svnurl(%r)' % self.strpath
-        else:
-            return 'svnurl(%r, %r)' % (self.strpath, self.rev)
-
-    def _svnwithrev(self, cmd, *args):
-        """ execute an svn command, append our own url and revision """
-        if self.rev is None:
-            return self._svnwrite(cmd, *args)
-        else:
-            args = ['-r', self.rev] + list(args)
-            return self._svnwrite(cmd, *args)
-
-    def _svnwrite(self, cmd, *args):
-        """ execute an svn command, append our own url """
-        l = ['svn %s' % cmd]
-        args = ['"%s"' % self._escape(item) for item in args]
-        l.extend(args)
-        l.append('"%s"' % self._encodedurl())
-        # fixing the locale because we can't otherwise parse
-        string = " ".join(l)
-        if DEBUG:
-            print("execing %s" % string)
-        out = self._svncmdexecauth(string)
-        return out
-
-    def _svncmdexecauth(self, cmd):
-        """ execute an svn command 'as is' """
-        cmd = svncommon.fixlocale() + cmd
-        if self.auth is not None:
-            cmd += ' ' + self.auth.makecmdoptions()
-        return self._cmdexec(cmd)
-
-    def _cmdexec(self, cmd):
-        try:
-            out = process.cmdexec(cmd)
-        except py.process.cmdexec.Error:
-            e = sys.exc_info()[1]
-            if (e.err.find('File Exists') != -1 or
-                            e.err.find('File already exists') != -1):
-                raise py.error.EEXIST(self)
-            raise
-        return out
-
-    def _svnpopenauth(self, cmd):
-        """ execute an svn command, return a pipe for reading stdin """
-        cmd = svncommon.fixlocale() + cmd
-        if self.auth is not None:
-            cmd += ' ' + self.auth.makecmdoptions()
-        return self._popen(cmd)
-
-    def _popen(self, cmd):
-        return os.popen(cmd)
-
-    def _encodedurl(self):
-        return self._escape(self.strpath)
-
-    def _norev_delentry(self, path):
-        auth = self.auth and self.auth.makecmdoptions() or None
-        self._lsnorevcache.delentry((str(path), auth))
-
-    def open(self, mode='r'):
-        """ return an opened file with the given mode. """
-        if mode not in ("r", "rU",):
-            raise ValueError("mode %r not supported" % (mode,))
-        assert self.check(file=1) # svn cat returns an empty file otherwise
-        if self.rev is None:
-            return self._svnpopenauth('svn cat "%s"' % (
-                                      self._escape(self.strpath), ))
-        else:
-            return self._svnpopenauth('svn cat -r %s "%s"' % (
-                                      self.rev, self._escape(self.strpath)))
-
-    def dirpath(self, *args, **kwargs):
-        """ return the directory path of the current path joined
-            with any given path arguments.
-        """
-        l = self.strpath.split(self.sep)
-        if len(l) < 4:
-            raise py.error.EINVAL(self, "base is not valid")
-        elif len(l) == 4:
-            return self.join(*args, **kwargs)
-        else:
-            return self.new(basename='').join(*args, **kwargs)
-
-    # modifying methods (cache must be invalidated)
-    def mkdir(self, *args, **kwargs):
-        """ create & return the directory joined with args.
-        pass a 'msg' keyword argument to set the commit message.
-        """
-        commit_msg = kwargs.get('msg', "mkdir by py lib invocation")
-        createpath = self.join(*args)
-        createpath._svnwrite('mkdir', '-m', commit_msg)
-        self._norev_delentry(createpath.dirpath())
-        return createpath
-
-    def copy(self, target, msg='copied by py lib invocation'):
-        """ copy path to target with checkin message msg."""
-        if getattr(target, 'rev', None) is not None:
-            raise py.error.EINVAL(target, "revisions are immutable")
-        self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg,
-                             self._escape(self), self._escape(target)))
-        self._norev_delentry(target.dirpath())
-
-    def rename(self, target, msg="renamed by py lib invocation"):
-        """ rename this path to target with checkin message msg. """
-        if getattr(self, 'rev', None) is not None:
-            raise py.error.EINVAL(self, "revisions are immutable")
-        self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %(
-                             msg, self._escape(self), self._escape(target)))
-        self._norev_delentry(self.dirpath())
-        self._norev_delentry(self)
-
-    def remove(self, rec=1, msg='removed by py lib invocation'):
-        """ remove a file or directory (or a directory tree if rec=1) with
-checkin message msg."""
-        if self.rev is not None:
-            raise py.error.EINVAL(self, "revisions are immutable")
-        self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self)))
-        self._norev_delentry(self.dirpath())
-
-    def export(self, topath):
-        """ export to a local path
-
-            topath should not exist prior to calling this, returns a
-            py.path.local instance
-        """
-        topath = py.path.local(topath)
-        args = ['"%s"' % (self._escape(self),),
-                '"%s"' % (self._escape(topath),)]
-        if self.rev is not None:
-            args = ['-r', str(self.rev)] + args
-        self._svncmdexecauth('svn export %s' % (' '.join(args),))
-        return topath
-
-    def ensure(self, *args, **kwargs):
-        """ ensure that an args-joined path exists (by default as
-            a file). If you specify a keyword argument 'dir=True'
-            then the path is forced to be a directory path.
-        """
-        if getattr(self, 'rev', None) is not None:
-            raise py.error.EINVAL(self, "revisions are immutable")
-        target = self.join(*args)
-        dir = kwargs.get('dir', 0)
-        for x in target.parts(reverse=True):
-            if x.check():
-                break
-        else:
-            raise py.error.ENOENT(target, "has not any valid base!")
-        if x == target:
-            if not x.check(dir=dir):
-                raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x)
-            return x
-        tocreate = target.relto(x)
-        basename = tocreate.split(self.sep, 1)[0]
-        tempdir = py.path.local.mkdtemp()
-        try:
-            tempdir.ensure(tocreate, dir=dir)
-            cmd = 'svn import -m "%s" "%s" "%s"' % (
-                    "ensure %s" % self._escape(tocreate),
-                    self._escape(tempdir.join(basename)),
-                    x.join(basename)._encodedurl())
-            self._svncmdexecauth(cmd)
-            self._norev_delentry(x)
-        finally:
-            tempdir.remove()
-        return target
-
-    # end of modifying methods
-    def _propget(self, name):
-        res = self._svnwithrev('propget', name)
-        return res[:-1] # strip trailing newline
-
-    def _proplist(self):
-        res = self._svnwithrev('proplist')
-        lines = res.split('\n')
-        lines = [x.strip() for x in lines[1:]]
-        return svncommon.PropListDict(self, lines)
-
-    def info(self):
-        """ return an Info structure with svn-provided information. """
-        parent = self.dirpath()
-        nameinfo_seq = parent._listdir_nameinfo()
-        bn = self.basename
-        for name, info in nameinfo_seq:
-            if name == bn:
-                return info
-        raise py.error.ENOENT(self)
-
-
-    def _listdir_nameinfo(self):
-        """ return sequence of name-info directory entries of self """
-        def builder():
-            try:
-                res = self._svnwithrev('ls', '-v')
-            except process.cmdexec.Error:
-                e = sys.exc_info()[1]
-                if e.err.find('non-existent in that revision') != -1:
-                    raise py.error.ENOENT(self, e.err)
-                elif e.err.find("E200009:") != -1:
-                    raise py.error.ENOENT(self, e.err)
-                elif e.err.find('File not found') != -1:
-                    raise py.error.ENOENT(self, e.err)
-                elif e.err.find('not part of a repository')!=-1:
-                    raise py.error.ENOENT(self, e.err)
-                elif e.err.find('Unable to open')!=-1:
-                    raise py.error.ENOENT(self, e.err)
-                elif e.err.lower().find('method not allowed')!=-1:
-                    raise py.error.EACCES(self, e.err)
-                raise py.error.Error(e.err)
-            lines = res.split('\n')
-            nameinfo_seq = []
-            for lsline in lines:
-                if lsline:
-                    info = InfoSvnCommand(lsline)
-                    if info._name != '.':  # svn 1.5 produces '.' dirs,
-                        nameinfo_seq.append((info._name, info))
-            nameinfo_seq.sort()
-            return nameinfo_seq
-        auth = self.auth and self.auth.makecmdoptions() or None
-        if self.rev is not None:
-            return self._lsrevcache.getorbuild((self.strpath, self.rev, auth),
-                                               builder)
-        else:
-            return self._lsnorevcache.getorbuild((self.strpath, auth),
-                                                 builder)
-
-    def listdir(self, fil=None, sort=None):
-        """ list directory contents, possibly filter by the given fil func
-            and possibly sorted.
-        """
-        if isinstance(fil, str):
-            fil = common.FNMatcher(fil)
-        nameinfo_seq = self._listdir_nameinfo()
-        if len(nameinfo_seq) == 1:
-            name, info = nameinfo_seq[0]
-            if name == self.basename and info.kind == 'file':
-                #if not self.check(dir=1):
-                raise py.error.ENOTDIR(self)
-        paths = [self.join(name) for (name, info) in nameinfo_seq]
-        if fil:
-            paths = [x for x in paths if fil(x)]
-        self._sortlist(paths, sort)
-        return paths
-
-
-    def log(self, rev_start=None, rev_end=1, verbose=False):
-        """ return a list of LogEntry instances for this path.
-rev_start is the starting revision (defaulting to the first one).
-rev_end is the last revision (defaulting to HEAD).
-if verbose is True, then the LogEntry instances also know which files changed.
-"""
-        assert self.check() #make it simpler for the pipe
-        rev_start = rev_start is None and "HEAD" or rev_start
-        rev_end = rev_end is None and "HEAD" or rev_end
-
-        if rev_start == "HEAD" and rev_end == 1:
-            rev_opt = ""
-        else:
-            rev_opt = "-r %s:%s" % (rev_start, rev_end)
-        verbose_opt = verbose and "-v" or ""
-        xmlpipe =  self._svnpopenauth('svn log --xml %s %s "%s"' %
-                                      (rev_opt, verbose_opt, self.strpath))
-        from xml.dom import minidom
-        tree = minidom.parse(xmlpipe)
-        result = []
-        for logentry in filter(None, tree.firstChild.childNodes):
-            if logentry.nodeType == logentry.ELEMENT_NODE:
-                result.append(svncommon.LogEntry(logentry))
-        return result
-
-#01234567890123456789012345678901234567890123467
-#   2256      hpk        165 Nov 24 17:55 __init__.py
-# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!!
-#   1312 johnny           1627 May 05 14:32 test_decorators.py
-#
-class InfoSvnCommand:
-    # the '0?' part in the middle is an indication of whether the resource is
-    # locked, see 'svn help ls'
-    lspattern = re.compile(
-        r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? '
-            '*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$')
-    def __init__(self, line):
-        # this is a typical line from 'svn ls http://...'
-        #_    1127      jum        0 Jul 13 15:28 branch/
-        match = self.lspattern.match(line)
-        data = match.groupdict()
-        self._name = data['file']
-        if self._name[-1] == '/':
-            self._name = self._name[:-1]
-            self.kind = 'dir'
-        else:
-            self.kind = 'file'
-        #self.has_props = l.pop(0) == 'P'
-        self.created_rev = int(data['rev'])
-        self.last_author = data['author']
-        self.size = data['size'] and int(data['size']) or 0
-        self.mtime = parse_time_with_missing_year(data['date'])
-        self.time = self.mtime * 1000000
-
-    def __eq__(self, other):
-        return self.__dict__ == other.__dict__
-
-
-#____________________________________________________
-#
-# helper functions
-#____________________________________________________
-def parse_time_with_missing_year(timestr):
-    """ analyze the time part from a single line of "svn ls -v"
-    the svn output doesn't show the year makes the 'timestr'
-    ambigous.
-    """
-    import calendar
-    t_now = time.gmtime()
-
-    tparts = timestr.split()
-    month = time.strptime(tparts.pop(0), '%b')[1]
-    day = time.strptime(tparts.pop(0), '%d')[2]
-    last = tparts.pop(0) # year or hour:minute
-    try:
-        if ":" in last:
-            raise ValueError()
-        year = time.strptime(last, '%Y')[0]
-        hour = minute = 0
-    except ValueError:
-        hour, minute = time.strptime(last, '%H:%M')[3:5]
-        year = t_now[0]
-
-        t_result = (year, month, day, hour, minute, 0,0,0,0)
-        if t_result > t_now:
-            year -= 1
-    t_result = (year, month, day, hour, minute, 0,0,0,0)
-    return calendar.timegm(t_result)
-
-class PathEntry:
-    def __init__(self, ppart):
-        self.strpath = ppart.firstChild.nodeValue.encode('UTF-8')
-        self.action = ppart.getAttribute('action').encode('UTF-8')
-        if self.action == 'A':
-            self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8')
-            if self.copyfrom_path:
-                self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))
-
+"""
+module defining a subversion path object based on the external
+command 'svn'. This modules aims to work with svn 1.3 and higher
+but might also interact well with earlier versions.
+"""
+
+import os, sys, time, re
+import py
+from py import path, process
+from py._path import common
+from py._path import svnwc as svncommon
+from py._path.cacheutil import BuildcostAccessCache, AgingCache
+
+DEBUG=False
+
+class SvnCommandPath(svncommon.SvnPathBase):
+    """ path implementation that offers access to (possibly remote) subversion
+    repositories. """
+
+    _lsrevcache = BuildcostAccessCache(maxentries=128)
+    _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0)
+
+    def __new__(cls, path, rev=None, auth=None):
+        self = object.__new__(cls)
+        if isinstance(path, cls):
+            rev = path.rev
+            auth = path.auth
+            path = path.strpath
+        svncommon.checkbadchars(path)
+        path = path.rstrip('/')
+        self.strpath = path
+        self.rev = rev
+        self.auth = auth
+        return self
+
+    def __repr__(self):
+        if self.rev == -1:
+            return 'svnurl(%r)' % self.strpath
+        else:
+            return 'svnurl(%r, %r)' % (self.strpath, self.rev)
+
+    def _svnwithrev(self, cmd, *args):
+        """ execute an svn command, append our own url and revision """
+        if self.rev is None:
+            return self._svnwrite(cmd, *args)
+        else:
+            args = ['-r', self.rev] + list(args)
+            return self._svnwrite(cmd, *args)
+
+    def _svnwrite(self, cmd, *args):
+        """ execute an svn command, append our own url """
+        l = ['svn %s' % cmd]
+        args = ['"%s"' % self._escape(item) for item in args]
+        l.extend(args)
+        l.append('"%s"' % self._encodedurl())
+        # fixing the locale because we can't otherwise parse
+        string = " ".join(l)
+        if DEBUG:
+            print("execing %s" % string)
+        out = self._svncmdexecauth(string)
+        return out
+
+    def _svncmdexecauth(self, cmd):
+        """ execute an svn command 'as is' """
+        cmd = svncommon.fixlocale() + cmd
+        if self.auth is not None:
+            cmd += ' ' + self.auth.makecmdoptions()
+        return self._cmdexec(cmd)
+
+    def _cmdexec(self, cmd):
+        try:
+            out = process.cmdexec(cmd)
+        except py.process.cmdexec.Error:
+            e = sys.exc_info()[1]
+            if (e.err.find('File Exists') != -1 or
+                            e.err.find('File already exists') != -1):
+                raise py.error.EEXIST(self)
+            raise
+        return out
+
+    def _svnpopenauth(self, cmd):
+        """ execute an svn command, return a pipe for reading stdin """
+        cmd = svncommon.fixlocale() + cmd
+        if self.auth is not None:
+            cmd += ' ' + self.auth.makecmdoptions()
+        return self._popen(cmd)
+
+    def _popen(self, cmd):
+        return os.popen(cmd)
+
+    def _encodedurl(self):
+        return self._escape(self.strpath)
+
+    def _norev_delentry(self, path):
+        auth = self.auth and self.auth.makecmdoptions() or None
+        self._lsnorevcache.delentry((str(path), auth))
+
+    def open(self, mode='r'):
+        """ return an opened file with the given mode. """
+        if mode not in ("r", "rU",):
+            raise ValueError("mode %r not supported" % (mode,))
+        assert self.check(file=1) # svn cat returns an empty file otherwise
+        if self.rev is None:
+            return self._svnpopenauth('svn cat "%s"' % (
+                                      self._escape(self.strpath), ))
+        else:
+            return self._svnpopenauth('svn cat -r %s "%s"' % (
+                                      self.rev, self._escape(self.strpath)))
+
+    def dirpath(self, *args, **kwargs):
+        """ return the directory path of the current path joined
+            with any given path arguments.
+        """
+        l = self.strpath.split(self.sep)
+        if len(l) < 4:
+            raise py.error.EINVAL(self, "base is not valid")
+        elif len(l) == 4:
+            return self.join(*args, **kwargs)
+        else:
+            return self.new(basename='').join(*args, **kwargs)
+
+    # modifying methods (cache must be invalidated)
+    def mkdir(self, *args, **kwargs):
+        """ create & return the directory joined with args.
+        pass a 'msg' keyword argument to set the commit message.
+        """
+        commit_msg = kwargs.get('msg', "mkdir by py lib invocation")
+        createpath = self.join(*args)
+        createpath._svnwrite('mkdir', '-m', commit_msg)
+        self._norev_delentry(createpath.dirpath())
+        return createpath
+
+    def copy(self, target, msg='copied by py lib invocation'):
+        """ copy path to target with checkin message msg."""
+        if getattr(target, 'rev', None) is not None:
+            raise py.error.EINVAL(target, "revisions are immutable")
+        self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg,
+                             self._escape(self), self._escape(target)))
+        self._norev_delentry(target.dirpath())
+
+    def rename(self, target, msg="renamed by py lib invocation"):
+        """ rename this path to target with checkin message msg. """
+        if getattr(self, 'rev', None) is not None:
+            raise py.error.EINVAL(self, "revisions are immutable")
+        self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %(
+                             msg, self._escape(self), self._escape(target)))
+        self._norev_delentry(self.dirpath())
+        self._norev_delentry(self)
+
+    def remove(self, rec=1, msg='removed by py lib invocation'):
+        """ remove a file or directory (or a directory tree if rec=1) with
+checkin message msg."""
+        if self.rev is not None:
+            raise py.error.EINVAL(self, "revisions are immutable")
+        self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self)))
+        self._norev_delentry(self.dirpath())
+
+    def export(self, topath):
+        """ export to a local path
+
+            topath should not exist prior to calling this, returns a
+            py.path.local instance
+        """
+        topath = py.path.local(topath)
+        args = ['"%s"' % (self._escape(self),),
+                '"%s"' % (self._escape(topath),)]
+        if self.rev is not None:
+            args = ['-r', str(self.rev)] + args
+        self._svncmdexecauth('svn export %s' % (' '.join(args),))
+        return topath
+
+    def ensure(self, *args, **kwargs):
+        """ ensure that an args-joined path exists (by default as
+            a file). If you specify a keyword argument 'dir=True'
+            then the path is forced to be a directory path.
+        """
+        if getattr(self, 'rev', None) is not None:
+            raise py.error.EINVAL(self, "revisions are immutable")
+        target = self.join(*args)
+        dir = kwargs.get('dir', 0)
+        for x in target.parts(reverse=True):
+            if x.check():
+                break
+        else:
+            raise py.error.ENOENT(target, "has not any valid base!")
+        if x == target:
+            if not x.check(dir=dir):
+                raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x)
+            return x
+        tocreate = target.relto(x)
+        basename = tocreate.split(self.sep, 1)[0]
+        tempdir = py.path.local.mkdtemp()
+        try:
+            tempdir.ensure(tocreate, dir=dir)
+            cmd = 'svn import -m "%s" "%s" "%s"' % (
+                    "ensure %s" % self._escape(tocreate),
+                    self._escape(tempdir.join(basename)),
+                    x.join(basename)._encodedurl())
+            self._svncmdexecauth(cmd)
+            self._norev_delentry(x)
+        finally:
+            tempdir.remove()
+        return target
+
+    # end of modifying methods
+    def _propget(self, name):
+        res = self._svnwithrev('propget', name)
+        return res[:-1] # strip trailing newline
+
+    def _proplist(self):
+        res = self._svnwithrev('proplist')
+        lines = res.split('\n')
+        lines = [x.strip() for x in lines[1:]]
+        return svncommon.PropListDict(self, lines)
+
+    def info(self):
+        """ return an Info structure with svn-provided information. """
+        parent = self.dirpath()
+        nameinfo_seq = parent._listdir_nameinfo()
+        bn = self.basename
+        for name, info in nameinfo_seq:
+            if name == bn:
+                return info
+        raise py.error.ENOENT(self)
+
+
+    def _listdir_nameinfo(self):
+        """ return sequence of name-info directory entries of self """
+        def builder():
+            try:
+                res = self._svnwithrev('ls', '-v')
+            except process.cmdexec.Error:
+                e = sys.exc_info()[1]
+                if e.err.find('non-existent in that revision') != -1:
+                    raise py.error.ENOENT(self, e.err)
+                elif e.err.find("E200009:") != -1:
+                    raise py.error.ENOENT(self, e.err)
+                elif e.err.find('File not found') != -1:
+                    raise py.error.ENOENT(self, e.err)
+                elif e.err.find('not part of a repository')!=-1:
+                    raise py.error.ENOENT(self, e.err)
+                elif e.err.find('Unable to open')!=-1:
+                    raise py.error.ENOENT(self, e.err)
+                elif e.err.lower().find('method not allowed')!=-1:
+                    raise py.error.EACCES(self, e.err)
+                raise py.error.Error(e.err)
+            lines = res.split('\n')
+            nameinfo_seq = []
+            for lsline in lines:
+                if lsline:
+                    info = InfoSvnCommand(lsline)
+                    if info._name != '.':  # svn 1.5 produces '.' dirs,
+                        nameinfo_seq.append((info._name, info))
+            nameinfo_seq.sort()
+            return nameinfo_seq
+        auth = self.auth and self.auth.makecmdoptions() or None
+        if self.rev is not None:
+            return self._lsrevcache.getorbuild((self.strpath, self.rev, auth),
+                                               builder)
+        else:
+            return self._lsnorevcache.getorbuild((self.strpath, auth),
+                                                 builder)
+
+    def listdir(self, fil=None, sort=None):
+        """ list directory contents, possibly filter by the given fil func
+            and possibly sorted.
+        """
+        if isinstance(fil, str):
+            fil = common.FNMatcher(fil)
+        nameinfo_seq = self._listdir_nameinfo()
+        if len(nameinfo_seq) == 1:
+            name, info = nameinfo_seq[0]
+            if name == self.basename and info.kind == 'file':
+                #if not self.check(dir=1):
+                raise py.error.ENOTDIR(self)
+        paths = [self.join(name) for (name, info) in nameinfo_seq]
+        if fil:
+            paths = [x for x in paths if fil(x)]
+        self._sortlist(paths, sort)
+        return paths
+
+
+    def log(self, rev_start=None, rev_end=1, verbose=False):
+        """ return a list of LogEntry instances for this path.
+rev_start is the starting revision (defaulting to the first one).
+rev_end is the last revision (defaulting to HEAD).
+if verbose is True, then the LogEntry instances also know which files changed.
+"""
+        assert self.check() #make it simpler for the pipe
+        rev_start = rev_start is None and "HEAD" or rev_start
+        rev_end = rev_end is None and "HEAD" or rev_end
+
+        if rev_start == "HEAD" and rev_end == 1:
+            rev_opt = ""
+        else:
+            rev_opt = "-r %s:%s" % (rev_start, rev_end)
+        verbose_opt = verbose and "-v" or ""
+        xmlpipe =  self._svnpopenauth('svn log --xml %s %s "%s"' %
+                                      (rev_opt, verbose_opt, self.strpath))
+        from xml.dom import minidom
+        tree = minidom.parse(xmlpipe)
+        result = []
+        for logentry in filter(None, tree.firstChild.childNodes):
+            if logentry.nodeType == logentry.ELEMENT_NODE:
+                result.append(svncommon.LogEntry(logentry))
+        return result
+
+#01234567890123456789012345678901234567890123467
+#   2256      hpk        165 Nov 24 17:55 __init__.py
+# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!!
+#   1312 johnny           1627 May 05 14:32 test_decorators.py
+#
+class InfoSvnCommand:
+    # the '0?' part in the middle is an indication of whether the resource is
+    # locked, see 'svn help ls'
+    lspattern = re.compile(
+        r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? '
+            r'*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$')
+    def __init__(self, line):
+        # this is a typical line from 'svn ls http://...'
+        #_    1127      jum        0 Jul 13 15:28 branch/
+        match = self.lspattern.match(line)
+        data = match.groupdict()
+        self._name = data['file']
+        if self._name[-1] == '/':
+            self._name = self._name[:-1]
+            self.kind = 'dir'
+        else:
+            self.kind = 'file'
+        #self.has_props = l.pop(0) == 'P'
+        self.created_rev = int(data['rev'])
+        self.last_author = data['author']
+        self.size = data['size'] and int(data['size']) or 0
+        self.mtime = parse_time_with_missing_year(data['date'])
+        self.time = self.mtime * 1000000
+
+    def __eq__(self, other):
+        return self.__dict__ == other.__dict__
+
+
+#____________________________________________________
+#
+# helper functions
+#____________________________________________________
+def parse_time_with_missing_year(timestr):
+    """ analyze the time part from a single line of "svn ls -v"
+    the svn output doesn't show the year makes the 'timestr'
+    ambigous.
+    """
+    import calendar
+    t_now = time.gmtime()
+
+    tparts = timestr.split()
+    month = time.strptime(tparts.pop(0), '%b')[1]
+    day = time.strptime(tparts.pop(0), '%d')[2]
+    last = tparts.pop(0) # year or hour:minute
+    try:
+        if ":" in last:
+            raise ValueError()
+        year = time.strptime(last, '%Y')[0]
+        hour = minute = 0
+    except ValueError:
+        hour, minute = time.strptime(last, '%H:%M')[3:5]
+        year = t_now[0]
+
+        t_result = (year, month, day, hour, minute, 0,0,0,0)
+        if t_result > t_now:
+            year -= 1
+    t_result = (year, month, day, hour, minute, 0,0,0,0)
+    return calendar.timegm(t_result)
+
+class PathEntry:
+    def __init__(self, ppart):
+        self.strpath = ppart.firstChild.nodeValue.encode('UTF-8')
+        self.action = ppart.getAttribute('action').encode('UTF-8')
+        if self.action == 'A':
+            self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8')
+            if self.copyfrom_path:
+                self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))
+
--- a/third_party/python/py/py/_path/svnwc.py
+++ b/third_party/python/py/py/_path/svnwc.py
@@ -1,1240 +1,1240 @@
-"""
-svn-Command based Implementation of a Subversion WorkingCopy Path.
-
-  SvnWCCommandPath  is the main class.
-
-"""
-
-import os, sys, time, re, calendar
-import py
-import subprocess
-from py._path import common
-
-#-----------------------------------------------------------
-# Caching latest repository revision and repo-paths
-# (getting them is slow with the current implementations)
-#
-# XXX make mt-safe
-#-----------------------------------------------------------
-
-class cache:
-    proplist = {}
-    info = {}
-    entries = {}
-    prop = {}
-
-class RepoEntry:
-    def __init__(self, url, rev, timestamp):
-        self.url = url
-        self.rev = rev
-        self.timestamp = timestamp
-
-    def __str__(self):
-        return "repo: %s;%s  %s" %(self.url, self.rev, self.timestamp)
-
-class RepoCache:
-    """ The Repocache manages discovered repository paths
-    and their revisions.  If inside a timeout the cache
-    will even return the revision of the root.
-    """
-    timeout = 20 # seconds after which we forget that we know the last revision
-
-    def __init__(self):
-        self.repos = []
-
-    def clear(self):
-        self.repos = []
-
-    def put(self, url, rev, timestamp=None):
-        if rev is None:
-            return
-        if timestamp is None:
-            timestamp = time.time()
-
-        for entry in self.repos:
-            if url == entry.url:
-                entry.timestamp = timestamp
-                entry.rev = rev
-                #print "set repo", entry
-                break
-        else:
-            entry = RepoEntry(url, rev, timestamp)
-            self.repos.append(entry)
-            #print "appended repo", entry
-
-    def get(self, url):
-        now = time.time()
-        for entry in self.repos:
-            if url.startswith(entry.url):
-                if now < entry.timestamp + self.timeout:
-                    #print "returning immediate Etrny", entry
-                    return entry.url, entry.rev
-                return entry.url, -1
-        return url, -1
-
-repositories = RepoCache()
-
-
-# svn support code
-
-ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
-if sys.platform == "win32":
-    ALLOWED_CHARS += ":"
-ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
-
-def _getsvnversion(ver=[]):
-    try:
-        return ver[0]
-    except IndexError:
-        v = py.process.cmdexec("svn -q --version")
-        v.strip()
-        v = '.'.join(v.split('.')[:2])
-        ver.append(v)
-        return v
-
-def _escape_helper(text):
-    text = str(text)
-    if py.std.sys.platform != 'win32':
-        text = str(text).replace('$', '\\$')
-    return text
-
-def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
-    for c in str(text):
-        if c.isalnum():
-            continue
-        if c in allowed_chars:
-            continue
-        return True
-    return False
-
-def checkbadchars(url):
-    # (hpk) not quite sure about the exact purpose, guido w.?
-    proto, uri = url.split("://", 1)
-    if proto != "file":
-        host, uripath = uri.split('/', 1)
-        # only check for bad chars in the non-protocol parts
-        if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \
-            or _check_for_bad_chars(uripath, ALLOWED_CHARS)):
-            raise ValueError("bad char in %r" % (url, ))
-
-
-#_______________________________________________________________
-
-class SvnPathBase(common.PathBase):
-    """ Base implementation for SvnPath implementations. """
-    sep = '/'
-
-    def _geturl(self):
-        return self.strpath
-    url = property(_geturl, None, None, "url of this svn-path.")
-
-    def __str__(self):
-        """ return a string representation (including rev-number) """
-        return self.strpath
-
-    def __hash__(self):
-        return hash(self.strpath)
-
-    def new(self, **kw):
-        """ create a modified version of this path. A 'rev' argument
-            indicates a new revision.
-            the following keyword arguments modify various path parts::
-
-              http://host.com/repo/path/file.ext
-              |-----------------------|          dirname
-                                        |------| basename
-                                        |--|     purebasename
-                                            |--| ext
-        """
-        obj = object.__new__(self.__class__)
-        obj.rev = kw.get('rev', self.rev)
-        obj.auth = kw.get('auth', self.auth)
-        dirname, basename, purebasename, ext = self._getbyspec(
-             "dirname,basename,purebasename,ext")
-        if 'basename' in kw:
-            if 'purebasename' in kw or 'ext' in kw:
-                raise ValueError("invalid specification %r" % kw)
-        else:
-            pb = kw.setdefault('purebasename', purebasename)
-            ext = kw.setdefault('ext', ext)
-            if ext and not ext.startswith('.'):
-                ext = '.' + ext
-            kw['basename'] = pb + ext
-
-        kw.setdefault('dirname', dirname)
-        kw.setdefault('sep', self.sep)
-        if kw['basename']:
-            obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
-        else:
-            obj.strpath = "%(dirname)s" % kw
-        return obj
-
-    def _getbyspec(self, spec):
-        """ get specified parts of the path.  'arg' is a string
-            with comma separated path parts. The parts are returned
-            in exactly the order of the specification.
-
-            you may specify the following parts:
-
-            http://host.com/repo/path/file.ext
-            |-----------------------|          dirname
-                                      |------| basename
-                                      |--|     purebasename
-                                          |--| ext
-        """
-        res = []
-        parts = self.strpath.split(self.sep)
-        for name in spec.split(','):
-            name = name.strip()
-            if name == 'dirname':
-                res.append(self.sep.join(parts[:-1]))
-            elif name == 'basename':
-                res.append(parts[-1])
-            else:
-                basename = parts[-1]
-                i = basename.rfind('.')
-                if i == -1:
-                    purebasename, ext = basename, ''
-                else:
-                    purebasename, ext = basename[:i], basename[i:]
-                if name == 'purebasename':
-                    res.append(purebasename)
-                elif name == 'ext':
-                    res.append(ext)
-                else:
-                    raise NameError("Don't know part %r" % name)
-        return res
-
-    def __eq__(self, other):
-        """ return true if path and rev attributes each match """
-        return (str(self) == str(other) and
-               (self.rev == other.rev or self.rev == other.rev))
-
-    def __ne__(self, other):
-        return not self == other
-
-    def join(self, *args):
-        """ return a new Path (with the same revision) which is composed
-            of the self Path followed by 'args' path components.
-        """
-        if not args:
-            return self
-
-        args = tuple([arg.strip(self.sep) for arg in args])
-        parts = (self.strpath, ) + args
-        newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
-        return newpath
-
-    def propget(self, name):
-        """ return the content of the given property. """
-        value = self._propget(name)
-        return value
-
-    def proplist(self):
-        """ list all property names. """
-        content = self._proplist()
-        return content
-
-    def size(self):
-        """ Return the size of the file content of the Path. """
-        return self.info().size
-
-    def mtime(self):
-        """ Return the last modification time of the file. """
-        return self.info().mtime
-
-    # shared help methods
-
-    def _escape(self, cmd):
-        return _escape_helper(cmd)
-
-
-    #def _childmaxrev(self):
-    #    """ return maximum revision number of childs (or self.rev if no childs) """
-    #    rev = self.rev
-    #    for name, info in self._listdir_nameinfo():
-    #        rev = max(rev, info.created_rev)
-    #    return rev
-
-    #def _getlatestrevision(self):
-    #    """ return latest repo-revision for this path. """
-    #    url = self.strpath
-    #    path = self.__class__(url, None)
-    #
-    #    # we need a long walk to find the root-repo and revision
-    #    while 1:
-    #        try:
-    #            rev = max(rev, path._childmaxrev())
-    #            previous = path
-    #            path = path.dirpath()
-    #        except (IOError, process.cmdexec.Error):
-    #            break
-    #    if rev is None:
-    #        raise IOError, "could not determine newest repo revision for %s" % self
-    #    return rev
-
-    class Checkers(common.Checkers):
-        def dir(self):
-            try:
-                return self.path.info().kind == 'dir'
-            except py.error.Error:
-                return self._listdirworks()
-
-        def _listdirworks(self):
-            try:
-                self.path.listdir()
-            except py.error.ENOENT:
-                return False
-            else:
-                return True
-
-        def file(self):
-            try:
-                return self.path.info().kind == 'file'
-            except py.error.ENOENT:
-                return False
-
-        def exists(self):
-            try:
-                return self.path.info()
-            except py.error.ENOENT:
-                return self._listdirworks()
-
-def parse_apr_time(timestr):
-    i = timestr.rfind('.')
-    if i == -1:
-        raise ValueError("could not parse %s" % timestr)
-    timestr = timestr[:i]
-    parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
-    return time.mktime(parsedtime)
-
-class PropListDict(dict):
-    """ a Dictionary which fetches values (InfoSvnCommand instances) lazily"""
-    def __init__(self, path, keynames):
-        dict.__init__(self, [(x, None) for x in keynames])
-        self.path = path
-
-    def __getitem__(self, key):
-        value = dict.__getitem__(self, key)
-        if value is None:
-            value = self.path.propget(key)
-            dict.__setitem__(self, key, value)
-        return value
-
-def fixlocale():
-    if sys.platform != 'win32':
-        return 'LC_ALL=C '
-    return ''
-
-# some nasty chunk of code to solve path and url conversion and quoting issues
-ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ')
-if os.sep in ILLEGAL_CHARS:
-    ILLEGAL_CHARS.remove(os.sep)
-ISWINDOWS = sys.platform == 'win32'
-_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
-def _check_path(path):
-    illegal = ILLEGAL_CHARS[:]
-    sp = path.strpath
-    if ISWINDOWS:
-        illegal.remove(':')
-        if not _reg_allow_disk.match(sp):
-            raise ValueError('path may not contain a colon (:)')
-    for char in sp:
-        if char not in string.printable or char in illegal:
-            raise ValueError('illegal character %r in path' % (char,))
-
-def path_to_fspath(path, addat=True):
-    _check_path(path)
-    sp = path.strpath
-    if addat and path.rev != -1:
-        sp = '%s@%s' % (sp, path.rev)
-    elif addat:
-        sp = '%s@HEAD' % (sp,)
-    return sp
-
-def url_from_path(path):
-    fspath = path_to_fspath(path, False)
-    quote = py.std.urllib.quote
-    if ISWINDOWS:
-        match = _reg_allow_disk.match(fspath)
-        fspath = fspath.replace('\\', '/')
-        if match.group(1):
-            fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
-                                quote(fspath[len(match.group(1)):]))
-        else:
-            fspath = quote(fspath)
-    else:
-        fspath = quote(fspath)
-    if path.rev != -1:
-        fspath = '%s@%s' % (fspath, path.rev)
-    else:
-        fspath = '%s@HEAD' % (fspath,)
-    return 'file://%s' % (fspath,)
-
-class SvnAuth(object):
-    """ container for auth information for Subversion """
-    def __init__(self, username, password, cache_auth=True, interactive=True):
-        self.username = username
-        self.password = password
-        self.cache_auth = cache_auth
-        self.interactive = interactive
-
-    def makecmdoptions(self):
-        uname = self.username.replace('"', '\\"')
-        passwd = self.password.replace('"', '\\"')
-        ret = []
-        if uname:
-            ret.append('--username="%s"' % (uname,))
-        if passwd:
-            ret.append('--password="%s"' % (passwd,))
-        if not self.cache_auth:
-            ret.append('--no-auth-cache')
-        if not self.interactive:
-            ret.append('--non-interactive')
-        return ' '.join(ret)
-
-    def __str__(self):
-        return "<SvnAuth username=%s ...>" %(self.username,)
-
-rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
-
-class SvnWCCommandPath(common.PathBase):
-    """ path implementation offering access/modification to svn working copies.
-        It has methods similar to the functions in os.path and similar to the
-        commands of the svn client.
-    """
-    sep = os.sep
-
-    def __new__(cls, wcpath=None, auth=None):
-        self = object.__new__(cls)
-        if isinstance(wcpath, cls):
-            if wcpath.__class__ == cls:
-                return wcpath
-            wcpath = wcpath.localpath
-        if _check_for_bad_chars(str(wcpath),
-                                          ALLOWED_CHARS):
-            raise ValueError("bad char in wcpath %s" % (wcpath, ))
-        self.localpath = py.path.local(wcpath)
-        self.auth = auth
-        return self
-
-    strpath = property(lambda x: str(x.localpath), None, None, "string path")
-    rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
-
-    def __eq__(self, other):
-        return self.localpath == getattr(other, 'localpath', None)
-
-    def _geturl(self):
-        if getattr(self, '_url', None) is None:
-            info = self.info()
-            self._url = info.url #SvnPath(info.url, info.rev)
-        assert isinstance(self._url, py.builtin._basestring)
-        return self._url
-
-    url = property(_geturl, None, None, "url of this WC item")
-
-    def _escape(self, cmd):
-        return _escape_helper(cmd)
-
-    def dump(self, obj):
-        """ pickle object into path location"""
-        return self.localpath.dump(obj)
-
-    def svnurl(self):
-        """ return current SvnPath for this WC-item. """
-        info = self.info()
-        return py.path.svnurl(info.url)
-
-    def __repr__(self):
-        return "svnwc(%r)" % (self.strpath) # , self._url)
-
-    def __str__(self):
-        return str(self.localpath)
-
-    def _makeauthoptions(self):
-        if self.auth is None:
-            return ''
-        return self.auth.makecmdoptions()
-
-    def _authsvn(self, cmd, args=None):
-        args = args and list(args) or []
-        args.append(self._makeauthoptions())
-        return self._svn(cmd, *args)
-
-    def _svn(self, cmd, *args):
-        l = ['svn %s' % cmd]
-        args = [self._escape(item) for item in args]
-        l.extend(args)
-        l.append('"%s"' % self._escape(self.strpath))
-        # try fixing the locale because we can't otherwise parse
-        string = fixlocale() + " ".join(l)
-        try:
-            try:
-                key = 'LC_MESSAGES'
-                hold = os.environ.get(key)
-                os.environ[key] = 'C'
-                out = py.process.cmdexec(string)
-            finally:
-                if hold:
-                    os.environ[key] = hold
-                else:
-                    del os.environ[key]
-        except py.process.cmdexec.Error:
-            e = sys.exc_info()[1]
-            strerr = e.err.lower()
-            if strerr.find('not found') != -1:
-                raise py.error.ENOENT(self)
-            elif strerr.find("E200009:") != -1:
-                raise py.error.ENOENT(self)
-            if (strerr.find('file exists') != -1 or
-                strerr.find('file already exists') != -1 or
-                strerr.find('w150002:') != -1 or
-                strerr.find("can't create directory") != -1):
-                raise py.error.EEXIST(strerr) #self)
-            raise
-        return out
-
-    def switch(self, url):
-        """ switch to given URL. """
-        self._authsvn('switch', [url])
-
-    def checkout(self, url=None, rev=None):
-        """ checkout from url to local wcpath. """
-        args = []
-        if url is None:
-            url = self.url
-        if rev is None or rev == -1:
-            if (py.std.sys.platform != 'win32' and
-                    _getsvnversion() == '1.3'):
-                url += "@HEAD"
-        else:
-            if _getsvnversion() == '1.3':
-                url += "@%d" % rev
-            else:
-                args.append('-r' + str(rev))
-        args.append(url)
-        self._authsvn('co', args)
-
-    def update(self, rev='HEAD', interactive=True):
-        """ update working copy item to given revision. (None -> HEAD). """
-        opts = ['-r', rev]
-        if not interactive:
-            opts.append("--non-interactive")
-        self._authsvn('up', opts)
-
-    def write(self, content, mode='w'):
-        """ write content into local filesystem wc. """
-        self.localpath.write(content, mode)
-
-    def dirpath(self, *args):
-        """ return the directory Path of the current Path. """
-        return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
-
-    def _ensuredirs(self):
-        parent = self.dirpath()
-        if parent.check(dir=0):
-            parent._ensuredirs()
-        if self.check(dir=0):
-            self.mkdir()
-        return self
-
-    def ensure(self, *args, **kwargs):
-        """ ensure that an args-joined path exists (by default as
-            a file). if you specify a keyword argument 'directory=True'
-            then the path is forced  to be a directory path.
-        """
-        p = self.join(*args)
-        if p.check():
-            if p.check(versioned=False):
-                p.add()
-            return p
-        if kwargs.get('dir', 0):
-            return p._ensuredirs()
-        parent = p.dirpath()
-        parent._ensuredirs()
-        p.write("")
-        p.add()
-        return p
-
-    def mkdir(self, *args):
-        """ create & return the directory joined with args. """
-        if args:
-            return self.join(*args).mkdir()
-        else:
-            self._svn('mkdir')
-            return self
-
-    def add(self):
-        """ add ourself to svn """
-        self._svn('add')
-
-    def remove(self, rec=1, force=1):
-        """ remove a file or a directory tree. 'rec'ursive is
-            ignored and considered always true (because of
-            underlying svn semantics.
-        """
-        assert rec, "svn cannot remove non-recursively"
-        if not self.check(versioned=True):
-            # not added to svn (anymore?), just remove
-            py.path.local(self).remove()
-            return
-        flags = []
-        if force:
-            flags.append('--force')
-        self._svn('remove', *flags)
-
-    def copy(self, target):
-        """ copy path to target."""
-        py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
-
-    def rename(self, target):
-        """ rename this path to target. """
-        py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
-
-    def lock(self):
-        """ set a lock (exclusive) on the resource """
-        out = self._authsvn('lock').strip()
-        if not out:
-            # warning or error, raise exception
-            raise ValueError("unknown error in svn lock command")
-
-    def unlock(self):
-        """ unset a previously set lock """
-        out = self._authsvn('unlock').strip()
-        if out.startswith('svn:'):
-            # warning or error, raise exception
-            raise Exception(out[4:])
-
-    def cleanup(self):
-        """ remove any locks from the resource """
-        # XXX should be fixed properly!!!
-        try:
-            self.unlock()
-        except:
-            pass
-
-    def status(self, updates=0, rec=0, externals=0):
-        """ return (collective) Status object for this file. """
-        # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
-        #             2201     2192        jum   test
-        # XXX
-        if externals:
-            raise ValueError("XXX cannot perform status() "
-                             "on external items yet")
-        else:
-            #1.2 supports: externals = '--ignore-externals'
-            externals = ''
-        if rec:
-            rec= ''
-        else:
-            rec = '--non-recursive'
-
-        # XXX does not work on all subversion versions
-        #if not externals:
-        #    externals = '--ignore-externals'
-
-        if updates:
-            updates = '-u'
-        else:
-            updates = ''
-
-        try:
-            cmd = 'status -v --xml --no-ignore %s %s %s' % (
-                    updates, rec, externals)
-            out = self._authsvn(cmd)
-        except py.process.cmdexec.Error:
-            cmd = 'status -v --no-ignore %s %s %s' % (
-                    updates, rec, externals)
-            out = self._authsvn(cmd)
-            rootstatus = WCStatus(self).fromstring(out, self)
-        else:
-            rootstatus = XMLWCStatus(self).fromstring(out, self)
-        return rootstatus
-
-    def diff(self, rev=None):
-        """ return a diff of the current path against revision rev (defaulting
-            to the last one).
-        """
-        args = []
-        if rev is not None:
-            args.append("-r %d" % rev)
-        out = self._authsvn('diff', args)
-        return out
-
-    def blame(self):
-        """ return a list of tuples of three elements:
-            (revision, commiter, line)
-        """
-        out = self._svn('blame')
-        result = []
-        blamelines = out.splitlines()
-        reallines = py.path.svnurl(self.url).readlines()
-        for i, (blameline, line) in enumerate(
-                zip(blamelines, reallines)):
-            m = rex_blame.match(blameline)
-            if not m:
-                raise ValueError("output line %r of svn blame does not match "
-                                 "expected format" % (line, ))
-            rev, name, _ = m.groups()
-            result.append((int(rev), name, line))
-        return result
-
-    _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
-    def commit(self, msg='', rec=1):
-        """ commit with support for non-recursive commits """
-        # XXX i guess escaping should be done better here?!?
-        cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
-        if not rec:
-            cmd += ' -N'
-        out = self._authsvn(cmd)
-        try:
-            del cache.info[self]
-        except KeyError:
-            pass
-        if out:
-            m = self._rex_commit.match(out)
-            return int(m.group(1))
-
-    def propset(self, name, value, *args):
-        """ set property name to value on this path. """
-        d = py.path.local.mkdtemp()
-        try:
-            p = d.join('value')
-            p.write(value)
-            self._svn('propset', name, '--file', str(p), *args)
-        finally:
-            d.remove()
-
-    def propget(self, name):
-        """ get property name on this path. """
-        res = self._svn('propget', name)
-        return res[:-1] # strip trailing newline
-
-    def propdel(self, name):
-        """ delete property name on this path. """
-        res = self._svn('propdel', name)
-        return res[:-1] # strip trailing newline
-
-    def proplist(self, rec=0):
-        """ return a mapping of property names to property values.
-If rec is True, then return a dictionary mapping sub-paths to such mappings.
-"""
-        if rec:
-            res = self._svn('proplist -R')
-            return make_recursive_propdict(self, res)
-        else:
-            res = self._svn('proplist')
-            lines = res.split('\n')
-            lines = [x.strip() for x in lines[1:]]
-            return PropListDict(self, lines)
-
-    def revert(self, rec=0):
-        """ revert the local changes of this path. if rec is True, do so
-recursively. """
-        if rec:
-            result = self._svn('revert -R')
-        else:
-            result = self._svn('revert')
-        return result
-
-    def new(self, **kw):
-        """ create a modified version of this path. A 'rev' argument
-            indicates a new revision.
-            the following keyword arguments modify various path parts:
-
-              http://host.com/repo/path/file.ext
-              |-----------------------|          dirname
-                                        |------| basename
-                                        |--|     purebasename
-                                            |--| ext
-        """
-        if kw:
-            localpath = self.localpath.new(**kw)
-        else:
-            localpath = self.localpath
-        return self.__class__(localpath, auth=self.auth)
-
-    def join(self, *args, **kwargs):
-        """ return a new Path (with the same revision) which is composed
-            of the self Path followed by 'args' path components.
-        """
-        if not args:
-            return self
-        localpath = self.localpath.join(*args, **kwargs)
-        return self.__class__(localpath, auth=self.auth)
-
-    def info(self, usecache=1):
-        """ return an Info structure with svn-provided information. """
-        info = usecache and cache.info.get(self)
-        if not info:
-            try:
-                output = self._svn('info')
-            except py.process.cmdexec.Error:
-                e = sys.exc_info()[1]
-                if e.err.find('Path is not a working copy directory') != -1:
-                    raise py.error.ENOENT(self, e.err)
-                elif e.err.find("is not under version control") != -1:
-                    raise py.error.ENOENT(self, e.err)
-                raise
-            # XXX SVN 1.3 has output on stderr instead of stdout (while it does
-            # return 0!), so a bit nasty, but we assume no output is output
-            # to stderr...
-            if (output.strip() == '' or
-                    output.lower().find('not a versioned resource') != -1):
-                raise py.error.ENOENT(self, output)
-            info = InfoSvnWCCommand(output)
-
-            # Can't reliably compare on Windows without access to win32api
-            if py.std.sys.platform != 'win32':
-                if info.path != self.localpath:
-                    raise py.error.ENOENT(self, "not a versioned resource:" +
-                            " %s != %s" % (info.path, self.localpath))
-            cache.info[self] = info
-        return info
-
-    def listdir(self, fil=None, sort=None):
-        """ return a sequence of Paths.
-
-        listdir will return either a tuple or a list of paths
-        depending on implementation choices.
-        """
-        if isinstance(fil, str):
-            fil = common.FNMatcher(fil)
-        # XXX unify argument naming with LocalPath.listdir
-        def notsvn(path):
-            return path.basename != '.svn'
-
-        paths = []
-        for localpath in self.localpath.listdir(notsvn):
-            p = self.__class__(localpath, auth=self.auth)
-            if notsvn(p) and (not fil or fil(p)):
-                paths.append(p)
-        self._sortlist(paths, sort)
-        return paths
-
-    def open(self, mode='r'):
-        """ return an opened file with the given mode. """
-        return open(self.strpath, mode)
-
-    def _getbyspec(self, spec):
-        return self.localpath._getbyspec(spec)
-
-    class Checkers(py.path.local.Checkers):
-        def __init__(self, path):
-            self.svnwcpath = path
-            self.path = path.localpath
-        def versioned(self):
-            try:
-                s = self.svnwcpath.info()
-            except (py.error.ENOENT, py.error.EEXIST):
-                return False
-            except py.process.cmdexec.Error:
-                e = sys.exc_info()[1]
-                if e.err.find('is not a working copy')!=-1:
-                    return False
-                if e.err.lower().find('not a versioned resource') != -1:
-                    return False
-                raise
-            else:
-                return True
-
-    def log(self, rev_start=None, rev_end=1, verbose=False):
-        """ return a list of LogEntry instances for this path.
-rev_start is the starting revision (defaulting to the first one).
-rev_end is the last revision (defaulting to HEAD).
-if verbose is True, then the LogEntry instances also know which files changed.
-"""
-        assert self.check()   # make it simpler for the pipe
-        rev_start = rev_start is None and "HEAD" or rev_start
-        rev_end = rev_end is None and "HEAD" or rev_end
-        if rev_start == "HEAD" and rev_end == 1:
-                rev_opt = ""
-        else:
-            rev_opt = "-r %s:%s" % (rev_start, rev_end)
-        verbose_opt = verbose and "-v" or ""
-        locale_env = fixlocale()
-        # some blather on stderr
-        auth_opt = self._makeauthoptions()
-        #stdin, stdout, stderr  = os.popen3(locale_env +
-        #                                   'svn log --xml %s %s %s "%s"' % (
-        #                                    rev_opt, verbose_opt, auth_opt,
-        #                                    self.strpath))
-        cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
-            rev_opt, verbose_opt, auth_opt, self.strpath)
-
-        popen = subprocess.Popen(cmd,
-                    stdout=subprocess.PIPE,
-                    stderr=subprocess.PIPE,
-                    shell=True,
-        )
-        stdout, stderr = popen.communicate()
-        stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
-        minidom,ExpatError = importxml()
-        try:
-            tree = minidom.parseString(stdout)
-        except ExpatError:
-            raise ValueError('no such revision')
-        result = []
-        for logentry in filter(None, tree.firstChild.childNodes):
-            if logentry.nodeType == logentry.ELEMENT_NODE:
-                result.append(LogEntry(logentry))
-        return result
-
-    def size(self):
-        """ Return the size of the file content of the Path. """
-        return self.info().size
-
-    def mtime(self):
-        """ Return the last modification time of the file. """
-        return self.info().mtime
-
-    def __hash__(self):
-        return hash((self.strpath, self.__class__, self.auth))
-
-
-class WCStatus:
-    attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
-                'deleted', 'prop_modified', 'unknown', 'update_available',
-                'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
-                )
-
-    def __init__(self, wcpath, rev=None, modrev=None, author=None):
-        self.wcpath = wcpath
-        self.rev = rev
-        self.modrev = modrev
-        self.author = author
-
-        for name in self.attrnames:
-            setattr(self, name, [])
-
-    def allpath(self, sort=True, **kw):
-        d = {}
-        for name in self.attrnames:
-            if name not in kw or kw[name]:
-                for path in getattr(self, name):
-                    d[path] = 1
-        l = d.keys()
-        if sort:
-            l.sort()
-        return l
-
-    # XXX a bit scary to assume there's always 2 spaces between username and
-    # path, however with win32 allowing spaces in user names there doesn't
-    # seem to be a more solid approach :(
-    _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
-
-    def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
-        """ return a new WCStatus object from data 's'
-        """
-        rootstatus = WCStatus(rootwcpath, rev, modrev, author)
-        update_rev = None
-        for line in data.split('\n'):
-            if not line.strip():
-                continue
-            #print "processing %r" % line
-            flags, rest = line[:8], line[8:]
-            # first column
-            c0,c1,c2,c3,c4,c5,x6,c7 = flags
-            #if '*' in line:
-            #    print "flags", repr(flags), "rest", repr(rest)
-
-            if c0 in '?XI':
-                fn = line.split(None, 1)[1]
-                if c0 == '?':
-                    wcpath = rootwcpath.join(fn, abs=1)
-                    rootstatus.unknown.append(wcpath)
-                elif c0 == 'X':
-                    wcpath = rootwcpath.__class__(
-                        rootwcpath.localpath.join(fn, abs=1),
-                        auth=rootwcpath.auth)
-                    rootstatus.external.append(wcpath)
-                elif c0 == 'I':
-                    wcpath = rootwcpath.join(fn, abs=1)
-                    rootstatus.ignored.append(wcpath)
-
-                continue
-
-            #elif c0 in '~!' or c4 == 'S':
-            #    raise NotImplementedError("received flag %r" % c0)
-
-            m = WCStatus._rex_status.match(rest)
-            if not m:
-                if c7 == '*':
-                    fn = rest.strip()
-                    wcpath = rootwcpath.join(fn, abs=1)
-                    rootstatus.update_available.append(wcpath)
-                    continue
-                if line.lower().find('against revision:')!=-1:
-                    update_rev = int(rest.split(':')[1].strip())
-                    continue
-                if line.lower().find('status on external') > -1:
-                    # XXX not sure what to do here... perhaps we want to
-                    # store some state instead of just continuing, as right
-                    # now it makes the top-level external get added twice
-                    # (once as external, once as 'normal' unchanged item)
-                    # because of the way SVN presents external items
-                    continue
-                # keep trying
-                raise ValueError("could not parse line %r" % line)
-            else:
-                rev, modrev, author, fn = m.groups()
-            wcpath = rootwcpath.join(fn, abs=1)
-            #assert wcpath.check()
-            if c0 == 'M':
-                assert wcpath.check(file=1), "didn't expect a directory with changed content here"
-                rootstatus.modified.append(wcpath)
-            elif c0 == 'A' or c3 == '+' :
-                rootstatus.added.append(wcpath)
-            elif c0 == 'D':
-                rootstatus.deleted.append(wcpath)
-            elif c0 == 'C':
-                rootstatus.conflict.append(wcpath)
-            elif c0 == '~':
-                rootstatus.kindmismatch.append(wcpath)
-            elif c0 == '!':
-                rootstatus.incomplete.append(wcpath)
-            elif c0 == 'R':
-                rootstatus.replaced.append(wcpath)
-            elif not c0.strip():
-                rootstatus.unchanged.append(wcpath)
-            else:
-                raise NotImplementedError("received flag %r" % c0)
-
-            if c1 == 'M':
-                rootstatus.prop_modified.append(wcpath)
-            # XXX do we cover all client versions here?
-            if c2 == 'L' or c5 == 'K':
-                rootstatus.locked.append(wcpath)
-            if c7 == '*':
-                rootstatus.update_available.append(wcpath)
-
-            if wcpath == rootwcpath:
-                rootstatus.rev = rev
-                rootstatus.modrev = modrev
-                rootstatus.author = author
-                if update_rev:
-                    rootstatus.update_rev = update_rev
-                continue
-        return rootstatus
-    fromstring = staticmethod(fromstring)
-
-class XMLWCStatus(WCStatus):
-    def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
-        """ parse 'data' (XML string as outputted by svn st) into a status obj
-        """
-        # XXX for externals, the path is shown twice: once
-        # with external information, and once with full info as if
-        # the item was a normal non-external... the current way of
-        # dealing with this issue is by ignoring it - this does make
-        # externals appear as external items as well as 'normal',
-        # unchanged ones in the status object so this is far from ideal
-        rootstatus = WCStatus(rootwcpath, rev, modrev, author)
-        update_rev = None
-        minidom, ExpatError = importxml()
-        try:
-            doc = minidom.parseString(data)
-        except ExpatError:
-            e = sys.exc_info()[1]
-            raise ValueError(str(e))
-        urevels = doc.getElementsByTagName('against')
-        if urevels:
-            rootstatus.update_rev = urevels[-1].getAttribute('revision')
-        for entryel in doc.getElementsByTagName('entry'):
-            path = entryel.getAttribute('path')
-            statusel = entryel.getElementsByTagName('wc-status')[0]
-            itemstatus = statusel.getAttribute('item')
-
-            if itemstatus == 'unversioned':
-                wcpath = rootwcpath.join(path, abs=1)
-                rootstatus.unknown.append(wcpath)
-                continue
-            elif itemstatus == 'external':
-                wcpath = rootwcpath.__class__(
-                    rootwcpath.localpath.join(path, abs=1),
-                    auth=rootwcpath.auth)
-                rootstatus.external.append(wcpath)
-                continue
-            elif itemstatus == 'ignored':
-                wcpath = rootwcpath.join(path, abs=1)
-                rootstatus.ignored.append(wcpath)
-                continue
-            elif itemstatus == 'incomplete':
-                wcpath = rootwcpath.join(path, abs=1)
-                rootstatus.incomplete.append(wcpath)
-                continue
-
-            rev = statusel.getAttribute('revision')
-            if itemstatus == 'added' or itemstatus == 'none':
-                rev = '0'
-                modrev = '?'
-                author = '?'
-                date = ''
-            elif itemstatus == "replaced":
-                pass
-            else:
-                #print entryel.toxml()
-                commitel = entryel.getElementsByTagName('commit')[0]
-                if commitel:
-                    modrev = commitel.getAttribute('revision')
-                    author = ''
-                    author_els = commitel.getElementsByTagName('author')
-                    if author_els:
-                        for c in author_els[0].childNodes:
-                            author += c.nodeValue
-                    date = ''
-                    for c in commitel.getElementsByTagName('date')[0]\
-                            .childNodes:
-                        date += c.nodeValue
-
-            wcpath = rootwcpath.join(path, abs=1)
-
-            assert itemstatus != 'modified' or wcpath.check(file=1), (
-                'did\'t expect a directory with changed content here')
-
-            itemattrname = {
-                'normal': 'unchanged',
-                'unversioned': 'unknown',
-                'conflicted': 'conflict',
-                'none': 'added',
-            }.get(itemstatus, itemstatus)
-
-            attr = getattr(rootstatus, itemattrname)
-            attr.append(wcpath)
-
-            propsstatus = statusel.getAttribute('props')
-            if propsstatus not in ('none', 'normal'):
-                rootstatus.prop_modified.append(wcpath)
-
-            if wcpath == rootwcpath:
-                rootstatus.rev = rev
-                rootstatus.modrev = modrev
-                rootstatus.author = author
-                rootstatus.date = date
-
-            # handle repos-status element (remote info)
-            rstatusels = entryel.getElementsByTagName('repos-status')
-            if rstatusels:
-                rstatusel = rstatusels[0]
-                ritemstatus = rstatusel.getAttribute('item')
-                if ritemstatus in ('added', 'modified'):
-                    rootstatus.update_available.append(wcpath)
-
-            lockels = entryel.getElementsByTagName('lock')
-            if len(lockels):
-                rootstatus.locked.append(wcpath)
-
-        return rootstatus
-    fromstring = staticmethod(fromstring)
-
-class InfoSvnWCCommand:
-    def __init__(self, output):
-        # Path: test
-        # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
-        # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
-        # Revision: 2151
-        # Node Kind: directory
-        # Schedule: normal
-        # Last Changed Author: hpk
-        # Last Changed Rev: 2100
-        # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
-        # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
-
-        d = {}
-        for line in output.split('\n'):
-            if not line.strip():
-                continue
-            key, value = line.split(':', 1)
-            key = key.lower().replace(' ', '')
-            value = value.strip()
-            d[key] = value
-        try:
-            self.url = d['url']
-        except KeyError:
-            raise  ValueError("Not a versioned resource")
-            #raise ValueError, "Not a versioned resource %r" % path
-        self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
-        try:
-            self.rev = int(d['revision'])
-        except KeyError:
-            self.rev = None
-
-        self.path = py.path.local(d['path'])
-        self.size = self.path.size()
-        if 'lastchangedrev' in d:
-            self.created_rev = int(d['lastchangedrev'])
-        if 'lastchangedauthor' in d:
-            self.last_author = d['lastchangedauthor']
-        if 'lastchangeddate' in d:
-            self.mtime = parse_wcinfotime(d['lastchangeddate'])
-            self.time = self.mtime * 1000000
-
-    def __eq__(self, other):
-        return self.__dict__ == other.__dict__
-
-def parse_wcinfotime(timestr):
-    """ Returns seconds since epoch, UTC. """
-    # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
-    m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
-    if not m:
-        raise ValueError("timestring %r does not match" % timestr)
-    timestr, timezone = m.groups()
-    # do not handle timezone specially, return value should be UTC
-    parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
-    return calendar.timegm(parsedtime)
-
-def make_recursive_propdict(wcroot,
-                            output,
-                            rex = re.compile("Properties on '(.*)':")):
-    """ Return a dictionary of path->PropListDict mappings. """
-    lines = [x for x in output.split('\n') if x]
-    pdict = {}
-    while lines:
-        line = lines.pop(0)
-        m = rex.match(line)
-        if not m:
-            raise ValueError("could not parse propget-line: %r" % line)
-        path = m.groups()[0]
-        wcpath = wcroot.join(path, abs=1)
-        propnames = []
-        while lines and lines[0].startswith('  '):
-            propname = lines.pop(0).strip()
-            propnames.append(propname)
-        assert propnames, "must have found properties!"
-        pdict[wcpath] = PropListDict(wcpath, propnames)
-    return pdict
-
-
-def importxml(cache=[]):
-    if cache:
-        return cache
-    from xml.dom import minidom
-    from xml.parsers.expat import ExpatError
-    cache.extend([minidom, ExpatError])
-    return cache
-
-class LogEntry:
-    def __init__(self, logentry):
-        self.rev = int(logentry.getAttribute('revision'))
-        for lpart in filter(None, logentry.childNodes):
-            if lpart.nodeType == lpart.ELEMENT_NODE:
-                if lpart.nodeName == 'author':
-                    self.author = lpart.firstChild.nodeValue
-                elif lpart.nodeName == 'msg':
-                    if lpart.firstChild:
-                        self.msg = lpart.firstChild.nodeValue
-                    else:
-                        self.msg = ''
-                elif lpart.nodeName == 'date':
-                    #2003-07-29T20:05:11.598637Z
-                    timestr = lpart.firstChild.nodeValue
-                    self.date = parse_apr_time(timestr)
-                elif lpart.nodeName == 'paths':
-                    self.strpaths = []
-                    for ppart in filter(None, lpart.childNodes):
-                        if ppart.nodeType == ppart.ELEMENT_NODE:
-                            self.strpaths.append(PathEntry(ppart))
-    def __repr__(self):
-        return '<Logentry rev=%d author=%s date=%s>' % (
-            self.rev, self.author, self.date)
-
-
+"""
+svn-Command based Implementation of a Subversion WorkingCopy Path.
+
+  SvnWCCommandPath  is the main class.
+
+"""
+
import calendar
import os
import re
import string  # needed by _check_path (string.printable); was missing
import subprocess
import sys
import time

import py
from py._path import common
+
+#-----------------------------------------------------------
+# Caching latest repository revision and repo-paths
+# (getting them is slow with the current implementations)
+#
+# XXX make mt-safe
+#-----------------------------------------------------------
+
class cache:
    """Namespace holding module-wide caches of svn query results.

    XXX not thread-safe (see module header note).
    """
    proplist = {}
    info = {}
    entries = {}
    prop = {}
+
class RepoEntry:
    """One cached repository url/revision pair plus its discovery time."""

    def __init__(self, url, rev, timestamp):
        self.url = url
        self.rev = rev
        self.timestamp = timestamp

    def __str__(self):
        return "repo: %s;%s  %s" %(self.url, self.rev, self.timestamp)
+
class RepoCache:
    """ The Repocache manages discovered repository paths
    and their revisions.  If inside a timeout the cache
    will even return the revision of the root.
    """
    # seconds after which a cached revision is considered stale
    timeout = 20

    def __init__(self):
        self.repos = []

    def clear(self):
        self.repos = []

    def put(self, url, rev, timestamp=None):
        """Record (or refresh) the last-known revision for *url*."""
        if rev is None:
            return
        if timestamp is None:
            timestamp = time.time()
        for known in self.repos:
            if known.url == url:
                # refresh the existing entry in place
                known.timestamp = timestamp
                known.rev = rev
                return
        self.repos.append(RepoEntry(url, rev, timestamp))

    def get(self, url):
        """Return (repo_url, rev) for *url*; rev is -1 when unknown/stale."""
        now = time.time()
        for known in self.repos:
            if not url.startswith(known.url):
                continue
            if now < known.timestamp + self.timeout:
                return known.url, known.rev
            return known.url, -1
        return url, -1
+
# module-wide cache of repository roots/revisions discovered so far
repositories = RepoCache()


# svn support code

# characters accepted verbatim in svn paths besides alphanumerics
ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
if sys.platform == "win32":
    ALLOWED_CHARS += ":"
# host parts may additionally carry 'user@host:port' syntax
ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
+
+def _getsvnversion(ver=[]):
+    try:
+        return ver[0]
+    except IndexError:
+        v = py.process.cmdexec("svn -q --version")
+        v.strip()
+        v = '.'.join(v.split('.')[:2])
+        ver.append(v)
+        return v
+
+def _escape_helper(text):
+    text = str(text)
+    if py.std.sys.platform != 'win32':
+        text = str(text).replace('$', '\\$')
+    return text
+
def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
    """Return True if *text* holds a character that is neither
    alphanumeric nor contained in *allowed_chars*."""
    return any(not ch.isalnum() and ch not in allowed_chars
               for ch in str(text))
+
def checkbadchars(url):
    # (hpk) not quite sure about the exact purpose, guido w.?
    proto, uri = url.split("://", 1)
    if proto == "file":
        return
    host, uripath = uri.split('/', 1)
    # only check for bad chars in the non-protocol parts
    if _check_for_bad_chars(host, ALLOWED_CHARS_HOST) or \
            _check_for_bad_chars(uripath, ALLOWED_CHARS):
        raise ValueError("bad char in %r" % (url, ))
+
+
+#_______________________________________________________________
+
class SvnPathBase(common.PathBase):
    """ Base implementation for SvnPath implementations. """
    sep = '/'

    def _geturl(self):
        return self.strpath
    url = property(_geturl, None, None, "url of this svn-path.")

    def __str__(self):
        """ return a string representation (including rev-number) """
        return self.strpath

    def __hash__(self):
        return hash(self.strpath)

    def new(self, **kw):
        """ create a modified version of this path. A 'rev' argument
            indicates a new revision.
            the following keyword arguments modify various path parts::

              http://host.com/repo/path/file.ext
              |-----------------------|          dirname
                                        |------| basename
                                        |--|     purebasename
                                            |--| ext
        """
        obj = object.__new__(self.__class__)
        obj.rev = kw.get('rev', self.rev)
        obj.auth = kw.get('auth', self.auth)
        dirname, basename, purebasename, ext = self._getbyspec(
             "dirname,basename,purebasename,ext")
        if 'basename' in kw:
            # 'basename' is mutually exclusive with 'purebasename'/'ext'
            if 'purebasename' in kw or 'ext' in kw:
                raise ValueError("invalid specification %r" % kw)
        else:
            pb = kw.setdefault('purebasename', purebasename)
            ext = kw.setdefault('ext', ext)
            if ext and not ext.startswith('.'):
                ext = '.' + ext
            kw['basename'] = pb + ext

        kw.setdefault('dirname', dirname)
        kw.setdefault('sep', self.sep)
        if kw['basename']:
            obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
        else:
            obj.strpath = "%(dirname)s" % kw
        return obj

    def _getbyspec(self, spec):
        """ get specified parts of the path.  'spec' is a string
            with comma separated path parts. The parts are returned
            in exactly the order of the specification.

            you may specify the following parts:

            http://host.com/repo/path/file.ext
            |-----------------------|          dirname
                                      |------| basename
                                      |--|     purebasename
                                          |--| ext
        """
        res = []
        parts = self.strpath.split(self.sep)
        for name in spec.split(','):
            name = name.strip()
            if name == 'dirname':
                res.append(self.sep.join(parts[:-1]))
            elif name == 'basename':
                res.append(parts[-1])
            else:
                basename = parts[-1]
                i = basename.rfind('.')
                if i == -1:
                    purebasename, ext = basename, ''
                else:
                    purebasename, ext = basename[:i], basename[i:]
                if name == 'purebasename':
                    res.append(purebasename)
                elif name == 'ext':
                    res.append(ext)
                else:
                    raise NameError("Don't know part %r" % name)
        return res

    def __eq__(self, other):
        """ return true if path and rev attributes each match """
        # BUGFIX: the old code compared "self.rev == other.rev or
        # self.rev == other.rev" -- a tautological duplicate; the
        # redundant second clause is dropped (behavior unchanged).
        return str(self) == str(other) and self.rev == other.rev

    def __ne__(self, other):
        return not self == other

    def join(self, *args):
        """ return a new Path (with the same revision) which is composed
            of the self Path followed by 'args' path components.
        """
        if not args:
            return self

        args = tuple([arg.strip(self.sep) for arg in args])
        parts = (self.strpath, ) + args
        newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
        return newpath

    def propget(self, name):
        """ return the content of the given property. """
        value = self._propget(name)
        return value

    def proplist(self):
        """ list all property names. """
        content = self._proplist()
        return content

    def size(self):
        """ Return the size of the file content of the Path. """
        return self.info().size

    def mtime(self):
        """ Return the last modification time of the file. """
        return self.info().mtime

    # shared help methods

    def _escape(self, cmd):
        return _escape_helper(cmd)

    class Checkers(common.Checkers):
        def dir(self):
            try:
                return self.path.info().kind == 'dir'
            except py.error.Error:
                # no 'svn info' available -- probe via listdir instead
                return self._listdirworks()

        def _listdirworks(self):
            try:
                self.path.listdir()
            except py.error.ENOENT:
                return False
            else:
                return True

        def file(self):
            try:
                return self.path.info().kind == 'file'
            except py.error.ENOENT:
                return False

        def exists(self):
            try:
                return self.path.info()
            except py.error.ENOENT:
                return self._listdirworks()
+
def parse_apr_time(timestr):
    """Convert an APR timestamp like '2003-07-29T20:05:11.598637Z'
    into a local-epoch float (fractional seconds are dropped)."""
    base, dot, _fraction = timestr.rpartition('.')
    if not dot:
        raise ValueError("could not parse %s" % timestr)
    return time.mktime(time.strptime(base, "%Y-%m-%dT%H:%M:%S"))
+
class PropListDict(dict):
    """ a Dictionary which fetches values (InfoSvnCommand instances) lazily"""

    def __init__(self, path, keynames):
        # every property starts out as None; real values are fetched on access
        dict.__init__(self, ((name, None) for name in keynames))
        self.path = path

    def __getitem__(self, key):
        cached = dict.__getitem__(self, key)
        if cached is None:
            cached = self.path.propget(key)
            dict.__setitem__(self, key, cached)
        return cached
+
def fixlocale():
    """Return a command prefix forcing the C locale (empty on Windows)."""
    return '' if sys.platform == 'win32' else 'LC_ALL=C '
+
# some nasty chunk of code to solve path and url conversion and quoting issues
ILLEGAL_CHARS = '* | \\ / : < > ? \t \n \x0b \x0c \r'.split(' ')
if os.sep in ILLEGAL_CHARS:
    # the platform's own separator is obviously legal in paths
    ILLEGAL_CHARS.remove(os.sep)
ISWINDOWS = sys.platform == 'win32'
# matches an optional drive prefix ('c:\') followed by a colon-free rest
_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
def _check_path(path):
    """Raise ValueError if *path* contains characters svn cannot handle."""
    illegal = ILLEGAL_CHARS[:]
    sp = path.strpath
    if ISWINDOWS:
        # the drive-letter colon is the only legal colon on Windows
        illegal.remove(':')
        if not _reg_allow_disk.match(sp):
            raise ValueError('path may not contain a colon (:)')
    for char in sp:
        # relies on the stdlib `string` module (string.printable)
        if char not in string.printable or char in illegal:
            raise ValueError('illegal character %r in path' % (char,))
+
def path_to_fspath(path, addat=True):
    """Return the filesystem string for *path*, optionally appending
    '@<rev>' (or '@HEAD' when the revision is unknown, i.e. -1)."""
    _check_path(path)
    sp = path.strpath
    if addat:
        peg = path.rev if path.rev != -1 else 'HEAD'
        sp = '%s@%s' % (sp, peg)
    return sp
+
def url_from_path(path):
    """Build a file:// URL (with @REV or @HEAD suffix) for *path*."""
    fspath = path_to_fspath(path, False)
    quote = py.std.urllib.quote
    if ISWINDOWS:
        match = _reg_allow_disk.match(fspath)
        fspath = fspath.replace('\\', '/')
        drive = match.group(1)
        if drive:
            # keep the drive prefix unquoted and root the URL path at '/'
            fspath = '/%s%s' % (drive.replace('\\', '/'),
                                quote(fspath[len(drive):]))
        else:
            fspath = quote(fspath)
    else:
        fspath = quote(fspath)
    peg = path.rev if path.rev != -1 else 'HEAD'
    return 'file://%s@%s' % (fspath, peg)
+
class SvnAuth(object):
    """ container for auth information for Subversion """

    def __init__(self, username, password, cache_auth=True, interactive=True):
        self.username = username
        self.password = password
        self.cache_auth = cache_auth
        self.interactive = interactive

    def makecmdoptions(self):
        """Render the stored credentials/flags as svn command options."""
        opts = []
        uname = self.username.replace('"', '\\"')
        passwd = self.password.replace('"', '\\"')
        if uname:
            opts.append('--username="%s"' % (uname,))
        if passwd:
            opts.append('--password="%s"' % (passwd,))
        if not self.cache_auth:
            opts.append('--no-auth-cache')
        if not self.interactive:
            opts.append('--non-interactive')
        return ' '.join(opts)

    def __str__(self):
        return "<SvnAuth username=%s ...>" %(self.username,)
+
+rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
+
+class SvnWCCommandPath(common.PathBase):
+    """ path implementation offering access/modification to svn working copies.
+        It has methods similar to the functions in os.path and similar to the
+        commands of the svn client.
+    """
+    sep = os.sep
+
+    def __new__(cls, wcpath=None, auth=None):
+        self = object.__new__(cls)
+        if isinstance(wcpath, cls):
+            if wcpath.__class__ == cls:
+                return wcpath
+            wcpath = wcpath.localpath
+        if _check_for_bad_chars(str(wcpath),
+                                          ALLOWED_CHARS):
+            raise ValueError("bad char in wcpath %s" % (wcpath, ))
+        self.localpath = py.path.local(wcpath)
+        self.auth = auth
+        return self
+
    # plain string form of the underlying local path
    strpath = property(lambda x: str(x.localpath), None, None, "string path")
    # current revision; always re-queries `svn info` (usecache=0)
    rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
+
+    def __eq__(self, other):
+        return self.localpath == getattr(other, 'localpath', None)
+
    def _geturl(self):
        """Return (and lazily cache) the repository URL from `svn info`."""
        if getattr(self, '_url', None) is None:
            info = self.info()
            self._url = info.url #SvnPath(info.url, info.rev)
        assert isinstance(self._url, py.builtin._basestring)
        return self._url

    url = property(_geturl, None, None, "url of this WC item")
+
    def _escape(self, cmd):
        # shell-escape '$' on POSIX (see _escape_helper)
        return _escape_helper(cmd)
+
    def dump(self, obj):
        """ pickle object into path location"""
        # delegates to py.path.local.dump
        return self.localpath.dump(obj)
+
+    def svnurl(self):
+        """ return current SvnPath for this WC-item. """
+        info = self.info()
+        return py.path.svnurl(info.url)
+
    def __repr__(self):
        # debugging representation; shows only the local string path
        return "svnwc(%r)" % (self.strpath) # , self._url)
+
    def __str__(self):
        """Return the plain local filesystem path as a string."""
        return str(self.localpath)
+
+    def _makeauthoptions(self):
+        if self.auth is None:
+            return ''
+        return self.auth.makecmdoptions()
+
+    def _authsvn(self, cmd, args=None):
+        args = args and list(args) or []
+        args.append(self._makeauthoptions())
+        return self._svn(cmd, *args)
+
+    def _svn(self, cmd, *args):
+        l = ['svn %s' % cmd]
+        args = [self._escape(item) for item in args]
+        l.extend(args)
+        l.append('"%s"' % self._escape(self.strpath))
+        # try fixing the locale because we can't otherwise parse
+        string = fixlocale() + " ".join(l)
+        try:
+            try:
+                key = 'LC_MESSAGES'
+                hold = os.environ.get(key)
+                os.environ[key] = 'C'
+                out = py.process.cmdexec(string)
+            finally:
+                if hold:
+                    os.environ[key] = hold
+                else:
+                    del os.environ[key]
+        except py.process.cmdexec.Error:
+            e = sys.exc_info()[1]
+            strerr = e.err.lower()
+            if strerr.find('not found') != -1:
+                raise py.error.ENOENT(self)
+            elif strerr.find("E200009:") != -1:
+                raise py.error.ENOENT(self)
+            if (strerr.find('file exists') != -1 or
+                strerr.find('file already exists') != -1 or
+                strerr.find('w150002:') != -1 or
+                strerr.find("can't create directory") != -1):
+                raise py.error.EEXIST(strerr) #self)
+            raise
+        return out
+
    def switch(self, url):
        """Switch the working copy to track *url* (``svn switch``)."""
        self._authsvn('switch', [url])
+
+    def checkout(self, url=None, rev=None):
+        """ checkout from url to local wcpath. """
+        args = []
+        if url is None:
+            url = self.url
+        if rev is None or rev == -1:
+            if (py.std.sys.platform != 'win32' and
+                    _getsvnversion() == '1.3'):
+                url += "@HEAD"
+        else:
+            if _getsvnversion() == '1.3':
+                url += "@%d" % rev
+            else:
+                args.append('-r' + str(rev))
+        args.append(url)
+        self._authsvn('co', args)
+
+    def update(self, rev='HEAD', interactive=True):
+        """ update working copy item to given revision. (None -> HEAD). """
+        opts = ['-r', rev]
+        if not interactive:
+            opts.append("--non-interactive")
+        self._authsvn('up', opts)
+
    def write(self, content, mode='w'):
        """ write content into local filesystem wc. """
        # writes the file only; does not svn-add or commit
        self.localpath.write(content, mode)
+
    def dirpath(self, *args):
        """ return the directory Path of the current Path. """
        # propagate the auth object to the parent path
        return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
+
    def _ensuredirs(self):
        # recursively create (and svn-mkdir) missing ancestor directories,
        # then this directory itself; returns self
        parent = self.dirpath()
        if parent.check(dir=0):
            parent._ensuredirs()
        if self.check(dir=0):
            self.mkdir()
        return self
+
    def ensure(self, *args, **kwargs):
        """ ensure that an args-joined path exists (by default as
            a file). if you specify a keyword argument 'dir=True'
            then the path is forced  to be a directory path.
        """
        p = self.join(*args)
        if p.check():
            if p.check(versioned=False):
                # exists on disk but not under version control yet
                p.add()
            return p
        if kwargs.get('dir', 0):
            return p._ensuredirs()
        # create missing parents, then an empty svn-added file
        parent = p.dirpath()
        parent._ensuredirs()
        p.write("")
        p.add()
        return p
+
+    def mkdir(self, *args):
+        """ create & return the directory joined with args. """
+        if args:
+            return self.join(*args).mkdir()
+        else:
+            self._svn('mkdir')
+            return self
+
    def add(self):
        """ add ourself to svn """
        self._svn('add')
+
    def remove(self, rec=1, force=1):
        """ remove a file or a directory tree. 'rec'ursive is
            ignored and considered always true (because of
            underlying svn semantics).
        """
        assert rec, "svn cannot remove non-recursively"
        if not self.check(versioned=True):
            # not added to svn (anymore?), just remove
            py.path.local(self).remove()
            return
        flags = []
        if force:
            flags.append('--force')
        self._svn('remove', *flags)
+
    def copy(self, target):
        """ copy path to target."""
        # NOTE(review): bypasses _svn()/auth options and does no escaping
        py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
+
    def rename(self, target):
        """ rename this path to target. """
        # NOTE(review): bypasses _svn()/auth options and does no escaping
        py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
+
    def lock(self):
        """ set a lock (exclusive) on the resource """
        out = self._authsvn('lock').strip()
        if not out:
            # warning or error, raise exception
            raise ValueError("unknown error in svn lock command")
+
    def unlock(self):
        """ unset a previously set lock """
        out = self._authsvn('unlock').strip()
        if out.startswith('svn:'):
            # warning or error, raise exception
            raise Exception(out[4:])
+
+    def cleanup(self):
+        """ remove any locks from the resource """
+        # XXX should be fixed properly!!!
+        try:
+            self.unlock()
+        except:
+            pass
+
    def status(self, updates=0, rec=0, externals=0):
        """ return (collective) Status object for this file. """
        # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
        #             2201     2192        jum   test
        # XXX
        if externals:
            raise ValueError("XXX cannot perform status() "
                             "on external items yet")
        else:
            #1.2 supports: externals = '--ignore-externals'
            externals = ''
        if rec:
            rec= ''
        else:
            rec = '--non-recursive'

        # XXX does not work on all subversion versions
        #if not externals:
        #    externals = '--ignore-externals'

        if updates:
            updates = '-u'
        else:
            updates = ''

        # prefer the --xml status format; fall back to plain-text parsing
        # for svn clients that do not support it
        try:
            cmd = 'status -v --xml --no-ignore %s %s %s' % (
                    updates, rec, externals)
            out = self._authsvn(cmd)
        except py.process.cmdexec.Error:
            cmd = 'status -v --no-ignore %s %s %s' % (
                    updates, rec, externals)
            out = self._authsvn(cmd)
            rootstatus = WCStatus(self).fromstring(out, self)
        else:
            rootstatus = XMLWCStatus(self).fromstring(out, self)
        return rootstatus
+
    def diff(self, rev=None):
        """ return a diff of the current path against revision rev (defaulting
            to the last one).
        """
        args = []
        if rev is not None:
            # rev is formatted with %d and therefore must be an integer
            args.append("-r %d" % rev)
        out = self._authsvn('diff', args)
        return out
+
    def blame(self):
        """ return a list of tuples of three elements:
            (revision, commiter, line)
        """
        out = self._svn('blame')
        result = []
        blamelines = out.splitlines()
        # pair each blame line with the actual file content from the repo
        reallines = py.path.svnurl(self.url).readlines()
        for i, (blameline, line) in enumerate(
                zip(blamelines, reallines)):
            m = rex_blame.match(blameline)
            if not m:
                raise ValueError("output line %r of svn blame does not match "
                                 "expected format" % (line, ))
            rev, name, _ = m.groups()
            result.append((int(rev), name, line))
        return result
+
    # extracts the revision number from svn's "Committed revision N." output
    _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
    def commit(self, msg='', rec=1):
        """ commit with support for non-recursive commits """
        # XXX i guess escaping should be done better here?!?
        cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
        if not rec:
            cmd += ' -N'
        out = self._authsvn(cmd)
        try:
            # the cached `svn info` is stale after a commit
            del cache.info[self]
        except KeyError:
            pass
        if out:
            m = self._rex_commit.match(out)
            # NOTE(review): assumes the regex matches whenever svn printed
            # output; an unexpected message would raise AttributeError here
            return int(m.group(1))
+
    def propset(self, name, value, *args):
        """ set property name to value on this path. """
        # pass the value via a temp file to avoid quoting issues
        d = py.path.local.mkdtemp()
        try:
            p = d.join('value')
            p.write(value)
            self._svn('propset', name, '--file', str(p), *args)
        finally:
            d.remove()
+
    def propget(self, name):
        """ get property name on this path. """
        res = self._svn('propget', name)
        return res[:-1] # strip trailing newline
+
    def propdel(self, name):
        """ delete property name on this path. """
        res = self._svn('propdel', name)
        return res[:-1] # strip trailing newline
+
    def proplist(self, rec=0):
        """ return a mapping of property names to property values.
If rec is True, then return a dictionary mapping sub-paths to such mappings.
"""
        if rec:
            res = self._svn('proplist -R')
            return make_recursive_propdict(self, res)
        else:
            res = self._svn('proplist')
            # skip the "Properties on ..." header line; values are lazy
            lines = res.split('\n')
            lines = [x.strip() for x in lines[1:]]
            return PropListDict(self, lines)
+
+    def revert(self, rec=0):
+        """ revert the local changes of this path. if rec is True, do so
+recursively. """
+        if rec:
+            result = self._svn('revert -R')
+        else:
+            result = self._svn('revert')
+        return result
+
    def new(self, **kw):
        """ create a modified version of this path. A 'rev' argument
            indicates a new revision.
            the following keyword arguments modify various path parts:

              http://host.com/repo/path/file.ext
              |-----------------------|          dirname
                                        |------| basename
                                        |--|     purebasename
                                            |--| ext
        """
        # delegate path-part manipulation to the underlying local path
        if kw:
            localpath = self.localpath.new(**kw)
        else:
            localpath = self.localpath
        return self.__class__(localpath, auth=self.auth)
+
+    def join(self, *args, **kwargs):
+        """ return a new Path (with the same revision) which is composed
+            of the self Path followed by 'args' path components.
+        """
+        if not args:
+            return self
+        localpath = self.localpath.join(*args, **kwargs)
+        return self.__class__(localpath, auth=self.auth)
+
    def info(self, usecache=1):
        """ return an Info structure with svn-provided information. """
        info = usecache and cache.info.get(self)
        if not info:
            try:
                output = self._svn('info')
            except py.process.cmdexec.Error:
                e = sys.exc_info()[1]
                # map "not a working copy / not versioned" errors to ENOENT
                if e.err.find('Path is not a working copy directory') != -1:
                    raise py.error.ENOENT(self, e.err)
                elif e.err.find("is not under version control") != -1:
                    raise py.error.ENOENT(self, e.err)
                raise
            # XXX SVN 1.3 has output on stderr instead of stdout (while it does
            # return 0!), so a bit nasty, but we assume no output is output
            # to stderr...
            if (output.strip() == '' or
                    output.lower().find('not a versioned resource') != -1):
                raise py.error.ENOENT(self, output)
            info = InfoSvnWCCommand(output)

            # Can't reliably compare on Windows without access to win32api
            if py.std.sys.platform != 'win32':
                if info.path != self.localpath:
                    raise py.error.ENOENT(self, "not a versioned resource:" +
                            " %s != %s" % (info.path, self.localpath))
            cache.info[self] = info
        return info
+
    def listdir(self, fil=None, sort=None):
        """ return a sequence of Paths.

        listdir will return either a tuple or a list of paths
        depending on implementation choices.
        """
        if isinstance(fil, str):
            fil = common.FNMatcher(fil)
        # XXX unify argument naming with LocalPath.listdir
        def notsvn(path):
            # exclude the administrative '.svn' directories
            return path.basename != '.svn'

        paths = []
        for localpath in self.localpath.listdir(notsvn):
            p = self.__class__(localpath, auth=self.auth)
            if notsvn(p) and (not fil or fil(p)):
                paths.append(p)
        self._sortlist(paths, sort)
        return paths
+
    def open(self, mode='r'):
        """ return an opened file with the given mode. """
        # opens the local filesystem file directly (no svn involvement)
        return open(self.strpath, mode)
+
    def _getbyspec(self, spec):
        # delegate path-part extraction to the underlying local path
        return self.localpath._getbyspec(spec)
+
    class Checkers(py.path.local.Checkers):
        """Adds a 'versioned' check on top of the local-path checkers."""
        def __init__(self, path):
            self.svnwcpath = path
            self.path = path.localpath
        def versioned(self):
            # True when `svn info` succeeds; recognized "not versioned"
            # errors return False, anything unexpected is re-raised
            try:
                s = self.svnwcpath.info()
            except (py.error.ENOENT, py.error.EEXIST):
                return False
            except py.process.cmdexec.Error:
                e = sys.exc_info()[1]
                if e.err.find('is not a working copy')!=-1:
                    return False
                if e.err.lower().find('not a versioned resource') != -1:
                    return False
                raise
            else:
                return True
+
    def log(self, rev_start=None, rev_end=1, verbose=False):
        """ return a list of LogEntry instances for this path.
rev_start is the starting revision (defaulting to the first one).
rev_end is the last revision (defaulting to HEAD).
if verbose is True, then the LogEntry instances also know which files changed.
"""
        assert self.check()   # make it simpler for the pipe
        rev_start = rev_start is None and "HEAD" or rev_start
        rev_end = rev_end is None and "HEAD" or rev_end
        if rev_start == "HEAD" and rev_end == 1:
                rev_opt = ""
        else:
            rev_opt = "-r %s:%s" % (rev_start, rev_end)
        verbose_opt = verbose and "-v" or ""
        locale_env = fixlocale()
        # some blather on stderr
        auth_opt = self._makeauthoptions()
        #stdin, stdout, stderr  = os.popen3(locale_env +
        #                                   'svn log --xml %s %s %s "%s"' % (
        #                                    rev_opt, verbose_opt, auth_opt,
        #                                    self.strpath))
        cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
            rev_opt, verbose_opt, auth_opt, self.strpath)

        # NOTE(review): shell=True with an interpolated command string; the
        # path is quoted but not shell-escaped -- long-standing behavior,
        # worth hardening if paths can be untrusted
        popen = subprocess.Popen(cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    shell=True,
        )
        stdout, stderr = popen.communicate()
        stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
        minidom,ExpatError = importxml()
        try:
            tree = minidom.parseString(stdout)
        except ExpatError:
            # empty/invalid XML output: svn reported an unknown revision
            raise ValueError('no such revision')
        result = []
        for logentry in filter(None, tree.firstChild.childNodes):
            if logentry.nodeType == logentry.ELEMENT_NODE:
                result.append(LogEntry(logentry))
        return result
+
+    def size(self):
+        """ Return the size of the file content of the Path. """
+        return self.info().size
+
+    def mtime(self):
+        """ Return the last modification time of the file. """
+        return self.info().mtime
+
+    def __hash__(self):
+        return hash((self.strpath, self.__class__, self.auth))
+
+
+class WCStatus:
+    attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
+                'deleted', 'prop_modified', 'unknown', 'update_available',
+                'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
+                )
+
+    def __init__(self, wcpath, rev=None, modrev=None, author=None):
+        self.wcpath = wcpath
+        self.rev = rev
+        self.modrev = modrev
+        self.author = author
+
+        for name in self.attrnames:
+            setattr(self, name, [])
+
+    def allpath(self, sort=True, **kw):
+        d = {}
+        for name in self.attrnames:
+            if name not in kw or kw[name]:
+                for path in getattr(self, name):
+                    d[path] = 1
+        l = d.keys()
+        if sort:
+            l.sort()
+        return l
+
+    # XXX a bit scary to assume there's always 2 spaces between username and
+    # path, however with win32 allowing spaces in user names there doesn't
+    # seem to be a more solid approach :(
+    _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
+
+    def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+        """ return a new WCStatus object from data 's'
+        """
+        rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+        update_rev = None
+        for line in data.split('\n'):
+            if not line.strip():
+                continue
+            #print "processing %r" % line
+            flags, rest = line[:8], line[8:]
+            # first column
+            c0,c1,c2,c3,c4,c5,x6,c7 = flags
+            #if '*' in line:
+            #    print "flags", repr(flags), "rest", repr(rest)
+
+            if c0 in '?XI':
+                fn = line.split(None, 1)[1]
+                if c0 == '?':
+                    wcpath = rootwcpath.join(fn, abs=1)
+                    rootstatus.unknown.append(wcpath)
+                elif c0 == 'X':
+                    wcpath = rootwcpath.__class__(
+                        rootwcpath.localpath.join(fn, abs=1),
+                        auth=rootwcpath.auth)
+                    rootstatus.external.append(wcpath)
+                elif c0 == 'I':
+                    wcpath = rootwcpath.join(fn, abs=1)
+                    rootstatus.ignored.append(wcpath)
+
+                continue
+
+            #elif c0 in '~!' or c4 == 'S':
+            #    raise NotImplementedError("received flag %r" % c0)
+
+            m = WCStatus._rex_status.match(rest)
+            if not m:
+                if c7 == '*':
+                    fn = rest.strip()
+                    wcpath = rootwcpath.join(fn, abs=1)
+                    rootstatus.update_available.append(wcpath)
+                    continue
+                if line.lower().find('against revision:')!=-1:
+                    update_rev = int(rest.split(':')[1].strip())
+                    continue
+                if line.lower().find('status on external') > -1:
+                    # XXX not sure what to do here... perhaps we want to
+                    # store some state instead of just continuing, as right
+                    # now it makes the top-level external get added twice
+                    # (once as external, once as 'normal' unchanged item)
+                    # because of the way SVN presents external items
+                    continue
+                # keep trying
+                raise ValueError("could not parse line %r" % line)
+            else:
+                rev, modrev, author, fn = m.groups()
+            wcpath = rootwcpath.join(fn, abs=1)
+            #assert wcpath.check()
+            if c0 == 'M':
+                assert wcpath.check(file=1), "didn't expect a directory with changed content here"
+                rootstatus.modified.append(wcpath)
+            elif c0 == 'A' or c3 == '+' :
+                rootstatus.added.append(wcpath)
+            elif c0 == 'D':
+                rootstatus.deleted.append(wcpath)
+            elif c0 == 'C':
+                rootstatus.conflict.append(wcpath)
+            elif c0 == '~':
+                rootstatus.kindmismatch.append(wcpath)
+            elif c0 == '!':
+                rootstatus.incomplete.append(wcpath)
+            elif c0 == 'R':
+                rootstatus.replaced.append(wcpath)
+            elif not c0.strip():
+                rootstatus.unchanged.append(wcpath)
+            else:
+                raise NotImplementedError("received flag %r" % c0)
+
+            if c1 == 'M':
+                rootstatus.prop_modified.append(wcpath)
+            # XXX do we cover all client versions here?
+            if c2 == 'L' or c5 == 'K':
+                rootstatus.locked.append(wcpath)
+            if c7 == '*':
+                rootstatus.update_available.append(wcpath)
+
+            if wcpath == rootwcpath:
+                rootstatus.rev = rev
+                rootstatus.modrev = modrev
+                rootstatus.author = author
+                if update_rev:
+                    rootstatus.update_rev = update_rev
+                continue
+        return rootstatus
+    fromstring = staticmethod(fromstring)
+
+class XMLWCStatus(WCStatus):
+    def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+        """ parse 'data' (XML string as outputted by svn st) into a status obj
+        """
+        # XXX for externals, the path is shown twice: once
+        # with external information, and once with full info as if
+        # the item was a normal non-external... the current way of
+        # dealing with this issue is by ignoring it - this does make
+        # externals appear as external items as well as 'normal',
+        # unchanged ones in the status object so this is far from ideal
+        rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+        update_rev = None
+        minidom, ExpatError = importxml()
+        try:
+            doc = minidom.parseString(data)
+        except ExpatError:
+            e = sys.exc_info()[1]
+            raise ValueError(str(e))
+        urevels = doc.getElementsByTagName('against')
+        if urevels:
+            rootstatus.update_rev = urevels[-1].getAttribute('revision')
+        for entryel in doc.getElementsByTagName('entry'):
+            path = entryel.getAttribute('path')
+            statusel = entryel.getElementsByTagName('wc-status')[0]
+            itemstatus = statusel.getAttribute('item')
+
+            if itemstatus == 'unversioned':
+                wcpath = rootwcpath.join(path, abs=1)
+                rootstatus.unknown.append(wcpath)
+                continue
+            elif itemstatus == 'external':
+                wcpath = rootwcpath.__class__(
+                    rootwcpath.localpath.join(path, abs=1),
+                    auth=rootwcpath.auth)
+                rootstatus.external.append(wcpath)
+                continue
+            elif itemstatus == 'ignored':
+                wcpath = rootwcpath.join(path, abs=1)
+                rootstatus.ignored.append(wcpath)
+                continue
+            elif itemstatus == 'incomplete':
+                wcpath = rootwcpath.join(path, abs=1)
+                rootstatus.incomplete.append(wcpath)
+                continue
+
+            rev = statusel.getAttribute('revision')
+            if itemstatus == 'added' or itemstatus == 'none':
+                rev = '0'
+                modrev = '?'
+                author = '?'
+                date = ''
+            elif itemstatus == "replaced":
+                pass
+            else:
+                #print entryel.toxml()
+                commitel = entryel.getElementsByTagName('commit')[0]
+                if commitel:
+                    modrev = commitel.getAttribute('revision')
+                    author = ''
+                    author_els = commitel.getElementsByTagName('author')
+                    if author_els:
+                        for c in author_els[0].childNodes:
+                            author += c.nodeValue
+                    date = ''
+                    for c in commitel.getElementsByTagName('date')[0]\
+                            .childNodes:
+                        date += c.nodeValue
+
+            wcpath = rootwcpath.join(path, abs=1)
+
+            assert itemstatus != 'modified' or wcpath.check(file=1), (
+                'did\'t expect a directory with changed content here')
+
+            itemattrname = {
+                'normal': 'unchanged',
+                'unversioned': 'unknown',
+                'conflicted': 'conflict',
+                'none': 'added',
+            }.get(itemstatus, itemstatus)
+
+            attr = getattr(rootstatus, itemattrname)
+            attr.append(wcpath)
+
+            propsstatus = statusel.getAttribute('props')
+            if propsstatus not in ('none', 'normal'):
+                rootstatus.prop_modified.append(wcpath)
+
+            if wcpath == rootwcpath:
+                rootstatus.rev = rev
+                rootstatus.modrev = modrev
+                rootstatus.author = author
+                rootstatus.date = date
+
+            # handle repos-status element (remote info)
+            rstatusels = entryel.getElementsByTagName('repos-status')
+            if rstatusels:
+                rstatusel = rstatusels[0]
+                ritemstatus = rstatusel.getAttribute('item')
+                if ritemstatus in ('added', 'modified'):
+                    rootstatus.update_available.append(wcpath)
+
+            lockels = entryel.getElementsByTagName('lock')
+            if len(lockels):
+                rootstatus.locked.append(wcpath)
+
+        return rootstatus
+    fromstring = staticmethod(fromstring)
+
+class InfoSvnWCCommand:
+    def __init__(self, output):
+        # Path: test
+        # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
+        # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+        # Revision: 2151
+        # Node Kind: directory
+        # Schedule: normal
+        # Last Changed Author: hpk
+        # Last Changed Rev: 2100
+        # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+        # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
+
+        d = {}
+        for line in output.split('\n'):
+            if not line.strip():
+                continue
+            key, value = line.split(':', 1)
+            key = key.lower().replace(' ', '')
+            value = value.strip()
+            d[key] = value
+        try:
+            self.url = d['url']
+        except KeyError:
+            raise  ValueError("Not a versioned resource")
+            #raise ValueError, "Not a versioned resource %r" % path
+        self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
+        try:
+            self.rev = int(d['revision'])
+        except KeyError:
+            self.rev = None
+
+        self.path = py.path.local(d['path'])
+        self.size = self.path.size()
+        if 'lastchangedrev' in d:
+            self.created_rev = int(d['lastchangedrev'])
+        if 'lastchangedauthor' in d:
+            self.last_author = d['lastchangedauthor']
+        if 'lastchangeddate' in d:
+            self.mtime = parse_wcinfotime(d['lastchangeddate'])
+            self.time = self.mtime * 1000000
+
+    def __eq__(self, other):
+        return self.__dict__ == other.__dict__
+
+def parse_wcinfotime(timestr):
+    """ Returns seconds since epoch, UTC. """
+    # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+    m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
+    if not m:
+        raise ValueError("timestring %r does not match" % timestr)
+    timestr, timezone = m.groups()
+    # do not handle timezone specially, return value should be UTC
+    parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
+    return calendar.timegm(parsedtime)
+
+def make_recursive_propdict(wcroot,
+                            output,
+                            rex = re.compile("Properties on '(.*)':")):
+    """ Return a dictionary of path->PropListDict mappings. """
+    lines = [x for x in output.split('\n') if x]
+    pdict = {}
+    while lines:
+        line = lines.pop(0)
+        m = rex.match(line)
+        if not m:
+            raise ValueError("could not parse propget-line: %r" % line)
+        path = m.groups()[0]
+        wcpath = wcroot.join(path, abs=1)
+        propnames = []
+        while lines and lines[0].startswith('  '):
+            propname = lines.pop(0).strip()
+            propnames.append(propname)
+        assert propnames, "must have found properties!"
+        pdict[wcpath] = PropListDict(wcpath, propnames)
+    return pdict
+
+
+def importxml(cache=[]):
+    if cache:
+        return cache
+    from xml.dom import minidom
+    from xml.parsers.expat import ExpatError
+    cache.extend([minidom, ExpatError])
+    return cache
+
+class LogEntry:
+    def __init__(self, logentry):
+        self.rev = int(logentry.getAttribute('revision'))
+        for lpart in filter(None, logentry.childNodes):
+            if lpart.nodeType == lpart.ELEMENT_NODE:
+                if lpart.nodeName == 'author':
+                    self.author = lpart.firstChild.nodeValue
+                elif lpart.nodeName == 'msg':
+                    if lpart.firstChild:
+                        self.msg = lpart.firstChild.nodeValue
+                    else:
+                        self.msg = ''
+                elif lpart.nodeName == 'date':
+                    #2003-07-29T20:05:11.598637Z
+                    timestr = lpart.firstChild.nodeValue
+                    self.date = parse_apr_time(timestr)
+                elif lpart.nodeName == 'paths':
+                    self.strpaths = []
+                    for ppart in filter(None, lpart.childNodes):
+                        if ppart.nodeType == ppart.ELEMENT_NODE:
+                            self.strpaths.append(PathEntry(ppart))
+    def __repr__(self):
+        return '<Logentry rev=%d author=%s date=%s>' % (
+            self.rev, self.author, self.date)
+
+
--- a/third_party/python/py/py/_process/__init__.py
+++ b/third_party/python/py/py/_process/__init__.py
@@ -1,1 +1,1 @@
-""" high-level sub-process handling """
+""" high-level sub-process handling """
--- a/third_party/python/py/py/_process/cmdexec.py
+++ b/third_party/python/py/py/_process/cmdexec.py
@@ -1,49 +1,49 @@
-import sys
-import subprocess
-import py
-from subprocess import Popen, PIPE
-
-def cmdexec(cmd):
-    """ return unicode output of executing 'cmd' in a separate process.
-
-    raise cmdexec.Error exeception if the command failed.
-    the exception will provide an 'err' attribute containing
-    the error-output from the command.
-    if the subprocess module does not provide a proper encoding/unicode strings
-    sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'.
-    """
-    process = subprocess.Popen(cmd, shell=True,
-            universal_newlines=True,
-            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = process.communicate()
-    if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
-        try:
-            default_encoding = sys.getdefaultencoding() # jython may not have it
-        except AttributeError:
-            default_encoding = sys.stdout.encoding or 'UTF-8'
-        out = unicode(out, process.stdout.encoding or default_encoding)
-        err = unicode(err, process.stderr.encoding or default_encoding)
-    status = process.poll()
-    if status:
-        raise ExecutionFailed(status, status, cmd, out, err)
-    return out
-
-class ExecutionFailed(py.error.Error):
-    def __init__(self, status, systemstatus, cmd, out, err):
-        Exception.__init__(self)
-        self.status = status
-        self.systemstatus = systemstatus
-        self.cmd = cmd
-        self.err = err
-        self.out = out
-
-    def __str__(self):
-        return "ExecutionFailed: %d  %s\n%s" %(self.status, self.cmd, self.err)
-
-# export the exception under the name 'py.process.cmdexec.Error'
-cmdexec.Error = ExecutionFailed
-try:
-    ExecutionFailed.__module__ = 'py.process.cmdexec'
-    ExecutionFailed.__name__ = 'Error'
-except (AttributeError, TypeError):
-    pass
+import sys
+import subprocess
+import py
+from subprocess import Popen, PIPE
+
+def cmdexec(cmd):
+    """ return unicode output of executing 'cmd' in a separate process.
+
+    raise cmdexec.Error exeception if the command failed.
+    the exception will provide an 'err' attribute containing
+    the error-output from the command.
+    if the subprocess module does not provide a proper encoding/unicode strings
+    sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'.
+    """
+    process = subprocess.Popen(cmd, shell=True,
+            universal_newlines=True,
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    out, err = process.communicate()
+    if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
+        try:
+            default_encoding = sys.getdefaultencoding() # jython may not have it
+        except AttributeError:
+            default_encoding = sys.stdout.encoding or 'UTF-8'
+        out = unicode(out, process.stdout.encoding or default_encoding)
+        err = unicode(err, process.stderr.encoding or default_encoding)
+    status = process.poll()
+    if status:
+        raise ExecutionFailed(status, status, cmd, out, err)
+    return out
+
+class ExecutionFailed(py.error.Error):
+    def __init__(self, status, systemstatus, cmd, out, err):
+        Exception.__init__(self)
+        self.status = status
+        self.systemstatus = systemstatus
+        self.cmd = cmd
+        self.err = err
+        self.out = out
+
+    def __str__(self):
+        return "ExecutionFailed: %d  %s\n%s" %(self.status, self.cmd, self.err)
+
+# export the exception under the name 'py.process.cmdexec.Error'
+cmdexec.Error = ExecutionFailed
+try:
+    ExecutionFailed.__module__ = 'py.process.cmdexec'
+    ExecutionFailed.__name__ = 'Error'
+except (AttributeError, TypeError):
+    pass
--- a/third_party/python/py/py/_process/forkedfunc.py
+++ b/third_party/python/py/py/_process/forkedfunc.py
@@ -1,120 +1,120 @@
-
-"""
-    ForkedFunc provides a way to run a function in a forked process
-    and get at its return value, stdout and stderr output as well
-    as signals and exitstatusus.
-"""
-
-import py
-import os
-import sys
-import marshal
-
-
-def get_unbuffered_io(fd, filename):
-    f = open(str(filename), "w")
-    if fd != f.fileno():
-        os.dup2(f.fileno(), fd)
-    class AutoFlush:
-        def write(self, data):
-            f.write(data)
-            f.flush()
-        def __getattr__(self, name):
-            return getattr(f, name)
-    return AutoFlush()
-
-
-class ForkedFunc:
-    EXITSTATUS_EXCEPTION = 3
-
-
-    def __init__(self, fun, args=None, kwargs=None, nice_level=0,
-                 child_on_start=None, child_on_exit=None):
-        if args is None:
-            args = []
-        if kwargs is None:
-            kwargs = {}
-        self.fun = fun
-        self.args = args
-        self.kwargs = kwargs
-        self.tempdir = tempdir = py.path.local.mkdtemp()
-        self.RETVAL = tempdir.ensure('retval')
-        self.STDOUT = tempdir.ensure('stdout')
-        self.STDERR = tempdir.ensure('stderr')
-
-        pid = os.fork()
-        if pid:  # in parent process
-            self.pid = pid
-        else:  # in child process
-            self.pid = None
-            self._child(nice_level, child_on_start, child_on_exit)
-
-    def _child(self, nice_level, child_on_start, child_on_exit):
-        # right now we need to call a function, but first we need to
-        # map all IO that might happen
-        sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT)
-        sys.stderr = stderr = get_unbuffered_io(2, self.STDERR)
-        retvalf = self.RETVAL.open("wb")
-        EXITSTATUS = 0
-        try:
-            if nice_level:
-                os.nice(nice_level)
-            try:
-                if child_on_start is not None:
-                    child_on_start()
-                retval = self.fun(*self.args, **self.kwargs)
-                retvalf.write(marshal.dumps(retval))
-                if child_on_exit is not None:
-                    child_on_exit()
-            except:
-                excinfo = py.code.ExceptionInfo()
-                stderr.write(str(excinfo._getreprcrash()))
-                EXITSTATUS = self.EXITSTATUS_EXCEPTION
-        finally:
-            stdout.close()
-            stderr.close()
-            retvalf.close()
-        os.close(1)
-        os.close(2)
-        os._exit(EXITSTATUS)
-
-    def waitfinish(self, waiter=os.waitpid):
-        pid, systemstatus = waiter(self.pid, 0)
-        if systemstatus:
-            if os.WIFSIGNALED(systemstatus):
-                exitstatus = os.WTERMSIG(systemstatus) + 128
-            else:
-                exitstatus = os.WEXITSTATUS(systemstatus)
-        else:
-            exitstatus = 0
-        signal = systemstatus & 0x7f
-        if not exitstatus and not signal:
-            retval = self.RETVAL.open('rb')
-            try:
-                retval_data = retval.read()
-            finally:
-                retval.close()
-            retval = marshal.loads(retval_data)
-        else:
-            retval = None
-        stdout = self.STDOUT.read()
-        stderr = self.STDERR.read()
-        self._removetemp()
-        return Result(exitstatus, signal, retval, stdout, stderr)
-
-    def _removetemp(self):
-        if self.tempdir.check():
-            self.tempdir.remove()
-
-    def __del__(self):
-        if self.pid is not None:  # only clean up in main process
-            self._removetemp()
-
-
-class Result(object):
-    def __init__(self, exitstatus, signal, retval, stdout, stderr):
-        self.exitstatus = exitstatus
-        self.signal = signal
-        self.retval = retval
-        self.out = stdout
-        self.err = stderr
+
+"""
+    ForkedFunc provides a way to run a function in a forked process
+    and get at its return value, stdout and stderr output as well
+    as signals and exitstatusus.
+"""
+
+import py
+import os
+import sys
+import marshal
+
+
+def get_unbuffered_io(fd, filename):
+    f = open(str(filename), "w")
+    if fd != f.fileno():
+        os.dup2(f.fileno(), fd)
+    class AutoFlush:
+        def write(self, data):
+            f.write(data)
+            f.flush()
+        def __getattr__(self, name):
+            return getattr(f, name)
+    return AutoFlush()
+
+
+class ForkedFunc:
+    EXITSTATUS_EXCEPTION = 3
+
+
+    def __init__(self, fun, args=None, kwargs=None, nice_level=0,
+                 child_on_start=None, child_on_exit=None):
+        if args is None:
+            args = []
+        if kwargs is None:
+            kwargs = {}
+        self.fun = fun
+        self.args = args
+        self.kwargs = kwargs
+        self.tempdir = tempdir = py.path.local.mkdtemp()
+        self.RETVAL = tempdir.ensure('retval')
+        self.STDOUT = tempdir.ensure('stdout')
+        self.STDERR = tempdir.ensure('stderr')
+
+        pid = os.fork()
+        if pid:  # in parent process
+            self.pid = pid
+        else:  # in child process
+            self.pid = None
+            self._child(nice_level, child_on_start, child_on_exit)
+
+    def _child(self, nice_level, child_on_start, child_on_exit):
+        # right now we need to call a function, but first we need to
+        # map all IO that might happen
+        sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT)
+        sys.stderr = stderr = get_unbuffered_io(2, self.STDERR)
+        retvalf = self.RETVAL.open("wb")
+        EXITSTATUS = 0
+        try:
+            if nice_level:
+                os.nice(nice_level)
+            try:
+                if child_on_start is not None:
+                    child_on_start()
+                retval = self.fun(*self.args, **self.kwargs)
+                retvalf.write(marshal.dumps(retval))
+                if child_on_exit is not None:
+                    child_on_exit()
+            except:
+                excinfo = py.code.ExceptionInfo()
+                stderr.write(str(excinfo._getreprcrash()))
+                EXITSTATUS = self.EXITSTATUS_EXCEPTION
+        finally:
+            stdout.close()
+            stderr.close()
+            retvalf.close()
+        os.close(1)
+        os.close(2)
+        os._exit(EXITSTATUS)
+
+    def waitfinish(self, waiter=os.waitpid):
+        pid, systemstatus = waiter(self.pid, 0)
+        if systemstatus:
+            if os.WIFSIGNALED(systemstatus):
+                exitstatus = os.WTERMSIG(systemstatus) + 128
+            else:
+                exitstatus = os.WEXITSTATUS(systemstatus)
+        else:
+            exitstatus = 0
+        signal = systemstatus & 0x7f
+        if not exitstatus and not signal:
+            retval = self.RETVAL.open('rb')
+            try:
+                retval_data = retval.read()
+            finally:
+                retval.close()
+            retval = marshal.loads(retval_data)
+        else:
+            retval = None
+        stdout = self.STDOUT.read()
+        stderr = self.STDERR.read()
+        self._removetemp()
+        return Result(exitstatus, signal, retval, stdout, stderr)
+
+    def _removetemp(self):
+        if self.tempdir.check():
+            self.tempdir.remove()
+
+    def __del__(self):
+        if self.pid is not None:  # only clean up in main process
+            self._removetemp()
+
+
+class Result(object):
+    def __init__(self, exitstatus, signal, retval, stdout, stderr):
+        self.exitstatus = exitstatus
+        self.signal = signal
+        self.retval = retval
+        self.out = stdout
+        self.err = stderr
--- a/third_party/python/py/py/_process/killproc.py
+++ b/third_party/python/py/py/_process/killproc.py
@@ -1,23 +1,23 @@
-import py
-import os, sys
-
-if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
-    try:
-        import ctypes
-    except ImportError:
-        def dokill(pid):
-            py.process.cmdexec("taskkill /F /PID %d" %(pid,))
-    else:
-        def dokill(pid):
-            PROCESS_TERMINATE = 1
-            handle = ctypes.windll.kernel32.OpenProcess(
-                        PROCESS_TERMINATE, False, pid)
-            ctypes.windll.kernel32.TerminateProcess(handle, -1)
-            ctypes.windll.kernel32.CloseHandle(handle)
-else:
-    def dokill(pid):
-        os.kill(pid, 15)
-
-def kill(pid):
-    """ kill process by id. """
-    dokill(pid)
+import py
+import os, sys
+
+if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
+    try:
+        import ctypes
+    except ImportError:
+        def dokill(pid):
+            py.process.cmdexec("taskkill /F /PID %d" %(pid,))
+    else:
+        def dokill(pid):
+            PROCESS_TERMINATE = 1
+            handle = ctypes.windll.kernel32.OpenProcess(
+                        PROCESS_TERMINATE, False, pid)
+            ctypes.windll.kernel32.TerminateProcess(handle, -1)
+            ctypes.windll.kernel32.CloseHandle(handle)
+else:
+    def dokill(pid):
+        os.kill(pid, 15)
+
+def kill(pid):
+    """ kill process by id. """
+    dokill(pid)
--- a/third_party/python/py/py/_std.py
+++ b/third_party/python/py/py/_std.py
@@ -1,18 +1,18 @@
-import sys
-
-class Std(object):
-    """ makes top-level python modules available as an attribute,
-        importing them on first access.
-    """
-
-    def __init__(self):
-        self.__dict__ = sys.modules
-
-    def __getattr__(self, name):
-        try:
-            m = __import__(name)
-        except ImportError:
-            raise AttributeError("py.std: could not import %s" % name)
-        return m
-
-std = Std()
+import sys
+
+class Std(object):
+    """ makes top-level python modules available as an attribute,
+        importing them on first access.
+    """
+
+    def __init__(self):
+        self.__dict__ = sys.modules
+
+    def __getattr__(self, name):
+        try:
+            m = __import__(name)
+        except ImportError:
+            raise AttributeError("py.std: could not import %s" % name)
+        return m
+
+std = Std()
--- a/third_party/python/py/py/_xmlgen.py
+++ b/third_party/python/py/py/_xmlgen.py
@@ -1,253 +1,255 @@
-"""
-module for generating and serializing xml and html structures
-by using simple python objects.
-
-(c) holger krekel, holger at merlinux eu. 2009
-"""
-import sys, re
-
-if sys.version_info >= (3,0):
-    def u(s):
-        return s
-    def unicode(x, errors=None):
-        if hasattr(x, '__unicode__'):
-            return x.__unicode__()
-        return str(x)
-else:
-    def u(s):
-        return unicode(s)
-    unicode = unicode
-
-
-class NamespaceMetaclass(type):
-    def __getattr__(self, name):
-        if name[:1] == '_':
-            raise AttributeError(name)
-        if self == Namespace:
-            raise ValueError("Namespace class is abstract")
-        tagspec = self.__tagspec__
-        if tagspec is not None and name not in tagspec:
-            raise AttributeError(name)
-        classattr = {}
-        if self.__stickyname__:
-            classattr['xmlname'] = name
-        cls = type(name, (self.__tagclass__,), classattr)
-        setattr(self, name, cls)
-        return cls
-
-class Tag(list):
-    class Attr(object):
-        def __init__(self, **kwargs):
-            self.__dict__.update(kwargs)
-
-    def __init__(self, *args, **kwargs):
-        super(Tag, self).__init__(args)
-        self.attr = self.Attr(**kwargs)
-
-    def __unicode__(self):
-        return self.unicode(indent=0)
-    __str__ = __unicode__
-
-    def unicode(self, indent=2):
-        l = []
-        SimpleUnicodeVisitor(l.append, indent).visit(self)
-        return u("").join(l)
-
-    def __repr__(self):
-        name = self.__class__.__name__
-        return "<%r tag object %d>" % (name, id(self))
-
-Namespace = NamespaceMetaclass('Namespace', (object, ), {
-    '__tagspec__': None,
-    '__tagclass__': Tag,
-    '__stickyname__': False,
-})
-
-class HtmlTag(Tag):
-    def unicode(self, indent=2):
-        l = []
-        HtmlVisitor(l.append, indent, shortempty=False).visit(self)
-        return u("").join(l)
-
-# exported plain html namespace
-class html(Namespace):
-    __tagclass__ = HtmlTag
-    __stickyname__ = True
-    __tagspec__ = dict([(x,1) for x in (
-        'a,abbr,acronym,address,applet,area,b,bdo,big,blink,'
-        'blockquote,body,br,button,caption,center,cite,code,col,'
-        'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,'
-        'fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,'
-        'i,iframe,img,input,ins,kbd,label,legend,li,link,listing,'
-        'map,marquee,menu,meta,multicol,nobr,noembed,noframes,'
-        'noscript,object,ol,optgroup,option,p,pre,q,s,script,'
-        'select,small,span,strike,strong,style,sub,sup,table,'
-        'tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,'
-        'base,basefont,frame,hr,isindex,param,samp,var'
-    ).split(',') if x])
-
-    class Style(object):
-        def __init__(self, **kw):
-            for x, y in kw.items():
-                x = x.replace('_', '-')
-                setattr(self, x, y)
-
-
-class raw(object):
-    """just a box that can contain a unicode string that will be
-    included directly in the output"""
-    def __init__(self, uniobj):
-        self.uniobj = uniobj
-
-class SimpleUnicodeVisitor(object):
-    """ recursive visitor to write unicode. """
-    def __init__(self, write, indent=0, curindent=0, shortempty=True):
-        self.write = write
-        self.cache = {}
-        self.visited = {} # for detection of recursion
-        self.indent = indent
-        self.curindent = curindent
-        self.parents = []
-        self.shortempty = shortempty  # short empty tags or not
-
-    def visit(self, node):
-        """ dispatcher on node's class/bases name. """
-        cls = node.__class__
-        try:
-            visitmethod = self.cache[cls]
-        except KeyError:
-            for subclass in cls.__mro__:
-                visitmethod = getattr(self, subclass.__name__, None)
-                if visitmethod is not None:
-                    break
-            else:
-                visitmethod = self.__object
-            self.cache[cls] = visitmethod
-        visitmethod(node)
-
-    # the default fallback handler is marked private
-    # to avoid clashes with the tag name object
-    def __object(self, obj):
-        #self.write(obj)
-        self.write(escape(unicode(obj)))
-
-    def raw(self, obj):
-        self.write(obj.uniobj)
-
-    def list(self, obj):
-        assert id(obj) not in self.visited
-        self.visited[id(obj)] = 1
-        for elem in obj:
-            self.visit(elem)
-
-    def Tag(self, tag):
-        assert id(tag) not in self.visited
-        try:
-            tag.parent = self.parents[-1]
-        except IndexError:
-            tag.parent = None
-        self.visited[id(tag)] = 1
-        tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
-        if self.curindent and not self._isinline(tagname):
-            self.write("\n" + u(' ') * self.curindent)
-        if tag:
-            self.curindent += self.indent
-            self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
-            self.parents.append(tag)
-            for x in tag:
-                self.visit(x)
-            self.parents.pop()
-            self.write(u('</%s>') % tagname)
-            self.curindent -= self.indent
-        else:
-            nameattr = tagname+self.attributes(tag)
-            if self._issingleton(tagname):
-                self.write(u('<%s/>') % (nameattr,))
-            else:
-                self.write(u('<%s></%s>') % (nameattr, tagname))
-
-    def attributes(self, tag):
-        # serialize attributes
-        attrlist = dir(tag.attr)
-        attrlist.sort()
-        l = []
-        for name in attrlist:
-            res = self.repr_attribute(tag.attr, name)
-            if res is not None:
-                l.append(res)
-        l.extend(self.getstyle(tag))
-        return u("").join(l)
-
-    def repr_attribute(self, attrs, name):
-        if name[:2] != '__':
-            value = getattr(attrs, name)
-            if name.endswith('_'):
-                name = name[:-1]
-            if isinstance(value, raw):
-                insert = value.uniobj
-            else:
-                insert = escape(unicode(value))
-            return ' %s="%s"' % (name, insert)
-
-    def getstyle(self, tag):
-        """ return attribute list suitable for styling. """
-        try:
-            styledict = tag.style.__dict__
-        except AttributeError:
-            return []
-        else:
-            stylelist = [x+': ' + y for x,y in styledict.items()]
-            return [u(' style="%s"') % u('; ').join(stylelist)]
-
-    def _issingleton(self, tagname):
-        """can (and will) be overridden in subclasses"""
-        return self.shortempty
-
-    def _isinline(self, tagname):
-        """can (and will) be overridden in subclasses"""
-        return False
-
-class HtmlVisitor(SimpleUnicodeVisitor):
-
-    single = dict([(x, 1) for x in
-                ('br,img,area,param,col,hr,meta,link,base,'
-                    'input,frame').split(',')])
-    inline = dict([(x, 1) for x in
-                ('a abbr acronym b basefont bdo big br cite code dfn em font '
-                 'i img input kbd label q s samp select small span strike '
-                 'strong sub sup textarea tt u var'.split(' '))])
-
-    def repr_attribute(self, attrs, name):
-        if name == 'class_':
-            value = getattr(attrs, name)
-            if value is None:
-                return
-        return super(HtmlVisitor, self).repr_attribute(attrs, name)
-
-    def _issingleton(self, tagname):
-        return tagname in self.single
-
-    def _isinline(self, tagname):
-        return tagname in self.inline
-
-
-class _escape:
-    def __init__(self):
-        self.escape = {
-            u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
-            u('&') : u('&amp;'), u("'") : u('&apos;'),
-            }
-        self.charef_rex = re.compile(u("|").join(self.escape.keys()))
-
-    def _replacer(self, match):
-        return self.escape[match.group(0)]
-
-    def __call__(self, ustring):
-        """ xml-escape the given unicode string. """
-        try:
-            ustring = unicode(ustring)
-        except UnicodeDecodeError:
-            ustring = unicode(ustring, 'utf-8', errors='replace')
-        return self.charef_rex.sub(self._replacer, ustring)
-
-escape = _escape()
+"""
+module for generating and serializing xml and html structures
+by using simple python objects.
+
+(c) holger krekel, holger at merlinux eu. 2009
+"""
+import sys, re
+
+if sys.version_info >= (3,0):
+    def u(s):
+        return s
+    def unicode(x, errors=None):
+        if hasattr(x, '__unicode__'):
+            return x.__unicode__()
+        return str(x)
+else:
+    def u(s):
+        return unicode(s)
+    unicode = unicode
+
+
+class NamespaceMetaclass(type):
+    def __getattr__(self, name):
+        if name[:1] == '_':
+            raise AttributeError(name)
+        if self == Namespace:
+            raise ValueError("Namespace class is abstract")
+        tagspec = self.__tagspec__
+        if tagspec is not None and name not in tagspec:
+            raise AttributeError(name)
+        classattr = {}
+        if self.__stickyname__:
+            classattr['xmlname'] = name
+        cls = type(name, (self.__tagclass__,), classattr)
+        setattr(self, name, cls)
+        return cls
+
+class Tag(list):
+    class Attr(object):
+        def __init__(self, **kwargs):
+            self.__dict__.update(kwargs)
+
+    def __init__(self, *args, **kwargs):
+        super(Tag, self).__init__(args)
+        self.attr = self.Attr(**kwargs)
+
+    def __unicode__(self):
+        return self.unicode(indent=0)
+    __str__ = __unicode__
+
+    def unicode(self, indent=2):
+        l = []
+        SimpleUnicodeVisitor(l.append, indent).visit(self)
+        return u("").join(l)
+
+    def __repr__(self):
+        name = self.__class__.__name__
+        return "<%r tag object %d>" % (name, id(self))
+
+Namespace = NamespaceMetaclass('Namespace', (object, ), {
+    '__tagspec__': None,
+    '__tagclass__': Tag,
+    '__stickyname__': False,
+})
+
+class HtmlTag(Tag):
+    def unicode(self, indent=2):
+        l = []
+        HtmlVisitor(l.append, indent, shortempty=False).visit(self)
+        return u("").join(l)
+
+# exported plain html namespace
+class html(Namespace):
+    __tagclass__ = HtmlTag
+    __stickyname__ = True
+    __tagspec__ = dict([(x,1) for x in (
+        'a,abbr,acronym,address,applet,area,article,aside,audio,b,'
+        'base,basefont,bdi,bdo,big,blink,blockquote,body,br,button,'
+        'canvas,caption,center,cite,code,col,colgroup,command,comment,'
+        'datalist,dd,del,details,dfn,dir,div,dl,dt,em,embed,'
+        'fieldset,figcaption,figure,footer,font,form,frame,frameset,h1,'
+        'h2,h3,h4,h5,h6,head,header,hgroup,hr,html,i,iframe,img,input,'
+        'ins,isindex,kbd,keygen,label,legend,li,link,listing,map,mark,'
+        'marquee,menu,meta,meter,multicol,nav,nobr,noembed,noframes,'
+        'noscript,object,ol,optgroup,option,output,p,param,pre,progress,'
+        'q,rp,rt,ruby,s,samp,script,section,select,small,source,span,'
+        'strike,strong,style,sub,summary,sup,table,tbody,td,textarea,'
+        'tfoot,th,thead,time,title,tr,track,tt,u,ul,xmp,var,video,wbr'
+    ).split(',') if x])
+
+    class Style(object):
+        def __init__(self, **kw):
+            for x, y in kw.items():
+                x = x.replace('_', '-')
+                setattr(self, x, y)
+
+
+class raw(object):
+    """just a box that can contain a unicode string that will be
+    included directly in the output"""
+    def __init__(self, uniobj):
+        self.uniobj = uniobj
+
+class SimpleUnicodeVisitor(object):
+    """ recursive visitor to write unicode. """
+    def __init__(self, write, indent=0, curindent=0, shortempty=True):
+        self.write = write
+        self.cache = {}
+        self.visited = {} # for detection of recursion
+        self.indent = indent
+        self.curindent = curindent
+        self.parents = []
+        self.shortempty = shortempty  # short empty tags or not
+
+    def visit(self, node):
+        """ dispatcher on node's class/bases name. """
+        cls = node.__class__
+        try:
+            visitmethod = self.cache[cls]
+        except KeyError:
+            for subclass in cls.__mro__:
+                visitmethod = getattr(self, subclass.__name__, None)
+                if visitmethod is not None:
+                    break
+            else:
+                visitmethod = self.__object
+            self.cache[cls] = visitmethod
+        visitmethod(node)
+
+    # the default fallback handler is marked private
+    # to avoid clashes with the tag name object
+    def __object(self, obj):
+        #self.write(obj)
+        self.write(escape(unicode(obj)))
+
+    def raw(self, obj):
+        self.write(obj.uniobj)
+
+    def list(self, obj):
+        assert id(obj) not in self.visited
+        self.visited[id(obj)] = 1
+        for elem in obj:
+            self.visit(elem)
+
+    def Tag(self, tag):
+        assert id(tag) not in self.visited
+        try:
+            tag.parent = self.parents[-1]
+        except IndexError:
+            tag.parent = None
+        self.visited[id(tag)] = 1
+        tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
+        if self.curindent and not self._isinline(tagname):
+            self.write("\n" + u(' ') * self.curindent)
+        if tag:
+            self.curindent += self.indent
+            self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
+            self.parents.append(tag)
+            for x in tag:
+                self.visit(x)
+            self.parents.pop()
+            self.write(u('</%s>') % tagname)
+            self.curindent -= self.indent
+        else:
+            nameattr = tagname+self.attributes(tag)
+            if self._issingleton(tagname):
+                self.write(u('<%s/>') % (nameattr,))
+            else:
+                self.write(u('<%s></%s>') % (nameattr, tagname))
+
+    def attributes(self, tag):
+        # serialize attributes
+        attrlist = dir(tag.attr)
+        attrlist.sort()
+        l = []
+        for name in attrlist:
+            res = self.repr_attribute(tag.attr, name)
+            if res is not None:
+                l.append(res)
+        l.extend(self.getstyle(tag))
+        return u("").join(l)
+
+    def repr_attribute(self, attrs, name):
+        if name[:2] != '__':
+            value = getattr(attrs, name)
+            if name.endswith('_'):
+                name = name[:-1]
+            if isinstance(value, raw):
+                insert = value.uniobj
+            else:
+                insert = escape(unicode(value))
+            return ' %s="%s"' % (name, insert)
+
+    def getstyle(self, tag):
+        """ return attribute list suitable for styling. """
+        try:
+            styledict = tag.style.__dict__
+        except AttributeError:
+            return []
+        else:
+            stylelist = [x+': ' + y for x,y in styledict.items()]
+            return [u(' style="%s"') % u('; ').join(stylelist)]
+
+    def _issingleton(self, tagname):
+        """can (and will) be overridden in subclasses"""
+        return self.shortempty
+
+    def _isinline(self, tagname):
+        """can (and will) be overridden in subclasses"""
+        return False
+
+class HtmlVisitor(SimpleUnicodeVisitor):
+
+    single = dict([(x, 1) for x in
+                ('br,img,area,param,col,hr,meta,link,base,'
+                    'input,frame').split(',')])
+    inline = dict([(x, 1) for x in
+                ('a abbr acronym b basefont bdo big br cite code dfn em font '
+                 'i img input kbd label q s samp select small span strike '
+                 'strong sub sup textarea tt u var'.split(' '))])
+
+    def repr_attribute(self, attrs, name):
+        if name == 'class_':
+            value = getattr(attrs, name)
+            if value is None:
+                return
+        return super(HtmlVisitor, self).repr_attribute(attrs, name)
+
+    def _issingleton(self, tagname):
+        return tagname in self.single
+
+    def _isinline(self, tagname):
+        return tagname in self.inline
+
+
+class _escape:
+    def __init__(self):
+        self.escape = {
+            u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
+            u('&') : u('&amp;'), u("'") : u('&apos;'),
+            }
+        self.charef_rex = re.compile(u("|").join(self.escape.keys()))
+
+    def _replacer(self, match):
+        return self.escape[match.group(0)]
+
+    def __call__(self, ustring):
+        """ xml-escape the given unicode string. """
+        try:
+            ustring = unicode(ustring)
+        except UnicodeDecodeError:
+            ustring = unicode(ustring, 'utf-8', errors='replace')
+        return self.charef_rex.sub(self._replacer, ustring)
+
+escape = _escape()
--- a/third_party/python/py/py/test.py
+++ b/third_party/python/py/py/test.py
@@ -1,10 +1,10 @@
-import sys
-if __name__ == '__main__':
-    import pytest
-    sys.exit(pytest.main())
-else:
-    import sys, pytest
-    sys.modules['py.test'] = pytest
-
-# for more API entry points see the 'tests' definition
-# in __init__.py
+import sys
+if __name__ == '__main__':
+    import pytest
+    sys.exit(pytest.main())
+else:
+    import sys, pytest
+    sys.modules['py.test'] = pytest
+
+# for more API entry points see the 'tests' definition
+# in __init__.py
deleted file mode 100644
--- a/third_party/python/py/setup.cfg
+++ /dev/null
@@ -1,11 +0,0 @@
-[wheel]
-universal = 1
-
-[devpi:upload]
-formats = sdist.tgz,bdist_wheel
-
-[egg_info]
-tag_build = 
-tag_date = 0
-tag_svn_revision = 0
-
deleted file mode 100644
--- a/third_party/python/py/setup.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os, sys
-
-from setuptools import setup
-
-def main():
-    setup(
-        name='py',
-        description='library with cross-python path, ini-parsing, io, code, log facilities',
-        long_description = open('README.txt').read(),
-        version='1.4.31',
-        url='http://pylib.readthedocs.org/',
-        license='MIT license',
-        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
-        author='holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others',
-        author_email='pytest-dev@python.org',
-        classifiers=['Development Status :: 6 - Mature',
-                     'Intended Audience :: Developers',
-                     'License :: OSI Approved :: MIT License',
-                     'Operating System :: POSIX',
-                     'Operating System :: Microsoft :: Windows',
-                     'Operating System :: MacOS :: MacOS X',
-                     'Topic :: Software Development :: Testing',
-                     'Topic :: Software Development :: Libraries',
-                     'Topic :: Utilities',
-                     'Programming Language :: Python',
-                     'Programming Language :: Python :: 3'],
-        packages=['py',
-                  'py._code',
-                  'py._io',
-                  'py._log',
-                  'py._path',
-                  'py._process',
-        ],
-        zip_safe=False,
-    )
-
-if __name__ == '__main__':
-    main()
deleted file mode 100644
--- a/third_party/python/pytest/.coveragerc
+++ /dev/null
@@ -1,7 +0,0 @@
-[run]
-omit = 
-    # standlonetemplate is read dynamically and tested by test_genscript
-    *standalonetemplate.py
-    # oldinterpret could be removed, as it is no longer used in py26+
-    *oldinterpret.py
-    vendored_packages
deleted file mode 100644
--- a/third_party/python/pytest/AUTHORS
+++ /dev/null
@@ -1,91 +0,0 @@
-Holger Krekel, holger at merlinux eu
-merlinux GmbH, Germany, office at merlinux eu
-
-Contributors include::
-
-Abhijeet Kasurde
-Anatoly Bubenkoff
-Andreas Zeidler
-Andy Freeland
-Anthon van der Neut
-Armin Rigo
-Aron Curzon
-Aviv Palivoda
-Benjamin Peterson
-Bob Ippolito
-Brian Dorsey
-Brian Okken
-Brianna Laugher
-Bruno Oliveira
-Carl Friedrich Bolz
-Charles Cloud
-Chris Lamb
-Christian Theunert
-Christian Tismer
-Christopher Gilling
-Daniel Grana
-Daniel Hahler
-Daniel Nuri
-Dave Hunt
-David Mohr
-David Vierra
-Edison Gustavo Muenz
-Eduardo Schettino
-Endre Galaczi
-Elizaveta Shashkova
-Eric Hunsberger
-Eric Siegerman
-Erik M. Bray
-Florian Bruhin
-Floris Bruynooghe
-Gabriel Reis
-Georgy Dyuldin
-Graham Horler
-Grig Gheorghiu
-Guido Wesdorp
-Harald Armin Massa
-Ian Bicking
-Jaap Broekhuizen
-Jan Balster
-Janne Vanhala
-Jason R. Coombs
-John Towler
-Joshua Bronson
-Jurko Gospodnetić
-Katarzyna Jachim
-Kevin Cox
-Lee Kamentsky
-Lukas Bednar
-Maciek Fijalkowski
-Maho
-Marc Schlaich
-Mark Abramowitz
-Markus Unterwaditzer
-Martijn Faassen
-Martin Prusse
-Matt Bachmann
-Michael Aquilina
-Michael Birtwell
-Michael Droettboom
-Nicolas Delaby
-Pieter Mulder
-Piotr Banaszkiewicz
-Punyashloka Biswal
-Quentin Pradet
-Ralf Schmitt
-Raphael Pierzina
-Ronny Pfannschmidt
-Ross Lawley
-Ryan Wooden
-Samuele Pedroni
-Tom Viner
-Trevor Bekolay
-Wouter van Ackooy
-David Díaz-Barquero
-Eric Hunsberger
-Simon Gomizelj
-Russel Winder
-Ben Webb
-Alexei Kozlenok
-Cal Leeming
-Feng Ma
deleted file mode 100644
--- a/third_party/python/pytest/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2004-2016 Holger Krekel and others
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
deleted file mode 100644
--- a/third_party/python/pytest/MANIFEST.in
+++ /dev/null
@@ -1,34 +0,0 @@
-include CHANGELOG.rst
-include LICENSE
-include AUTHORS
-
-include README.rst
-include CONTRIBUTING.rst
-
-include tox.ini
-include setup.py
-
-include .coveragerc
-
-include plugin-test.sh
-include requirements-docs.txt
-include runtox.py
-
-recursive-include bench *.py
-recursive-include extra *.py
-
-graft testing
-graft doc
-
-exclude _pytest/impl
-
-graft _pytest/vendored_packages
-
-recursive-exclude * *.pyc *.pyo
-
-exclude appveyor/install.ps1
-exclude appveyor.yml
-exclude appveyor
-
-exclude ISSUES.txt
-exclude HOWTORELEASE.rst
deleted file mode 100644
--- a/third_party/python/pytest/PKG-INFO
+++ /dev/null
@@ -1,133 +0,0 @@
-Metadata-Version: 1.1
-Name: pytest
-Version: 2.9.2
-Summary: pytest: simple powerful testing with Python
-Home-page: http://pytest.org
-Author: Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others
-Author-email: holger at merlinux.eu
-License: MIT license
-Description: .. image:: http://pytest.org/latest/_static/pytest1.png
-           :target: http://pytest.org
-           :align: center
-           :alt: pytest
-        
-        ------
-        
-        .. image:: https://img.shields.io/pypi/v/pytest.svg
-           :target: https://pypi.python.org/pypi/pytest
-        .. image:: https://img.shields.io/pypi/pyversions/pytest.svg
-          :target: https://pypi.python.org/pypi/pytest
-        .. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg
-           :target: https://coveralls.io/r/pytest-dev/pytest
-        .. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master
-            :target: https://travis-ci.org/pytest-dev/pytest
-        .. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true
-            :target: https://ci.appveyor.com/project/pytestbot/pytest
-        
-        The ``pytest`` framework makes it easy to write small tests, yet
-        scales to support complex functional testing for applications and libraries.    
-        
-        An example of a simple test:
-        
-        .. code-block:: python
-        
-            # content of test_sample.py
-            def func(x):
-                return x + 1
-        
-            def test_answer():
-                assert func(3) == 5
-        
-        
-        To execute it::
-        
-            $ py.test
-            ======= test session starts ========
-            platform linux -- Python 3.4.3, pytest-2.8.5, py-1.4.31, pluggy-0.3.1    
-            collected 1 items
-        
-            test_sample.py F
-        
-            ======= FAILURES ========
-            _______ test_answer ________
-        
-                def test_answer():
-            >       assert func(3) == 5
-            E       assert 4 == 5
-            E        +  where 4 = func(3)
-        
-            test_sample.py:5: AssertionError
-            ======= 1 failed in 0.12 seconds ========
-        
-        Due to ``py.test``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://pytest.org/latest/getting-started.html#our-first-test-run>`_ for more examples.
-                
-        
-        Features
-        --------
-        
-        - Detailed info on failing `assert statements <http://pytest.org/latest/assert.html>`_ (no need to remember ``self.assert*`` names);
-        
-        - `Auto-discovery
-          <http://pytest.org/latest/goodpractices.html#python-test-discovery>`_
-          of test modules and functions;
-        
-        - `Modular fixtures <http://pytest.org/latest/fixture.html>`_  for
-          managing small or parametrized long-lived test resources;
-        
-        - Can run `unittest <http://pytest.org/latest/unittest.html>`_ (or trial),
-          `nose <http://pytest.org/latest/nose.html>`_ test suites out of the box;
-        
-        - Python2.6+, Python3.2+, PyPy-2.3, Jython-2.5 (untested);
-        
-        - Rich plugin architecture, with over 150+ `external plugins <http://pytest.org/latest/plugins.html#installing-external-plugins-searching>`_ and thriving community;
-        
-        
-        Documentation
-        -------------
-        
-        For full documentation, including installation, tutorials and PDF documents, please see http://pytest.org.
-        
-        
-        Bugs/Requests
-        -------------
-        
-        Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
-        
-        
-        Changelog
-        ---------
-        
-        Consult the `Changelog <http://pytest.org/latest/changelog.html>`_ page for fixes and enhancements of each version.
-        
-        
-        License
-        -------
-        
-        Copyright Holger Krekel and others, 2004-2016.
-        
-        Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
-        
-        .. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE
-        
-Platform: unix
-Platform: linux
-Platform: osx
-Platform: cygwin
-Platform: win32
-Classifier: Development Status :: 6 - Mature
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Topic :: Software Development :: Testing
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.2
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
deleted file mode 100644
--- a/third_party/python/pytest/README.rst
+++ /dev/null
@@ -1,102 +0,0 @@
-.. image:: http://pytest.org/latest/_static/pytest1.png
-   :target: http://pytest.org
-   :align: center
-   :alt: pytest
-
-------
-
-.. image:: https://img.shields.io/pypi/v/pytest.svg
-   :target: https://pypi.python.org/pypi/pytest
-.. image:: https://img.shields.io/pypi/pyversions/pytest.svg
-  :target: https://pypi.python.org/pypi/pytest
-.. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg
-   :target: https://coveralls.io/r/pytest-dev/pytest
-.. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master
-    :target: https://travis-ci.org/pytest-dev/pytest
-.. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true
-    :target: https://ci.appveyor.com/project/pytestbot/pytest
-
-The ``pytest`` framework makes it easy to write small tests, yet
-scales to support complex functional testing for applications and libraries.    
-
-An example of a simple test:
-
-.. code-block:: python
-
-    # content of test_sample.py
-    def func(x):
-        return x + 1
-
-    def test_answer():
-        assert func(3) == 5
-
-
-To execute it::
-
-    $ py.test
-    ======= test session starts ========
-    platform linux -- Python 3.4.3, pytest-2.8.5, py-1.4.31, pluggy-0.3.1    
-    collected 1 items
-
-    test_sample.py F
-
-    ======= FAILURES ========
-    _______ test_answer ________
-
-        def test_answer():
-    >       assert func(3) == 5
-    E       assert 4 == 5
-    E        +  where 4 = func(3)
-
-    test_sample.py:5: AssertionError
-    ======= 1 failed in 0.12 seconds ========
-
-Due to ``py.test``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://pytest.org/latest/getting-started.html#our-first-test-run>`_ for more examples.
-        
-
-Features
---------
-
-- Detailed info on failing `assert statements <http://pytest.org/latest/assert.html>`_ (no need to remember ``self.assert*`` names);
-
-- `Auto-discovery
-  <http://pytest.org/latest/goodpractices.html#python-test-discovery>`_
-  of test modules and functions;
-
-- `Modular fixtures <http://pytest.org/latest/fixture.html>`_  for
-  managing small or parametrized long-lived test resources;
-
-- Can run `unittest <http://pytest.org/latest/unittest.html>`_ (or trial),
-  `nose <http://pytest.org/latest/nose.html>`_ test suites out of the box;
-
-- Python2.6+, Python3.2+, PyPy-2.3, Jython-2.5 (untested);
-
-- Rich plugin architecture, with over 150+ `external plugins <http://pytest.org/latest/plugins.html#installing-external-plugins-searching>`_ and thriving community;
-
-
-Documentation
--------------
-
-For full documentation, including installation, tutorials and PDF documents, please see http://pytest.org.
-
-
-Bugs/Requests
--------------
-
-Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
-
-
-Changelog
----------
-
-Consult the `Changelog <http://pytest.org/latest/changelog.html>`_ page for fixes and enhancements of each version.
-
-
-License
--------
-
-Copyright Holger Krekel and others, 2004-2016.
-
-Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
-
-.. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE
--- a/third_party/python/pytest/_pytest/__init__.py
+++ b/third_party/python/pytest/_pytest/__init__.py
@@ -1,2 +1,8 @@
-#
-__version__ = '2.9.2'
+__all__ = ['__version__']
+
+try:
+    from ._version import version as __version__
+except ImportError:
+    # broken installation, we don't even try
+    # unknown only works because we do poor mans version compare
+    __version__ = 'unknown'
--- a/third_party/python/pytest/_pytest/_argcomplete.py
+++ b/third_party/python/pytest/_pytest/_argcomplete.py
@@ -52,17 +52,17 @@ If things do not work right away:
     python-argcomplete-check-easy-install-script $(which appname)
     echo $?
   will echo 0 if the magic line has been found, 1 if not
 - sometimes it helps to find early on errors using:
     _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
   which should throw a KeyError: 'COMPLINE' (which is properly set by the
   global argcomplete script).
 """
-
+from __future__ import absolute_import, division, print_function
 import sys
 import os
 from glob import glob
 
 class FastFilesCompleter:
     'Fast file completer class'
     def __init__(self, directories=True):
         self.directories = directories
@@ -82,16 +82,17 @@ class FastFilesCompleter:
         globbed.extend(glob(prefix))
         for x in sorted(globbed):
             if os.path.isdir(x):
                 x += '/'
             # append stripping the prefix (like bash, not like compgen)
             completion.append(x[prefix_dir:])
         return completion
 
+
 if os.environ.get('_ARGCOMPLETE'):
     try:
         import argcomplete.completers
     except ImportError:
         sys.exit(-1)
     filescompleter = FastFilesCompleter()
 
     def try_argcomplete(parser):
--- a/third_party/python/pytest/_pytest/_code/__init__.py
+++ b/third_party/python/pytest/_pytest/_code/__init__.py
@@ -1,12 +1,10 @@
 """ python inspection/code generation API """
+from __future__ import absolute_import, division, print_function
 from .code import Code  # noqa
 from .code import ExceptionInfo  # noqa
 from .code import Frame  # noqa
 from .code import Traceback  # noqa
 from .code import getrawcode  # noqa
-from .code import patch_builtins  # noqa
-from .code import unpatch_builtins  # noqa
 from .source import Source  # noqa
 from .source import compile_ as compile  # noqa
 from .source import getfslineno  # noqa
-
--- a/third_party/python/pytest/_pytest/_code/_py2traceback.py
+++ b/third_party/python/pytest/_pytest/_code/_py2traceback.py
@@ -1,12 +1,13 @@
 # copied from python-2.7.3's traceback.py
 # CHANGES:
 # - some_str is replaced, trying to create unicode strings
 #
+from __future__ import absolute_import, division, print_function
 import types
 
 def format_exception_only(etype, value):
     """Format the exception part of a traceback.
 
     The arguments are the exception type and value such as given by
     sys.last_type and sys.last_value. The return value is a list of
     strings, each ending in a newline.
--- a/third_party/python/pytest/_pytest/_code/code.py
+++ b/third_party/python/pytest/_pytest/_code/code.py
@@ -1,51 +1,61 @@
+from __future__ import absolute_import, division, print_function
 import sys
 from inspect import CO_VARARGS, CO_VARKEYWORDS
+import re
+from weakref import ref
+from _pytest.compat import _PY2, _PY3, PY35, safe_str
 
 import py
-
 builtin_repr = repr
 
 reprlib = py.builtin._tryimport('repr', 'reprlib')
 
-if sys.version_info[0] >= 3:
+if _PY3:
     from traceback import format_exception_only
 else:
     from ._py2traceback import format_exception_only
 
+
 class Code(object):
     """ wrapper around Python code objects """
     def __init__(self, rawcode):
         if not hasattr(rawcode, "co_filename"):
             rawcode = getrawcode(rawcode)
         try:
             self.filename = rawcode.co_filename
             self.firstlineno = rawcode.co_firstlineno - 1
             self.name = rawcode.co_name
         except AttributeError:
             raise TypeError("not a code object: %r" %(rawcode,))
         self.raw = rawcode
 
     def __eq__(self, other):
         return self.raw == other.raw
 
+    __hash__ = None
+
     def __ne__(self, other):
         return not self == other
 
     @property
     def path(self):
         """ return a path object pointing to source code (note that it
         might not point to an actually existing file). """
-        p = py.path.local(self.raw.co_filename)
-        # maybe don't try this checking
-        if not p.check():
+        try:
+            p = py.path.local(self.raw.co_filename)
+            # maybe don't try this checking
+            if not p.check():
+                raise OSError("py.path check failed.")
+        except OSError:
             # XXX maybe try harder like the weird logic
             # in the standard lib [linecache.updatecache] does?
             p = self.raw.co_filename
+
         return p
 
     @property
     def fullsource(self):
         """ return a _pytest._code.Source object for the full source file of the code
         """
         from _pytest._code import source
         full, _ = source.findsource(self.raw)
@@ -134,17 +144,18 @@ class Frame(object):
         return retval
 
 class TracebackEntry(object):
     """ a single entry in a traceback """
 
     _repr_style = None
     exprinfo = None
 
-    def __init__(self, rawentry):
+    def __init__(self, rawentry, excinfo=None):
+        self._excinfo = excinfo
         self._rawentry = rawentry
         self.lineno = rawentry.tb_lineno - 1
 
     def set_repr_style(self, mode):
         assert mode in ("short", "long")
         self._repr_style = mode
 
     @property
@@ -169,28 +180,16 @@ class TracebackEntry(object):
     def path(self):
         """ path to the source code """
         return self.frame.code.path
 
     def getlocals(self):
         return self.frame.f_locals
     locals = property(getlocals, None, None, "locals of underlaying frame")
 
-    def reinterpret(self):
-        """Reinterpret the failing statement and returns a detailed information
-           about what operations are performed."""
-        from _pytest.assertion.reinterpret import reinterpret
-        if self.exprinfo is None:
-            source = py.builtin._totext(self.statement).strip()
-            x = reinterpret(source, self.frame, should_fail=True)
-            if not py.builtin._istext(x):
-                raise TypeError("interpret returned non-string %r" % (x,))
-            self.exprinfo = x
-        return self.exprinfo
-
     def getfirstlinesource(self):
         # on Jython this firstlineno can be -1 apparently
         return max(self.frame.code.firstlineno, 0)
 
     def getsource(self, astcache=None):
         """ return failing source code. """
         # we use the passed in astcache to not reparse asttrees
         # within exception info printing
@@ -215,26 +214,34 @@ class TracebackEntry(object):
         return source[start:end]
 
     source = property(getsource)
 
     def ishidden(self):
         """ return True if the current frame has a var __tracebackhide__
             resolving to True
 
+            If __tracebackhide__ is a callable, it gets called with the
+            ExceptionInfo instance and can decide whether to hide the traceback.
+
             mostly for internal use
         """
         try:
-            return self.frame.f_locals['__tracebackhide__']
+            tbh = self.frame.f_locals['__tracebackhide__']
         except KeyError:
             try:
-                return self.frame.f_globals['__tracebackhide__']
+                tbh = self.frame.f_globals['__tracebackhide__']
             except KeyError:
                 return False
 
+        if py.builtin.callable(tbh):
+            return tbh(None if self._excinfo is None else self._excinfo())
+        else:
+            return tbh
+
     def __str__(self):
         try:
             fn = str(self.path)
         except py.error.Error:
             fn = '???'
         name = self.frame.code.name
         try:
             line = str(self.statement).lstrip()
@@ -248,22 +255,23 @@ class TracebackEntry(object):
         return self.frame.code.raw.co_name
     name = property(name, None, None, "co_name of underlaying code")
 
 class Traceback(list):
     """ Traceback objects encapsulate and offer higher level
         access to Traceback entries.
     """
     Entry = TracebackEntry
-    def __init__(self, tb):
-        """ initialize from given python traceback object. """
+    def __init__(self, tb, excinfo=None):
+        """ initialize from given python traceback object and ExceptionInfo """
+        self._excinfo = excinfo
         if hasattr(tb, 'tb_next'):
             def f(cur):
                 while cur is not None:
-                    yield self.Entry(cur)
+                    yield self.Entry(cur, excinfo=excinfo)
                     cur = cur.tb_next
             list.__init__(self, f(tb))
         else:
             list.__init__(self, tb)
 
     def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
         """ return a Traceback instance wrapping part of this Traceback
 
@@ -277,17 +285,17 @@ class Traceback(list):
         for x in self:
             code = x.frame.code
             codepath = code.path
             if ((path is None or codepath == path) and
                 (excludepath is None or not hasattr(codepath, 'relto') or
                  not codepath.relto(excludepath)) and
                 (lineno is None or x.lineno == lineno) and
                 (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
-                return Traceback(x._rawentry)
+                return Traceback(x._rawentry, self._excinfo)
         return self
 
     def __getitem__(self, key):
         val = super(Traceback, self).__getitem__(key)
         if isinstance(key, type(slice(0))):
             val = self.__class__(val)
         return val
 
@@ -296,17 +304,17 @@ class Traceback(list):
 
             fn is a function that gets a single argument, a TracebackEntry
             instance, and should return True when the item should be added
             to the Traceback, False when not
 
             by default this removes all the TracebackEntries which are hidden
             (see ishidden() above)
         """
-        return Traceback(filter(fn, self))
+        return Traceback(filter(fn, self), self._excinfo)
 
     def getcrashentry(self):
         """ return last non-hidden traceback entry that lead
         to the exception of a traceback.
         """
         for i in range(-1, -len(self)-1, -1):
             entry = self[i]
             if not entry.ishidden():
@@ -332,45 +340,48 @@ class Traceback(list):
                 for otherloc in l:
                     if f.is_true(f.eval(co_equal,
                         __recursioncache_locals_1=loc,
                         __recursioncache_locals_2=otherloc)):
                         return i
             l.append(entry.frame.f_locals)
         return None
 
+
 co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
                    '?', 'eval')
 
 class ExceptionInfo(object):
     """ wraps sys.exc_info() objects and offers
         help for navigating the traceback.
     """
     _striptext = ''
+    _assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert "
+
     def __init__(self, tup=None, exprinfo=None):
         import _pytest._code
         if tup is None:
             tup = sys.exc_info()
             if exprinfo is None and isinstance(tup[1], AssertionError):
                 exprinfo = getattr(tup[1], 'msg', None)
                 if exprinfo is None:
-                    exprinfo = str(tup[1])
-                if exprinfo and exprinfo.startswith('assert '):
+                    exprinfo = py.io.saferepr(tup[1])
+                if exprinfo and exprinfo.startswith(self._assert_start_repr):
                     self._striptext = 'AssertionError: '
         self._excinfo = tup
         #: the exception class
         self.type = tup[0]
         #: the exception instance
         self.value = tup[1]
         #: the exception raw traceback
         self.tb = tup[2]
         #: the exception type name
         self.typename = self.type.__name__
         #: the exception traceback (_pytest._code.Traceback instance)
-        self.traceback = _pytest._code.Traceback(self.tb)
+        self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self))
 
     def __repr__(self):
         return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
 
     def exconly(self, tryshort=False):
         """ return the exception as a string
 
             when 'tryshort' resolves to True, and the exception is a
@@ -422,16 +433,29 @@ class ExceptionInfo(object):
         loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
         return str(loc)
 
     def __unicode__(self):
         entry = self.traceback[-1]
         loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
         return unicode(loc)
 
+    def match(self, regexp):
+        """
+        Match the regular expression 'regexp' on the string representation of
+        the exception. If it matches then True is returned (so that it is
+        possible to write 'assert excinfo.match()'). If it doesn't match an
+        AssertionError is raised.
+        """
+        __tracebackhide__ = True
+        if not re.search(regexp, str(self.value)):
+            assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
+                regexp, self.value)
+        return True
+
 
 class FormattedExcinfo(object):
     """ presenting information about failing Functions and Generators. """
     # for traceback entries
     flow_marker = ">"
     fail_marker = "E"
 
     def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
@@ -573,70 +597,157 @@ class FormattedExcinfo(object):
             if len(np) < len(str(path)):
                 path = np
         return path
 
     def repr_traceback(self, excinfo):
         traceback = excinfo.traceback
         if self.tbfilter:
             traceback = traceback.filter()
-        recursionindex = None
+
         if is_recursion_error(excinfo):
-            recursionindex = traceback.recursionindex()
+            traceback, extraline = self._truncate_recursive_traceback(traceback)
+        else:
+            extraline = None
+
         last = traceback[-1]
         entries = []
-        extraline = None
         for index, entry in enumerate(traceback):
             einfo = (last == entry) and excinfo or None
             reprentry = self.repr_traceback_entry(entry, einfo)
             entries.append(reprentry)
-            if index == recursionindex:
+        return ReprTraceback(entries, extraline, style=self.style)
+
+    def _truncate_recursive_traceback(self, traceback):
+        """
+        Truncate the given recursive traceback trying to find the starting point
+        of the recursion.
+
+        The detection is done by going through each traceback entry and finding the
+        point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``.
+
+        Handle the situation where the recursion process might raise an exception (for example
+        comparing numpy arrays using equality raises a TypeError), in which case we do our best to
+        warn the user of the error and show a limited traceback.
+        """
+        try:
+            recursionindex = traceback.recursionindex()
+        except Exception as e:
+            max_frames = 10
+            extraline = (
+                '!!! Recursion error detected, but an error occurred locating the origin of recursion.\n'
+                '  The following exception happened when comparing locals in the stack frame:\n'
+                '    {exc_type}: {exc_msg}\n'
+                '  Displaying first and last {max_frames} stack frames out of {total}.'
+            ).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback))
+            traceback = traceback[:max_frames] + traceback[-max_frames:]
+        else:
+            if recursionindex is not None:
                 extraline = "!!! Recursion detected (same locals & position)"
-                break
-        return ReprTraceback(entries, extraline, style=self.style)
+                traceback = traceback[:recursionindex + 1]
+            else:
+                extraline = None
+
+        return traceback, extraline
 
     def repr_excinfo(self, excinfo):
-        reprtraceback = self.repr_traceback(excinfo)
-        reprcrash = excinfo._getreprcrash()
-        return ReprExceptionInfo(reprtraceback, reprcrash)
+        if _PY2:
+            reprtraceback = self.repr_traceback(excinfo)
+            reprcrash = excinfo._getreprcrash()
 
-class TerminalRepr:
+            return ReprExceptionInfo(reprtraceback, reprcrash)
+        else:
+            repr_chain = []
+            e = excinfo.value
+            descr = None
+            while e is not None:
+                if excinfo:
+                    reprtraceback = self.repr_traceback(excinfo)
+                    reprcrash = excinfo._getreprcrash()
+                else:
+                    # fallback to native repr if the exception doesn't have a traceback:
+                    # ExceptionInfo objects require a full traceback to work
+                    reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None))
+                    reprcrash = None
+
+                repr_chain += [(reprtraceback, reprcrash, descr)]
+                if e.__cause__ is not None:
+                    e = e.__cause__
+                    excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
+                    descr = 'The above exception was the direct cause of the following exception:'
+                elif e.__context__ is not None:
+                    e = e.__context__
+                    excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
+                    descr = 'During handling of the above exception, another exception occurred:'
+                else:
+                    e = None
+            repr_chain.reverse()
+            return ExceptionChainRepr(repr_chain)
+
+
+class TerminalRepr(object):
     def __str__(self):
         s = self.__unicode__()
-        if sys.version_info[0] < 3:
+        if _PY2:
             s = s.encode('utf-8')
         return s
 
     def __unicode__(self):
         # FYI this is called from pytest-xdist's serialization of exception
         # information.
         io = py.io.TextIO()
         tw = py.io.TerminalWriter(file=io)
         self.toterminal(tw)
         return io.getvalue().strip()
 
     def __repr__(self):
         return "<%s instance at %0x>" %(self.__class__, id(self))
 
 
-class ReprExceptionInfo(TerminalRepr):
-    def __init__(self, reprtraceback, reprcrash):
-        self.reprtraceback = reprtraceback
-        self.reprcrash = reprcrash
+class ExceptionRepr(TerminalRepr):
+    def __init__(self):
         self.sections = []
 
     def addsection(self, name, content, sep="-"):
         self.sections.append((name, content, sep))
 
     def toterminal(self, tw):
-        self.reprtraceback.toterminal(tw)
         for name, content, sep in self.sections:
             tw.sep(sep, name)
             tw.line(content)
 
+
+class ExceptionChainRepr(ExceptionRepr):
+    def __init__(self, chain):
+        super(ExceptionChainRepr, self).__init__()
+        self.chain = chain
+        # reprcrash and reprtraceback of the outermost (the newest) exception
+        # in the chain
+        self.reprtraceback = chain[-1][0]
+        self.reprcrash = chain[-1][1]
+
+    def toterminal(self, tw):
+        for element in self.chain:
+            element[0].toterminal(tw)
+            if element[2] is not None:
+                tw.line("")
+                tw.line(element[2], yellow=True)
+        super(ExceptionChainRepr, self).toterminal(tw)
+
+
+class ReprExceptionInfo(ExceptionRepr):
+    def __init__(self, reprtraceback, reprcrash):
+        super(ReprExceptionInfo, self).__init__()
+        self.reprtraceback = reprtraceback
+        self.reprcrash = reprcrash
+
+    def toterminal(self, tw):
+        self.reprtraceback.toterminal(tw)
+        super(ReprExceptionInfo, self).toterminal(tw)
+
 class ReprTraceback(TerminalRepr):
     entrysep = "_ "
 
     def __init__(self, reprentries, extraline, style):
         self.reprentries = reprentries
         self.extraline = extraline
         self.style = style
 
@@ -715,17 +826,18 @@ class ReprFileLocation(TerminalRepr):
 
     def toterminal(self, tw):
         # filename and lineno output for each entry,
         # using an output format that most editors unterstand
         msg = self.message
         i = msg.find("\n")
         if i != -1:
             msg = msg[:i]
-        tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+        tw.write(self.path, bold=True, red=True)
+        tw.line(":%s: %s" % (self.lineno, msg))
 
 class ReprLocals(TerminalRepr):
     def __init__(self, lines):
         self.lines = lines
 
     def toterminal(self, tw):
         for line in self.lines:
             tw.line(line)
@@ -748,56 +860,34 @@ class ReprFuncArgs(TerminalRepr):
                         linesofar += ", " + ns
                     else:
                         linesofar = ns
             if linesofar:
                 tw.line(linesofar)
             tw.line("")
 
 
-
-oldbuiltins = {}
-
-def patch_builtins(assertion=True, compile=True):
-    """ put compile and AssertionError builtins to Python's builtins. """
-    if assertion:
-        from _pytest.assertion import reinterpret
-        l = oldbuiltins.setdefault('AssertionError', [])
-        l.append(py.builtin.builtins.AssertionError)
-        py.builtin.builtins.AssertionError = reinterpret.AssertionError
-    if compile:
-        import _pytest._code
-        l = oldbuiltins.setdefault('compile', [])
-        l.append(py.builtin.builtins.compile)
-        py.builtin.builtins.compile = _pytest._code.compile
-
-def unpatch_builtins(assertion=True, compile=True):
-    """ remove compile and AssertionError builtins from Python builtins. """
-    if assertion:
-        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
-    if compile:
-        py.builtin.builtins.compile = oldbuiltins['compile'].pop()
-
 def getrawcode(obj, trycall=True):
     """ return code object for given function. """
     try:
         return obj.__code__
     except AttributeError:
         obj = getattr(obj, 'im_func', obj)
         obj = getattr(obj, 'func_code', obj)
         obj = getattr(obj, 'f_code', obj)
         obj = getattr(obj, '__code__', obj)
         if trycall and not hasattr(obj, 'co_firstlineno'):
             if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
                 x = getrawcode(obj.__call__, trycall=False)
                 if hasattr(x, 'co_firstlineno'):
                     return x
         return obj
 
-if sys.version_info[:2] >= (3, 5):  # RecursionError introduced in 3.5
+
+if PY35:  # RecursionError introduced in 3.5
     def is_recursion_error(excinfo):
         return excinfo.errisinstance(RecursionError)  # noqa
 else:
     def is_recursion_error(excinfo):
         if not excinfo.errisinstance(RuntimeError):
             return False
         try:
             return "maximum recursion depth exceeded" in str(excinfo.value)
--- a/third_party/python/pytest/_pytest/_code/source.py
+++ b/third_party/python/pytest/_pytest/_code/source.py
@@ -1,15 +1,14 @@
-from __future__ import generators
+from __future__ import absolute_import, division, generators, print_function
 
 from bisect import bisect_right
 import sys
 import inspect, tokenize
 import py
-from types import ModuleType
 cpy_compile = compile
 
 try:
     import _ast
     from _ast import PyCF_ONLY_AST as _AST_FLAG
 except ImportError:
     _AST_FLAG = 0
     _ast = None
@@ -47,32 +46,31 @@ class Source(object):
     def __eq__(self, other):
         try:
             return self.lines == other.lines
         except AttributeError:
             if isinstance(other, str):
                 return str(self) == other
             return False
 
+    __hash__ = None
+
     def __getitem__(self, key):
         if isinstance(key, int):
             return self.lines[key]
         else:
             if key.step not in (None, 1):
                 raise IndexError("cannot slice a Source with a step")
-            return self.__getslice__(key.start, key.stop)
+            newsource = Source()
+            newsource.lines = self.lines[key.start:key.stop]
+            return newsource
 
     def __len__(self):
         return len(self.lines)
 
-    def __getslice__(self, start, end):
-        newsource = Source()
-        newsource.lines = self.lines[start:end]
-        return newsource
-
     def strip(self):
         """ return new source object with trailing
             and leading blank lines removed.
         """
         start, end = 0, len(self)
         while start < end and not self.lines[start].strip():
             start += 1
         while end > start and not self.lines[end-1].strip():
@@ -188,24 +186,16 @@ class Source(object):
             newex.offset = ex.offset
             newex.lineno = ex.lineno
             newex.text = ex.text
             raise newex
         else:
             if flag & _AST_FLAG:
                 return co
             lines = [(x + "\n") for x in self.lines]
-            if sys.version_info[0] >= 3:
-                # XXX py3's inspect.getsourcefile() checks for a module
-                # and a pep302 __loader__ ... we don't have a module
-                # at code compile-time so we need to fake it here
-                m = ModuleType("_pycodecompile_pseudo_module")
-                py.std.inspect.modulesbyfile[filename] = None
-                py.std.sys.modules[None] = m
-                m.__loader__ = 1
             py.std.linecache.cache[filename] = (1, None, lines, filename)
             return co
 
 #
 # public API shortcut functions
 #
 
 def compile_(source, filename=None, mode='exec', flags=
@@ -261,39 +251,42 @@ def findsource(obj):
     except py.builtin._sysex:
         raise
     except:
         return None, -1
     source = Source()
     source.lines = [line.rstrip() for line in sourcelines]
     return source, lineno
 
+
 def getsource(obj, **kwargs):
     import _pytest._code
     obj = _pytest._code.getrawcode(obj)
     try:
         strsrc = inspect.getsource(obj)
     except IndentationError:
         strsrc = "\"Buggy python version consider upgrading, cannot get source\""
     assert isinstance(strsrc, str)
     return Source(strsrc, **kwargs)
 
+
 def deindent(lines, offset=None):
     if offset is None:
         for line in lines:
             line = line.expandtabs()
             s = line.lstrip()
             if s:
                 offset = len(line)-len(s)
                 break
         else:
             offset = 0
     if offset == 0:
         return list(lines)
     newlines = []
+
     def readline_generator(lines):
         for line in lines:
             yield line + '\n'
         while True:
             yield ''
 
     it = readline_generator(lines)
 
--- a/third_party/python/pytest/_pytest/_pluggy.py
+++ b/third_party/python/pytest/_pytest/_pluggy.py
@@ -1,11 +1,11 @@
 """
 imports symbols from vendored "pluggy" if available, otherwise
 falls back to importing "pluggy" from the default namespace.
 """
-
+from __future__ import absolute_import, division, print_function
 try:
     from _pytest.vendored_packages.pluggy import *  # noqa
     from _pytest.vendored_packages.pluggy import __version__  # noqa
 except ImportError:
     from pluggy import *  # noqa
     from pluggy import __version__  # noqa
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/_version.py
@@ -0,0 +1,4 @@
+# coding: utf-8
+# file generated by setuptools_scm
+# don't change, don't track in version control
+version = '3.1.3'
--- a/third_party/python/pytest/_pytest/assertion/__init__.py
+++ b/third_party/python/pytest/_pytest/assertion/__init__.py
@@ -1,176 +1,149 @@
 """
 support for presenting detailed information in failing assertions.
 """
+from __future__ import absolute_import, division, print_function
 import py
-import os
 import sys
-from _pytest.monkeypatch import monkeypatch
+
 from _pytest.assertion import util
+from _pytest.assertion import rewrite
+from _pytest.assertion import truncate
 
 
 def pytest_addoption(parser):
     group = parser.getgroup("debugconfig")
     group.addoption('--assert',
                     action="store",
                     dest="assertmode",
-                    choices=("rewrite", "reinterp", "plain",),
+                    choices=("rewrite", "plain",),
                     default="rewrite",
                     metavar="MODE",
-                    help="""control assertion debugging tools.  'plain'
-                            performs no assertion debugging.  'reinterp'
-                            reinterprets assert statements after they failed
-                            to provide assertion expression information.
-                            'rewrite' (the default) rewrites assert
-                            statements in test modules on import to
-                            provide assert expression information. """)
-    group.addoption('--no-assert',
-                    action="store_true",
-                    default=False,
-                    dest="noassert",
-                    help="DEPRECATED equivalent to --assert=plain")
-    group.addoption('--nomagic', '--no-magic',
-                    action="store_true",
-                    default=False,
-                    help="DEPRECATED equivalent to --assert=plain")
+                    help="""Control assertion debugging tools.  'plain'
+                            performs no assertion debugging.  'rewrite'
+                            (the default) rewrites assert statements in
+                            test modules on import to provide assert
+                            expression information.""")
+
+
+
+def register_assert_rewrite(*names):
+    """Register one or more module names to be rewritten on import.
+
+    This function will make sure that this module or all modules inside
+    the package will get their assert statements rewritten.
+    Thus you should make sure to call this before the module is
+    actually imported, usually in your __init__.py if you are a plugin
+    using a package.
+
+    :raise TypeError: if the given module names are not strings.
+    """
+    for name in names:
+        if not isinstance(name, str):
+            msg = 'expected module names as *args, got {0} instead'
+            raise TypeError(msg.format(repr(names)))
+    for hook in sys.meta_path:
+        if isinstance(hook, rewrite.AssertionRewritingHook):
+            importhook = hook
+            break
+    else:
+        importhook = DummyRewriteHook()
+    importhook.mark_rewrite(*names)
+
+
+class DummyRewriteHook(object):
+    """A no-op import hook for when rewriting is disabled."""
+
+    def mark_rewrite(self, *names):
+        pass
 
 
 class AssertionState:
     """State for the assertion plugin."""
 
     def __init__(self, config, mode):
         self.mode = mode
         self.trace = config.trace.root.get("assertion")
+        self.hook = None
 
 
-def pytest_configure(config):
-    mode = config.getvalue("assertmode")
-    if config.getvalue("noassert") or config.getvalue("nomagic"):
-        mode = "plain"
-    if mode == "rewrite":
-        try:
-            import ast  # noqa
-        except ImportError:
-            mode = "reinterp"
-        else:
-            # Both Jython and CPython 2.6.0 have AST bugs that make the
-            # assertion rewriting hook malfunction.
-            if (sys.platform.startswith('java') or
-                    sys.version_info[:3] == (2, 6, 0)):
-                mode = "reinterp"
-    if mode != "plain":
-        _load_modules(mode)
-        m = monkeypatch()
-        config._cleanup.append(m.undo)
-        m.setattr(py.builtin.builtins, 'AssertionError',
-                  reinterpret.AssertionError)  # noqa
-    hook = None
-    if mode == "rewrite":
-        hook = rewrite.AssertionRewritingHook()  # noqa
-        sys.meta_path.insert(0, hook)
-    warn_about_missing_assertion(mode)
-    config._assertstate = AssertionState(config, mode)
-    config._assertstate.hook = hook
-    config._assertstate.trace("configured with mode set to %r" % (mode,))
+def install_importhook(config):
+    """Try to install the rewrite hook, raise SystemError if it fails."""
+    # Both Jython and CPython 2.6.0 have AST bugs that make the
+    # assertion rewriting hook malfunction.
+    if (sys.platform.startswith('java') or
+            sys.version_info[:3] == (2, 6, 0)):
+        raise SystemError('rewrite not supported')
+
+    config._assertstate = AssertionState(config, 'rewrite')
+    config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)
+    sys.meta_path.insert(0, hook)
+    config._assertstate.trace('installed rewrite import hook')
+
     def undo():
         hook = config._assertstate.hook
         if hook is not None and hook in sys.meta_path:
             sys.meta_path.remove(hook)
+
     config.add_cleanup(undo)
+    return hook
 
 
 def pytest_collection(session):
     # this hook is only called when test modules are collected
     # so for example not in the master process of pytest-xdist
     # (which does not collect test modules)
-    hook = session.config._assertstate.hook
-    if hook is not None:
-        hook.set_session(session)
-
-
-def _running_on_ci():
-    """Check if we're currently running on a CI system."""
-    env_vars = ['CI', 'BUILD_NUMBER']
-    return any(var in os.environ for var in env_vars)
+    assertstate = getattr(session.config, '_assertstate', None)
+    if assertstate:
+        if assertstate.hook is not None:
+            assertstate.hook.set_session(session)
 
 
 def pytest_runtest_setup(item):
     """Setup the pytest_assertrepr_compare hook
 
     The newinterpret and rewrite modules will use util._reprcompare if
     it exists to use custom reporting via the
     pytest_assertrepr_compare hook.  This sets up this custom
     comparison for the test.
     """
     def callbinrepr(op, left, right):
         """Call the pytest_assertrepr_compare hook and prepare the result
 
         This uses the first result from the hook and then ensures the
         following:
-        * Overly verbose explanations are dropped unless -vv was used or
-          running on a CI.
+        * Overly verbose explanations are truncated unless configured otherwise
+          (eg. if running in verbose mode).
         * Embedded newlines are escaped to help util.format_explanation()
           later.
         * If the rewrite mode is used embedded %-characters are replaced
           to protect later % formatting.
 
         The result can be formatted by util.format_explanation() for
         pretty printing.
         """
         hook_result = item.ihook.pytest_assertrepr_compare(
             config=item.config, op=op, left=left, right=right)
         for new_expl in hook_result:
             if new_expl:
-                if (sum(len(p) for p in new_expl[1:]) > 80*8 and
-                        item.config.option.verbose < 2 and
-                        not _running_on_ci()):
-                    show_max = 10
-                    truncated_lines = len(new_expl) - show_max
-                    new_expl[show_max:] = [py.builtin._totext(
-                        'Detailed information truncated (%d more lines)'
-                        ', use "-vv" to show' % truncated_lines)]
+                new_expl = truncate.truncate_if_required(new_expl, item)
                 new_expl = [line.replace("\n", "\\n") for line in new_expl]
                 res = py.builtin._totext("\n~").join(new_expl)
                 if item.config.getvalue("assertmode") == "rewrite":
                     res = res.replace("%", "%%")
                 return res
     util._reprcompare = callbinrepr
 
 
 def pytest_runtest_teardown(item):
     util._reprcompare = None
 
 
 def pytest_sessionfinish(session):
-    hook = session.config._assertstate.hook
-    if hook is not None:
-        hook.session = None
-
-
-def _load_modules(mode):
-    """Lazily import assertion related code."""
-    global rewrite, reinterpret
-    from _pytest.assertion import reinterpret  # noqa
-    if mode == "rewrite":
-        from _pytest.assertion import rewrite  # noqa
-
-
-def warn_about_missing_assertion(mode):
-    try:
-        assert False
-    except AssertionError:
-        pass
-    else:
-        if mode == "rewrite":
-            specifically = ("assertions which are not in test modules "
-                            "will be ignored")
-        else:
-            specifically = "failing tests may report as passing"
-
-        sys.stderr.write("WARNING: " + specifically +
-                         " because assert statements are not executed "
-                         "by the underlying Python interpreter "
-                         "(are you using python -O?)\n")
+    assertstate = getattr(session.config, '_assertstate', None)
+    if assertstate:
+        if assertstate.hook is not None:
+            assertstate.hook.set_session(None)
 
 
 # Expose this plugin's implementation for the pytest_assertrepr_compare hook
 pytest_assertrepr_compare = util.assertrepr_compare
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/assertion/reinterpret.py
+++ /dev/null
@@ -1,407 +0,0 @@
-"""
-Find intermediate evalutation results in assert statements through builtin AST.
-"""
-import ast
-import sys
-
-import _pytest._code
-import py
-from _pytest.assertion import util
-u = py.builtin._totext
-
-
-class AssertionError(util.BuiltinAssertionError):
-    def __init__(self, *args):
-        util.BuiltinAssertionError.__init__(self, *args)
-        if args:
-            # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
-            # on Python2.7 and above we always get len(args) == 1
-            # with args[0] being the (x,y) tuple.
-            if len(args) > 1:
-                toprint = args
-            else:
-                toprint = args[0]
-            try:
-                self.msg = u(toprint)
-            except Exception:
-                self.msg = u(
-                    "<[broken __repr__] %s at %0xd>"
-                    % (toprint.__class__, id(toprint)))
-        else:
-            f = _pytest._code.Frame(sys._getframe(1))
-            try:
-                source = f.code.fullsource
-                if source is not None:
-                    try:
-                        source = source.getstatement(f.lineno, assertion=True)
-                    except IndexError:
-                        source = None
-                    else:
-                        source = str(source.deindent()).strip()
-            except py.error.ENOENT:
-                source = None
-                # this can also occur during reinterpretation, when the
-                # co_filename is set to "<run>".
-            if source:
-                self.msg = reinterpret(source, f, should_fail=True)
-            else:
-                self.msg = "<could not determine information>"
-            if not self.args:
-                self.args = (self.msg,)
-
-if sys.version_info > (3, 0):
-    AssertionError.__module__ = "builtins"
-
-if sys.platform.startswith("java"):
-    # See http://bugs.jython.org/issue1497
-    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
-              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
-              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
-              "List", "Tuple")
-    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
-              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
-              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
-              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
-    _expr_nodes = set(getattr(ast, name) for name in _exprs)
-    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
-    def _is_ast_expr(node):
-        return node.__class__ in _expr_nodes
-    def _is_ast_stmt(node):
-        return node.__class__ in _stmt_nodes
-else:
-    def _is_ast_expr(node):
-        return isinstance(node, ast.expr)
-    def _is_ast_stmt(node):
-        return isinstance(node, ast.stmt)
-
-try:
-    _Starred = ast.Starred
-except AttributeError:
-    # Python 2. Define a dummy class so isinstance() will always be False.
-    class _Starred(object): pass
-
-
-class Failure(Exception):
-    """Error found while interpreting AST."""
-
-    def __init__(self, explanation=""):
-        self.cause = sys.exc_info()
-        self.explanation = explanation
-
-
-def reinterpret(source, frame, should_fail=False):
-    mod = ast.parse(source)
-    visitor = DebugInterpreter(frame)
-    try:
-        visitor.visit(mod)
-    except Failure:
-        failure = sys.exc_info()[1]
-        return getfailure(failure)
-    if should_fail:
-        return ("(assertion failed, but when it was re-run for "
-                "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --assert=plain)")
-
-def run(offending_line, frame=None):
-    if frame is None:
-        frame = _pytest._code.Frame(sys._getframe(1))
-    return reinterpret(offending_line, frame)
-
-def getfailure(e):
-    explanation = util.format_explanation(e.explanation)
-    value = e.cause[1]
-    if str(value):
-        lines = explanation.split('\n')
-        lines[0] += "  << %s" % (value,)
-        explanation = '\n'.join(lines)
-    text = "%s: %s" % (e.cause[0].__name__, explanation)
-    if text.startswith('AssertionError: assert '):
-        text = text[16:]
-    return text
-
-operator_map = {
-    ast.BitOr : "|",
-    ast.BitXor : "^",
-    ast.BitAnd : "&",
-    ast.LShift : "<<",
-    ast.RShift : ">>",
-    ast.Add : "+",
-    ast.Sub : "-",
-    ast.Mult : "*",
-    ast.Div : "/",
-    ast.FloorDiv : "//",
-    ast.Mod : "%",
-    ast.Eq : "==",
-    ast.NotEq : "!=",
-    ast.Lt : "<",
-    ast.LtE : "<=",
-    ast.Gt : ">",
-    ast.GtE : ">=",
-    ast.Pow : "**",
-    ast.Is : "is",
-    ast.IsNot : "is not",
-    ast.In : "in",
-    ast.NotIn : "not in"
-}
-
-unary_map = {
-    ast.Not : "not %s",
-    ast.Invert : "~%s",
-    ast.USub : "-%s",
-    ast.UAdd : "+%s"
-}
-
-
-class DebugInterpreter(ast.NodeVisitor):
-    """Interpret AST nodes to gleam useful debugging information. """
-
-    def __init__(self, frame):
-        self.frame = frame
-
-    def generic_visit(self, node):
-        # Fallback when we don't have a special implementation.
-        if _is_ast_expr(node):
-            mod = ast.Expression(node)
-            co = self._compile(mod)
-            try:
-                result = self.frame.eval(co)
-            except Exception:
-                raise Failure()
-            explanation = self.frame.repr(result)
-            return explanation, result
-        elif _is_ast_stmt(node):
-            mod = ast.Module([node])
-            co = self._compile(mod, "exec")
-            try:
-                self.frame.exec_(co)
-            except Exception:
-                raise Failure()
-            return None, None
-        else:
-            raise AssertionError("can't handle %s" %(node,))
-
-    def _compile(self, source, mode="eval"):
-        return compile(source, "<assertion interpretation>", mode)
-
-    def visit_Expr(self, expr):
-        return self.visit(expr.value)
-
-    def visit_Module(self, mod):
-        for stmt in mod.body:
-            self.visit(stmt)
-
-    def visit_Name(self, name):
-        explanation, result = self.generic_visit(name)
-        # See if the name is local.
-        source = "%r in locals() is not globals()" % (name.id,)
-        co = self._compile(source)
-        try:
-            local = self.frame.eval(co)
-        except Exception:
-            # have to assume it isn't
-            local = None
-        if local is None or not self.frame.is_true(local):
-            return name.id, result
-        return explanation, result
-
-    def visit_Compare(self, comp):
-        left = comp.left
-        left_explanation, left_result = self.visit(left)
-        for op, next_op in zip(comp.ops, comp.comparators):
-            next_explanation, next_result = self.visit(next_op)
-            op_symbol = operator_map[op.__class__]
-            explanation = "%s %s %s" % (left_explanation, op_symbol,
-                                        next_explanation)
-            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
-            co = self._compile(source)
-            try:
-                result = self.frame.eval(co, __exprinfo_left=left_result,
-                                         __exprinfo_right=next_result)
-            except Exception:
-                raise Failure(explanation)
-            try:
-                if not self.frame.is_true(result):
-                    break
-            except KeyboardInterrupt:
-                raise
-            except:
-                break
-            left_explanation, left_result = next_explanation, next_result
-
-        if util._reprcompare is not None:
-            res = util._reprcompare(op_symbol, left_result, next_result)
-            if res:
-                explanation = res
-        return explanation, result
-
-    def visit_BoolOp(self, boolop):
-        is_or = isinstance(boolop.op, ast.Or)
-        explanations = []
-        for operand in boolop.values:
-            explanation, result = self.visit(operand)
-            explanations.append(explanation)
-            if result == is_or:
-                break
-        name = is_or and " or " or " and "
-        explanation = "(" + name.join(explanations) + ")"
-        return explanation, result
-
-    def visit_UnaryOp(self, unary):
-        pattern = unary_map[unary.op.__class__]
-        operand_explanation, operand_result = self.visit(unary.operand)
-        explanation = pattern % (operand_explanation,)
-        co = self._compile(pattern % ("__exprinfo_expr",))
-        try:
-            result = self.frame.eval(co, __exprinfo_expr=operand_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, result
-
-    def visit_BinOp(self, binop):
-        left_explanation, left_result = self.visit(binop.left)
-        right_explanation, right_result = self.visit(binop.right)
-        symbol = operator_map[binop.op.__class__]
-        explanation = "(%s %s %s)" % (left_explanation, symbol,
-                                      right_explanation)
-        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, __exprinfo_left=left_result,
-                                     __exprinfo_right=right_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, result
-
-    def visit_Call(self, call):
-        func_explanation, func = self.visit(call.func)
-        arg_explanations = []
-        ns = {"__exprinfo_func" : func}
-        arguments = []
-        for arg in call.args:
-            arg_explanation, arg_result = self.visit(arg)
-            if isinstance(arg, _Starred):
-                arg_name = "__exprinfo_star"
-                ns[arg_name] = arg_result
-                arguments.append("*%s" % (arg_name,))
-                arg_explanations.append("*%s" % (arg_explanation,))
-            else:
-                arg_name = "__exprinfo_%s" % (len(ns),)
-                ns[arg_name] = arg_result
-                arguments.append(arg_name)
-                arg_explanations.append(arg_explanation)
-        for keyword in call.keywords:
-            arg_explanation, arg_result = self.visit(keyword.value)
-            if keyword.arg:
-                arg_name = "__exprinfo_%s" % (len(ns),)
-                keyword_source = "%s=%%s" % (keyword.arg)
-                arguments.append(keyword_source % (arg_name,))
-                arg_explanations.append(keyword_source % (arg_explanation,))
-            else:
-                arg_name = "__exprinfo_kwds"
-                arguments.append("**%s" % (arg_name,))
-                arg_explanations.append("**%s" % (arg_explanation,))
-
-            ns[arg_name] = arg_result
-
-        if getattr(call, 'starargs', None):
-            arg_explanation, arg_result = self.visit(call.starargs)
-            arg_name = "__exprinfo_star"
-            ns[arg_name] = arg_result
-            arguments.append("*%s" % (arg_name,))
-            arg_explanations.append("*%s" % (arg_explanation,))
-
-        if getattr(call, 'kwargs', None):
-            arg_explanation, arg_result = self.visit(call.kwargs)
-            arg_name = "__exprinfo_kwds"
-            ns[arg_name] = arg_result
-            arguments.append("**%s" % (arg_name,))
-            arg_explanations.append("**%s" % (arg_explanation,))
-        args_explained = ", ".join(arg_explanations)
-        explanation = "%s(%s)" % (func_explanation, args_explained)
-        args = ", ".join(arguments)
-        source = "__exprinfo_func(%s)" % (args,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, **ns)
-        except Exception:
-            raise Failure(explanation)
-        pattern = "%s\n{%s = %s\n}"
-        rep = self.frame.repr(result)
-        explanation = pattern % (rep, rep, explanation)
-        return explanation, result
-
-    def _is_builtin_name(self, name):
-        pattern = "%r not in globals() and %r not in locals()"
-        source = pattern % (name.id, name.id)
-        co = self._compile(source)
-        try:
-            return self.frame.eval(co)
-        except Exception:
-            return False
-
-    def visit_Attribute(self, attr):
-        if not isinstance(attr.ctx, ast.Load):
-            return self.generic_visit(attr)
-        source_explanation, source_result = self.visit(attr.value)
-        explanation = "%s.%s" % (source_explanation, attr.attr)
-        source = "__exprinfo_expr.%s" % (attr.attr,)
-        co = self._compile(source)
-        try:
-            try:
-                result = self.frame.eval(co, __exprinfo_expr=source_result)
-            except AttributeError:
-                # Maybe the attribute name needs to be mangled?
-                if not attr.attr.startswith("__") or attr.attr.endswith("__"):
-                    raise
-                source = "getattr(__exprinfo_expr.__class__, '__name__', '')"
-                co = self._compile(source)
-                class_name = self.frame.eval(co, __exprinfo_expr=source_result)
-                mangled_attr = "_" + class_name +  attr.attr
-                source = "__exprinfo_expr.%s" % (mangled_attr,)
-                co = self._compile(source)
-                result = self.frame.eval(co, __exprinfo_expr=source_result)
-        except Exception:
-            raise Failure(explanation)
-        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
-                                              self.frame.repr(result),
-                                              source_explanation, attr.attr)
-        # Check if the attr is from an instance.
-        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
-        source = source % (attr.attr,)
-        co = self._compile(source)
-        try:
-            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
-        except Exception:
-            from_instance = None
-        if from_instance is None or self.frame.is_true(from_instance):
-            rep = self.frame.repr(result)
-            pattern = "%s\n{%s = %s\n}"
-            explanation = pattern % (rep, rep, explanation)
-        return explanation, result
-
-    def visit_Assert(self, assrt):
-        test_explanation, test_result = self.visit(assrt.test)
-        explanation = "assert %s" % (test_explanation,)
-        if not self.frame.is_true(test_result):
-            try:
-                raise util.BuiltinAssertionError
-            except Exception:
-                raise Failure(explanation)
-        return explanation, test_result
-
-    def visit_Assign(self, assign):
-        value_explanation, value_result = self.visit(assign.value)
-        explanation = "... = %s" % (value_explanation,)
-        name = ast.Name("__exprinfo_expr", ast.Load(),
-                        lineno=assign.value.lineno,
-                        col_offset=assign.value.col_offset)
-        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
-                                col_offset=assign.col_offset)
-        mod = ast.Module([new_assign])
-        co = self._compile(mod, "exec")
-        try:
-            self.frame.exec_(co, __exprinfo_expr=value_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, value_result
-
--- a/third_party/python/pytest/_pytest/assertion/rewrite.py
+++ b/third_party/python/pytest/_pytest/assertion/rewrite.py
@@ -1,11 +1,12 @@
 """Rewrite assertion AST to produce nice error messages"""
-
+from __future__ import absolute_import, division, print_function
 import ast
+import _ast
 import errno
 import itertools
 import imp
 import marshal
 import os
 import re
 import struct
 import sys
@@ -39,30 +40,30 @@ if sys.version_info >= (3,5):
     ast_Call = ast.Call
 else:
     ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
 
 
 class AssertionRewritingHook(object):
     """PEP302 Import hook which rewrites asserts."""
 
-    def __init__(self):
+    def __init__(self, config):
+        self.config = config
+        self.fnpats = config.getini("python_files")
         self.session = None
         self.modules = {}
+        self._rewritten_names = set()
         self._register_with_pkg_resources()
+        self._must_rewrite = set()
 
     def set_session(self, session):
-        self.fnpats = session.config.getini("python_files")
         self.session = session
 
     def find_module(self, name, path=None):
-        if self.session is None:
-            return None
-        sess = self.session
-        state = sess.config._assertstate
+        state = self.config._assertstate
         state.trace("find_module called for: %s" % name)
         names = name.rsplit(".", 1)
         lastname = names[-1]
         pth = None
         if path is not None:
             # Starting with Python 3.3, path is a _NamespacePath(), which
             # causes problems if not converted to list.
             path = list(path)
@@ -73,42 +74,36 @@ class AssertionRewritingHook(object):
                 fd, fn, desc = imp.find_module(lastname, path)
             except ImportError:
                 return None
             if fd is not None:
                 fd.close()
             tp = desc[2]
             if tp == imp.PY_COMPILED:
                 if hasattr(imp, "source_from_cache"):
-                    fn = imp.source_from_cache(fn)
+                    try:
+                        fn = imp.source_from_cache(fn)
+                    except ValueError:
+                        # Python 3 doesn't like orphaned but still-importable
+                        # .pyc files.
+                        fn = fn[:-1]
                 else:
                     fn = fn[:-1]
             elif tp != imp.PY_SOURCE:
                 # Don't know what this is.
                 return None
         else:
             fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
+
         fn_pypath = py.path.local(fn)
-        # Is this a test file?
-        if not sess.isinitpath(fn):
-            # We have to be very careful here because imports in this code can
-            # trigger a cycle.
-            self.session = None
-            try:
-                for pat in self.fnpats:
-                    if fn_pypath.fnmatch(pat):
-                        state.trace("matched test file %r" % (fn,))
-                        break
-                else:
-                    return None
-            finally:
-                self.session = sess
-        else:
-            state.trace("matched test file (was specified on cmdline): %r" %
-                        (fn,))
+        if not self._should_rewrite(name, fn_pypath, state):
+            return None
+
+        self._rewritten_names.add(name)
+
         # The requested module looks like a test file, so rewrite it. This is
         # the most magical part of the process: load the source, rewrite the
         # asserts, and load the rewritten source. We also cache the rewritten
         # module code in a special pyc. We must be aware of the possibility of
         # concurrent pytest processes rewriting and loading pycs. To avoid
         # tricky race conditions, we maintain the following invariant: The
         # cached pyc is always a complete, valid pyc. Operations on it must be
         # atomic. POSIX's atomic rename comes in handy.
@@ -135,27 +130,72 @@ class AssertionRewritingHook(object):
                     raise
         cache_name = fn_pypath.basename[:-3] + PYC_TAIL
         pyc = os.path.join(cache_dir, cache_name)
         # Notice that even if we're in a read-only directory, I'm going
         # to check for a cached pyc. This may not be optimal...
         co = _read_pyc(fn_pypath, pyc, state.trace)
         if co is None:
             state.trace("rewriting %r" % (fn,))
-            source_stat, co = _rewrite_test(state, fn_pypath)
+            source_stat, co = _rewrite_test(self.config, fn_pypath)
             if co is None:
                 # Probably a SyntaxError in the test.
                 return None
             if write:
                 _make_rewritten_pyc(state, source_stat, pyc, co)
         else:
             state.trace("found cached rewritten pyc for %r" % (fn,))
         self.modules[name] = co, pyc
         return self
 
+    def _should_rewrite(self, name, fn_pypath, state):
+        # always rewrite conftest files
+        fn = str(fn_pypath)
+        if fn_pypath.basename == 'conftest.py':
+            state.trace("rewriting conftest file: %r" % (fn,))
+            return True
+
+        if self.session is not None:
+            if self.session.isinitpath(fn):
+                state.trace("matched test file (was specified on cmdline): %r" %
+                            (fn,))
+                return True
+
+        # modules not passed explicitly on the command line are only
+        # rewritten if they match the naming convention for test files
+        for pat in self.fnpats:
+            if fn_pypath.fnmatch(pat):
+                state.trace("matched test file %r" % (fn,))
+                return True
+
+        for marked in self._must_rewrite:
+            if name.startswith(marked):
+                state.trace("matched marked file %r (from %r)" % (name, marked))
+                return True
+
+        return False
+
+    def mark_rewrite(self, *names):
+        """Mark import names as needing to be re-written.
+
+        The named module or package as well as any nested modules will
+        be re-written on import.
+        """
+        already_imported = set(names).intersection(set(sys.modules))
+        if already_imported:
+            for name in already_imported:
+                if name not in self._rewritten_names:
+                    self._warn_already_imported(name)
+        self._must_rewrite.update(names)
+
+    def _warn_already_imported(self, name):
+        self.config.warn(
+            'P1',
+            'Module already imported so can not be re-written: %s' % name)
+
     def load_module(self, name):
         # If there is an existing module object named 'fullname' in
         # sys.modules, the loader must use that existing module. (Otherwise,
         # the reload() builtin will not work correctly.)
         if name in sys.modules:
             return sys.modules[name]
 
         co, pyc = self.modules.pop(name)
@@ -165,17 +205,18 @@ class AssertionRewritingHook(object):
         mod = sys.modules[name] = imp.new_module(name)
         try:
             mod.__file__ = co.co_filename
             # Normally, this attribute is 3.2+.
             mod.__cached__ = pyc
             mod.__loader__ = self
             py.builtin.exec_(co, mod.__dict__)
         except:
-            del sys.modules[name]
+            if name in sys.modules:
+                del sys.modules[name]
             raise
         return sys.modules[name]
 
 
 
     def is_package(self, name):
         try:
             fd, fn, desc = imp.find_module(name)
@@ -230,24 +271,26 @@ def _write_pyc(state, co, source_stat, p
         mtime = int(source_stat.mtime)
         size = source_stat.size & 0xFFFFFFFF
         fp.write(struct.pack("<ll", mtime, size))
         marshal.dump(co, fp)
     finally:
         fp.close()
     return True
 
+
 RN = "\r\n".encode("utf-8")
 N = "\n".encode("utf-8")
 
 cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
 BOM_UTF8 = '\xef\xbb\xbf'
 
-def _rewrite_test(state, fn):
+def _rewrite_test(config, fn):
     """Try to read and rewrite *fn* and return the code object."""
+    state = config._assertstate
     try:
         stat = fn.stat()
         source = fn.read("rb")
     except EnvironmentError:
         return None, None
     if ASCII_IS_DEFAULT_ENCODING:
         # ASCII is the default encoding in Python 2. Without a coding
         # declaration, Python 2 will complain about any bytes in the file
@@ -282,19 +325,19 @@ def _rewrite_test(state, fn):
     if REWRITE_NEWLINES:
         source = source.replace(RN, N) + N
     try:
         tree = ast.parse(source)
     except SyntaxError:
         # Let this pop up again in the real import.
         state.trace("failed to parse: %r" % (fn,))
         return None, None
-    rewrite_asserts(tree)
+    rewrite_asserts(tree, fn, config)
     try:
-        co = compile(tree, fn.strpath, "exec")
+        co = compile(tree, fn.strpath, "exec", dont_inherit=True)
     except SyntaxError:
         # It's possible that this error is from some bug in the
         # assertion rewriting, but I don't know of a fast way to tell.
         state.trace("failed to compile: %r" % (fn,))
         return None, None
     return stat, co
 
 def _make_rewritten_pyc(state, source_stat, pyc, co):
@@ -338,19 +381,19 @@ def _read_pyc(source, pyc, trace=lambda 
             trace('_read_pyc(%s): marshal.load error %s' % (source, e))
             return None
         if not isinstance(co, types.CodeType):
             trace('_read_pyc(%s): not a code object' % source)
             return None
         return co
 
 
-def rewrite_asserts(mod):
+def rewrite_asserts(mod, module_path=None, config=None):
     """Rewrite the assert statements in mod."""
-    AssertionRewriter().run(mod)
+    AssertionRewriter(module_path, config).run(mod)
 
 
 def _saferepr(obj):
     """Get a safe repr of an object for assertion error messages.
 
     The assertion formatting (util.format_explanation()) requires
     newlines to be escaped since they are a special character for it.
     Normally assertion.util.format_explanation() does this but for a
@@ -527,16 +570,21 @@ class AssertionRewriter(ast.NodeVisitor)
        .push_format_context() and .pop_format_context() which allows
        to build another %-formatted string while already building one.
 
     This state is reset on every new assert statement visited and used
     by the other visitors.
 
     """
 
+    def __init__(self, module_path, config):
+        super(AssertionRewriter, self).__init__()
+        self.module_path = module_path
+        self.config = config
+
     def run(self, mod):
         """Find all assert statements in *mod* and rewrite them."""
         if not mod.body:
             # Nothing to do.
             return
         # Insert some special imports at the top of the module but after any
         # docstrings and __future__ imports.
         aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
@@ -667,16 +715,20 @@ class AssertionRewriter(ast.NodeVisitor)
         """Return the AST statements to replace the ast.Assert instance.
 
         This re-writes the test of an assertion to provide
         intermediate values and replace it with an if statement which
         raises an assertion error with a detailed explanation in case
         the expression is false.
 
         """
+        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
+            fslocation = (self.module_path, assert_.lineno)
+            self.config.warn('R1', 'assertion is always true, perhaps '
+                              'remove parentheses?', fslocation=fslocation)
         self.statements = []
         self.variables = []
         self.variable_counter = itertools.count()
         self.stack = []
         self.on_failure = []
         self.push_format_context()
         # Rewrite assert into a bunch of statements.
         top_condition, explanation = self.visit(assert_.test)
@@ -850,25 +902,29 @@ class AssertionRewriter(ast.NodeVisitor)
         res_expl = self.explanation_param(self.display(res))
         pat = "%s\n{%s = %s.%s\n}"
         expl = pat % (res_expl, res_expl, value_expl, attr.attr)
         return res, expl
 
     def visit_Compare(self, comp):
         self.push_format_context()
         left_res, left_expl = self.visit(comp.left)
+        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
+            left_expl = "({0})".format(left_expl)
         res_variables = [self.variable() for i in range(len(comp.ops))]
         load_names = [ast.Name(v, ast.Load()) for v in res_variables]
         store_names = [ast.Name(v, ast.Store()) for v in res_variables]
         it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
         expls = []
         syms = []
         results = [left_res]
         for i, op, next_operand in it:
             next_res, next_expl = self.visit(next_operand)
+            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
+                next_expl = "({0})".format(next_expl)
             results.append(next_res)
             sym = binop_map[op.__class__]
             syms.append(ast.Str(sym))
             expl = "%s %s %s" % (left_expl, sym, next_expl)
             expls.append(ast.Str(expl))
             res_expr = ast.Compare(left_res, [op], [next_res])
             self.statements.append(ast.Assign([store_names[i]], res_expr))
             left_res, left_expl = next_res, next_expl
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/assertion/truncate.py
@@ -0,0 +1,102 @@
+"""
+Utilities for truncating assertion output.
+
+Current default behaviour is to truncate assertion explanations at
+~8 terminal lines, unless running in "-vv" mode or running on CI.
+"""
+from __future__ import absolute_import, division, print_function
+import os
+
+import py
+
+
+DEFAULT_MAX_LINES = 8
+DEFAULT_MAX_CHARS = 8 * 80
+USAGE_MSG = "use '-vv' to show"
+
+
+def truncate_if_required(explanation, item, max_length=None):
+    """
+    Truncate this assertion explanation if the given test item is eligible.
+    """
+    if _should_truncate_item(item):
+        return _truncate_explanation(explanation)
+    return explanation
+
+
+def _should_truncate_item(item):
+    """
+    Whether or not this test item is eligible for truncation.
+    """
+    verbose = item.config.option.verbose
+    return verbose < 2 and not _running_on_ci()
+
+
+def _running_on_ci():
+    """Check if we're currently running on a CI system."""
+    env_vars = ['CI', 'BUILD_NUMBER']
+    return any(var in os.environ for var in env_vars)
+
+
+def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
+    """
+    Truncate given list of strings that makes up the assertion explanation.
+
+    Truncates to either 8 lines, or 640 characters - whichever the input reaches
+    first. The remaining lines will be replaced by a usage message.
+    """
+
+    if max_lines is None:
+        max_lines = DEFAULT_MAX_LINES
+    if max_chars is None:
+        max_chars = DEFAULT_MAX_CHARS
+
+    # Check if truncation required
+    input_char_count = len("".join(input_lines))
+    if len(input_lines) <= max_lines and input_char_count <= max_chars:
+        return input_lines
+
+    # Truncate first to max_lines, and then truncate to max_chars if max_chars
+    # is exceeded.
+    truncated_explanation = input_lines[:max_lines]
+    truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars)
+
+    # Add ellipsis to final line
+    truncated_explanation[-1] = truncated_explanation[-1] + "..."
+
+    # Append useful message to explanation
+    truncated_line_count = len(input_lines) - len(truncated_explanation)
+    truncated_line_count += 1  # Account for the part-truncated final line
+    msg = '...Full output truncated'
+    if truncated_line_count == 1:
+        msg += ' ({0} line hidden)'.format(truncated_line_count)
+    else:
+        msg += ' ({0} lines hidden)'.format(truncated_line_count)
+    msg += ", {0}" .format(USAGE_MSG)
+    truncated_explanation.extend([
+        py.builtin._totext(""),
+        py.builtin._totext(msg),
+    ])
+    return truncated_explanation
+
+
+def _truncate_by_char_count(input_lines, max_chars):
+    # Check if truncation required
+    if len("".join(input_lines)) <= max_chars:
+        return input_lines
+
+    # Find point at which input length exceeds total allowed length
+    iterated_char_count = 0
+    for iterated_index, input_line in enumerate(input_lines):
+        if iterated_char_count + len(input_line) > max_chars:
+            break
+        iterated_char_count += len(input_line)
+
+    # Create truncated explanation with modified final line
+    truncated_result = input_lines[:iterated_index]
+    final_line = input_lines[iterated_index]
+    if final_line:
+        final_line_truncate_point = max_chars - iterated_char_count
+        final_line = final_line[:final_line_truncate_point]
+    truncated_result.append(final_line)
+    return truncated_result
--- a/third_party/python/pytest/_pytest/assertion/util.py
+++ b/third_party/python/pytest/_pytest/assertion/util.py
@@ -1,19 +1,20 @@
 """Utilities for assertion debugging"""
+from __future__ import absolute_import, division, print_function
 import pprint
 
 import _pytest._code
 import py
 try:
     from collections import Sequence
 except ImportError:
     Sequence = list
 
-BuiltinAssertionError = py.builtin.builtins.AssertionError
+
 u = py.builtin._totext
 
 # The _reprcompare attribute on the util module is used by the new assertion
 # interpretation code and assertion rewriter to detect this plugin was
 # loaded and in turn call the hooks defined here as part of the
 # DebugInterpreter.
 _reprcompare = None
 
@@ -33,54 +34,21 @@ def format_explanation(explanation):
     Normally all embedded newlines are escaped, however there are
     three exceptions: \n{, \n} and \n~.  The first two are intended
     cover nested explanations, see function and attribute explanations
     for examples (.visit_Call(), visit_Attribute()).  The last one is
     for when one explanation needs to span multiple lines, e.g. when
     displaying diffs.
     """
     explanation = ecu(explanation)
-    explanation = _collapse_false(explanation)
     lines = _split_explanation(explanation)
     result = _format_lines(lines)
     return u('\n').join(result)
 
 
-def _collapse_false(explanation):
-    """Collapse expansions of False
-
-    So this strips out any "assert False\n{where False = ...\n}"
-    blocks.
-    """
-    where = 0
-    while True:
-        start = where = explanation.find("False\n{False = ", where)
-        if where == -1:
-            break
-        level = 0
-        prev_c = explanation[start]
-        for i, c in enumerate(explanation[start:]):
-            if prev_c + c == "\n{":
-                level += 1
-            elif prev_c + c == "\n}":
-                level -= 1
-                if not level:
-                    break
-            prev_c = c
-        else:
-            raise AssertionError("unbalanced braces: %r" % (explanation,))
-        end = start + i
-        where = end
-        if explanation[end - 1] == '\n':
-            explanation = (explanation[:start] + explanation[start+15:end-1] +
-                           explanation[end+1:])
-            where -= 17
-    return explanation
-
-
 def _split_explanation(explanation):
     """Return a list of individual lines in the explanation
 
     This will return a list of lines split on '\n{', '\n}' and '\n~'.
     Any other newlines will be escaped and appear in the line as the
     literal '\n' characters.
     """
     raw_lines = (explanation or u('')).split('\n')
@@ -133,17 +101,17 @@ try:
     basestring = basestring
 except NameError:
     basestring = str
 
 
 def assertrepr_compare(config, op, left, right):
     """Return specialised explanations for some operators/operands"""
     width = 80 - 15 - len(op) - 2  # 15 chars indentation, 1 space around op
-    left_repr = py.io.saferepr(left, maxsize=int(width/2))
+    left_repr = py.io.saferepr(left, maxsize=int(width//2))
     right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
 
     summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
 
     issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
                             not isinstance(x, basestring))
     istext = lambda x: isinstance(x, basestring)
     isdict = lambda x: isinstance(x, dict)
@@ -220,19 +188,20 @@ def _diff_text(left, right, verbose=Fals
                 if left[-i] != right[-i]:
                     break
             if i > 42:
                 i -= 10     # Provide some context
                 explanation += [u('Skipping %s identical trailing '
                                   'characters in diff, use -v to show') % i]
                 left = left[:-i]
                 right = right[:-i]
+    keepends = True
     explanation += [line.strip('\n')
-                    for line in ndiff(left.splitlines(),
-                                      right.splitlines())]
+                    for line in ndiff(left.splitlines(keepends),
+                                      right.splitlines(keepends))]
     return explanation
 
 
 def _compare_eq_iterable(left, right, verbose=False):
     if not verbose:
         return [u('Use -v to get the full diff')]
     # dynamic import to speedup pytest
     import difflib
@@ -283,18 +252,18 @@ def _compare_eq_set(left, right, verbose
             explanation.append(py.io.saferepr(item))
     return explanation
 
 
 def _compare_eq_dict(left, right, verbose=False):
     explanation = []
     common = set(left).intersection(set(right))
     same = dict((k, left[k]) for k in common if left[k] == right[k])
-    if same and not verbose:
-        explanation += [u('Omitting %s identical items, use -v to show') %
+    if same and verbose < 2:
+        explanation += [u('Omitting %s identical items, use -vv to show') %
                         len(same)]
     elif same:
         explanation += [u('Common items:')]
         explanation += pprint.pformat(same).splitlines()
     diff = set(k for k in common if left[k] != right[k])
     if diff:
         explanation += [u('Differing items:')]
         for k in diff:
old mode 100755
new mode 100644
--- a/third_party/python/pytest/_pytest/cacheprovider.py
+++ b/third_party/python/pytest/_pytest/cacheprovider.py
@@ -1,15 +1,15 @@
 """
 merged implementation of the cache provider
 
-the name cache was not choosen to ensure pluggy automatically
+the name cache was not chosen to ensure pluggy automatically
 ignores the external pytest-cache
 """
-
+from __future__ import absolute_import, division, print_function
 import py
 import pytest
 import json
 from os.path import sep as _sep, altsep as _altsep
 
 
 class Cache(object):
     def __init__(self, config):
@@ -134,21 +134,21 @@ class LFPlugin:
                 if item.nodeid in self.lastfailed:
                     previously_failed.append(item)
                 else:
                     previously_passed.append(item)
             if not previously_failed and previously_passed:
                 # running a subset of all tests with recorded failures outside
                 # of the set of tests currently executing
                 pass
-            elif self.config.getvalue("failedfirst"):
-                items[:] = previously_failed + previously_passed
-            else:
+            elif self.config.getvalue("lf"):
                 items[:] = previously_failed
                 config.hook.pytest_deselected(items=previously_passed)
+            else:
+                items[:] = previously_failed + previously_passed
 
     def pytest_sessionfinish(self, session):
         config = self.config
         if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
             return
         prev_failed = config.cache.get("cache/lastfailed", None) is not None
         if (session.testscollected and prev_failed) or self.lastfailed:
             config.cache.set("cache/lastfailed", self.lastfailed)
@@ -214,32 +214,32 @@ def cacheshow(config, session):
     tw.line("cachedir: " + str(config.cache._cachedir))
     if not config.cache._cachedir.check():
         tw.line("cache is empty")
         return 0
     dummy = object()
     basedir = config.cache._cachedir
     vdir = basedir.join("v")
     tw.sep("-", "cache values")
-    for valpath in vdir.visit(lambda x: x.isfile()):
+    for valpath in sorted(vdir.visit(lambda x: x.isfile())):
         key = valpath.relto(vdir).replace(valpath.sep, "/")
         val = config.cache.get(key, dummy)
         if val is dummy:
             tw.line("%s contains unreadable content, "
                   "will be ignored" % key)
         else:
             tw.line("%s contains:" % key)
             stream = py.io.TextIO()
             pprint(val, stream=stream)
             for line in stream.getvalue().splitlines():
                 tw.line("  " + line)
 
     ddir = basedir.join("d")
     if ddir.isdir() and ddir.listdir():
         tw.sep("-", "cache directories")
-        for p in basedir.join("d").visit():
+        for p in sorted(basedir.join("d").visit()):
             #if p.check(dir=1):
             #    print("%s/" % p.relto(basedir))
             if p.isfile():
                 key = p.relto(basedir)
                 tw.line("%s is a file of length %d" % (
                         key, p.size()))
     return 0
--- a/third_party/python/pytest/_pytest/capture.py
+++ b/third_party/python/pytest/_pytest/capture.py
@@ -1,22 +1,25 @@
 """
 per-test stdout/stderr capturing mechanism.
 
 """
-from __future__ import with_statement
+from __future__ import absolute_import, division, print_function
 
+import contextlib
 import sys
 import os
+import io
+from io import UnsupportedOperation
 from tempfile import TemporaryFile
 
 import py
 import pytest
+from _pytest.compat import CaptureIO
 
-from py.io import TextIO
 unicode = py.builtin.text
 
 patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
 
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group._addoption(
@@ -26,18 +29,20 @@ def pytest_addoption(parser):
         help="per-test capturing method: one of fd|sys|no.")
     group._addoption(
         '-s', action="store_const", const="no", dest="capture",
         help="shortcut for --capture=no.")
 
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_load_initial_conftests(early_config, parser, args):
+    ns = early_config.known_args_namespace
+    if ns.capture == "fd":
+        _py36_windowsconsoleio_workaround()
     _readline_workaround()
-    ns = early_config.known_args_namespace
     pluginmanager = early_config.pluginmanager
     capman = CaptureManager(ns.capture)
     pluginmanager.register(capman, "capturemanager")
 
     # make sure that capturemanager is properly reset at final shutdown
     early_config.add_cleanup(capman.reset_capturings)
 
     # make sure logging does not raise exceptions at the end
@@ -141,70 +146,81 @@ class CaptureManager:
     @pytest.hookimpl(tryfirst=True)
     def pytest_keyboard_interrupt(self, excinfo):
         self.reset_capturings()
 
     @pytest.hookimpl(tryfirst=True)
     def pytest_internalerror(self, excinfo):
         self.reset_capturings()
 
-    def suspendcapture_item(self, item, when):
-        out, err = self.suspendcapture()
+    def suspendcapture_item(self, item, when, in_=False):
+        out, err = self.suspendcapture(in_=in_)
         item.add_report_section(when, "stdout", out)
         item.add_report_section(when, "stderr", err)
 
+
 error_capsysfderror = "cannot use capsys and capfd at the same time"
 
 
 @pytest.fixture
 def capsys(request):
-    """enables capturing of writes to sys.stdout/sys.stderr and makes
+    """Enable capturing of writes to sys.stdout/sys.stderr and make
     captured output available via ``capsys.readouterr()`` method calls
     which return a ``(out, err)`` tuple.
     """
-    if "capfd" in request._funcargs:
+    if "capfd" in request.fixturenames:
         raise request.raiseerror(error_capsysfderror)
-    request.node._capfuncarg = c = CaptureFixture(SysCapture)
+    request.node._capfuncarg = c = CaptureFixture(SysCapture, request)
     return c
 
 @pytest.fixture
 def capfd(request):
-    """enables capturing of writes to file descriptors 1 and 2 and makes
+    """Enable capturing of writes to file descriptors 1 and 2 and make
     captured output available via ``capfd.readouterr()`` method calls
     which return a ``(out, err)`` tuple.
     """
-    if "capsys" in request._funcargs:
+    if "capsys" in request.fixturenames:
         request.raiseerror(error_capsysfderror)
     if not hasattr(os, 'dup'):
         pytest.skip("capfd funcarg needs os.dup")
-    request.node._capfuncarg = c = CaptureFixture(FDCapture)
+    request.node._capfuncarg = c = CaptureFixture(FDCapture, request)
     return c
 
 
 class CaptureFixture:
-    def __init__(self, captureclass):
+    def __init__(self, captureclass, request):
         self.captureclass = captureclass
+        self.request = request
 
     def _start(self):
         self._capture = MultiCapture(out=True, err=True, in_=False,
-                                       Capture=self.captureclass)
+                                     Capture=self.captureclass)
         self._capture.start_capturing()
 
     def close(self):
         cap = self.__dict__.pop("_capture", None)
         if cap is not None:
             self._outerr = cap.pop_outerr_to_orig()
             cap.stop_capturing()
 
     def readouterr(self):
         try:
             return self._capture.readouterr()
         except AttributeError:
             return self._outerr
 
+    @contextlib.contextmanager
+    def disabled(self):
+        capmanager = self.request.config.pluginmanager.getplugin('capturemanager')
+        capmanager.suspendcapture_item(self.request.node, "call", in_=True)
+        try:
+            yield
+        finally:
+            capmanager.resumecapture()
+
 
 def safe_text_dupfile(f, mode, default_encoding="UTF8"):
     """ return a open text file object that's a duplicate of f on the
         FD-level if possible.
     """
     encoding = getattr(f, "encoding", None)
     try:
         fd = f.fileno()
@@ -385,17 +401,17 @@ class SysCapture:
     def __init__(self, fd, tmpfile=None):
         name = patchsysdict[fd]
         self._old = getattr(sys, name)
         self.name = name
         if tmpfile is None:
             if name == "stdin":
                 tmpfile = DontReadFromInput()
             else:
-                tmpfile = TextIO()
+                tmpfile = CaptureIO()
         self.tmpfile = tmpfile
 
     def start(self):
         setattr(sys, self.name, self.tmpfile)
 
     def snap(self):
         f = self.tmpfile
         res = f.getvalue()
@@ -431,33 +447,41 @@ class DontReadFromInput:
 
     def read(self, *args):
         raise IOError("reading from stdin while output is captured")
     readline = read
     readlines = read
     __iter__ = read
 
     def fileno(self):
-        raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+        raise UnsupportedOperation("redirected stdin is pseudofile, "
+                                   "has no fileno()")
 
     def isatty(self):
         return False
 
     def close(self):
         pass
 
+    @property
+    def buffer(self):
+        if sys.version_info >= (3,0):
+            return self
+        else:
+            raise AttributeError('redirected stdin has no attribute buffer')
+
 
 def _readline_workaround():
     """
     Ensure readline is imported so that it attaches to the correct stdio
     handles on Windows.
 
     Pdb uses readline support where available--when not running from the Python
     prompt, the readline module is not imported until running the pdb REPL.  If
-    running py.test with the --pdb option this means the readline module is not
+    running pytest with the --pdb option this means the readline module is not
     imported until after I/O capture has been started.
 
     This is a problem for pyreadline, which is often used to implement readline
     support on Windows, as it does not attach to the correct handles for stdout
     and/or stdin if they have been redirected by the FDCapture mechanism.  This
     workaround ensures that readline is imported before I/O capture is setup so
     that it can attach to the actual stdin/out for the console.
 
@@ -465,8 +489,54 @@ def _readline_workaround():
     """
 
     if not sys.platform.startswith('win32'):
         return
     try:
         import readline  # noqa
     except ImportError:
         pass
+
+
+def _py36_windowsconsoleio_workaround():
+    """
+    Python 3.6 implemented unicode console handling for Windows. This works
+    by reading/writing to the raw console handle using
+    ``{Read,Write}ConsoleW``.
+
+    The problem is that we are going to ``dup2`` over the stdio file
+    descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
+    handles used by Python to write to the console. Though there is still some
+    weirdness and the console handle seems to only be closed randomly and not
+    on the first call to ``CloseHandle``, or maybe it gets reopened with the
+    same handle value when we suspend capturing.
+
+    The workaround in this case will reopen stdio with a different fd which
+    also means a different handle by replicating the logic in
+    "Py_lifecycle.c:initstdio/create_stdio".
+
+    See https://github.com/pytest-dev/py/issues/103
+    """
+    if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6):
+        return
+
+    buffered = hasattr(sys.stdout.buffer, 'raw')
+    raw_stdout = sys.stdout.buffer.raw if buffered else sys.stdout.buffer
+
+    if not isinstance(raw_stdout, io._WindowsConsoleIO):
+        return
+
+    def _reopen_stdio(f, mode):
+        if not buffered and mode[0] == 'w':
+            buffering = 0
+        else:
+            buffering = -1
+
+        return io.TextIOWrapper(
+            open(os.dup(f.fileno()), mode, buffering),
+            f.encoding,
+            f.errors,
+            f.newlines,
+            f.line_buffering)
+
+    sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb')
+    sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb')
+    sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb')
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/compat.py
@@ -0,0 +1,307 @@
+"""
+python version compatibility code
+"""
+from __future__ import absolute_import, division, print_function
+import sys
+import inspect
+import types
+import re
+import functools
+
+import py
+
+import  _pytest
+
+
+
+try:
+    import enum
+except ImportError:  # pragma: no cover
+    # Only available in Python 3.4+ or as a backport
+    enum = None
+
+
+_PY3 = sys.version_info > (3, 0)
+_PY2 = not _PY3
+
+
+NoneType = type(None)
+NOTSET = object()
+
+PY35 = sys.version_info[:2] >= (3, 5)
+PY36 = sys.version_info[:2] >= (3, 6)
+MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError'
+
+if hasattr(inspect, 'signature'):
+    def _format_args(func):
+        return str(inspect.signature(func))
+else:
+    def _format_args(func):
+        return inspect.formatargspec(*inspect.getargspec(func))
+
+isfunction = inspect.isfunction
+isclass = inspect.isclass
+# used to work around a python2 exception info leak
+exc_clear = getattr(sys, 'exc_clear', lambda: None)
+# The type of re.compile objects is not exposed in Python.
+REGEX_TYPE = type(re.compile(''))
+
+
+def is_generator(func):
+    genfunc = inspect.isgeneratorfunction(func)
+    return genfunc and not iscoroutinefunction(func)
+
+
+def iscoroutinefunction(func):
+    """Return True if func is a decorated coroutine function.
+
+    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid importing asyncio directly,
+    which in turns also initializes the "logging" module as side-effect (see issue #8).
+    """
+    return (getattr(func, '_is_coroutine', False) or
+           (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func)))
+
+
+def getlocation(function, curdir):
+    import inspect
+    fn = py.path.local(inspect.getfile(function))
+    lineno = py.builtin._getcode(function).co_firstlineno
+    if fn.relto(curdir):
+        fn = fn.relto(curdir)
+    return "%s:%d" %(fn, lineno+1)
+
+
+def num_mock_patch_args(function):
+    """ return number of arguments used up by mock arguments (if any) """
+    patchings = getattr(function, "patchings", None)
+    if not patchings:
+        return 0
+    mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
+    if mock is not None:
+        return len([p for p in patchings
+                        if not p.attribute_name and p.new is mock.DEFAULT])
+    return len(patchings)
+
+
+def getfuncargnames(function, startindex=None):
+    # XXX merge with main.py's varnames
+    #assert not isclass(function)
+    realfunction = function
+    while hasattr(realfunction, "__wrapped__"):
+        realfunction = realfunction.__wrapped__
+    if startindex is None:
+        startindex = inspect.ismethod(function) and 1 or 0
+    if realfunction != function:
+        startindex += num_mock_patch_args(function)
+        function = realfunction
+    if isinstance(function, functools.partial):
+        argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
+        partial = function
+        argnames = argnames[len(partial.args):]
+        if partial.keywords:
+            for kw in partial.keywords:
+                argnames.remove(kw)
+    else:
+        argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
+    defaults = getattr(function, 'func_defaults',
+                       getattr(function, '__defaults__', None)) or ()
+    numdefaults = len(defaults)
+    if numdefaults:
+        return tuple(argnames[startindex:-numdefaults])
+    return tuple(argnames[startindex:])
+
+
+
+if  sys.version_info[:2] == (2, 6):
+    def isclass(object):
+        """ Return true if the object is a class. Overrides inspect.isclass for
+        python 2.6 because it will return True for objects which always return
+        something on __getattr__ calls (see #1035).
+        Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
+        """
+        return isinstance(object, (type, types.ClassType))
+
+
+if _PY3:
+    import codecs
+    imap = map
+    STRING_TYPES = bytes, str
+    UNICODE_TYPES = str,
+
+    def _escape_strings(val):
+        """If val is pure ascii, returns it as a str().  Otherwise, escapes
+        bytes objects into a sequence of escaped bytes:
+
+        b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
+
+        and escapes unicode objects into a sequence of escaped unicode
+        ids, e.g.:
+
+        '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
+
+        note:
+           the obvious "v.decode('unicode-escape')" will return
+           valid utf-8 unicode if it finds them in bytes, but we
+           want to return escaped bytes for any byte, even if they match
+           a utf-8 string.
+
+        """
+        if isinstance(val, bytes):
+            if val:
+                # source: http://goo.gl/bGsnwC
+                encoded_bytes, _ = codecs.escape_encode(val)
+                return encoded_bytes.decode('ascii')
+            else:
+                # empty bytes crashes codecs.escape_encode (#1087)
+                return ''
+        else:
+            return val.encode('unicode_escape').decode('ascii')
+else:
+    STRING_TYPES = bytes, str, unicode
+    UNICODE_TYPES = unicode,
+
+    from itertools import imap  # NOQA
+
+    def _escape_strings(val):
+        """In py2 bytes and str are the same type, so if it's a bytes
+        object, return it unchanged if it is a full ascii string,
+        otherwise escape it into its binary form.
+
+        If it's a unicode string, change the unicode characters into
+        unicode escapes.
+
+        """
+        if isinstance(val, bytes):
+            try:
+                return val.encode('ascii')
+            except UnicodeDecodeError:
+                return val.encode('string-escape')
+        else:
+            return val.encode('unicode-escape')
+
+
+def get_real_func(obj):
+    """ gets the real function object of the (possibly) wrapped object by
+    functools.wraps or functools.partial.
+    """
+    start_obj = obj
+    for i in range(100):
+        new_obj = getattr(obj, '__wrapped__', None)
+        if new_obj is None:
+            break
+        obj = new_obj
+    else:
+        raise ValueError(
+            ("could not find real function of {start}"
+             "\nstopped at {current}").format(
+                start=py.io.saferepr(start_obj),
+                current=py.io.saferepr(obj)))
+    if isinstance(obj, functools.partial):
+        obj = obj.func
+    return obj
+
+
+def getfslineno(obj):
+    # xxx let decorators etc specify a sane ordering
+    obj = get_real_func(obj)
+    if hasattr(obj, 'place_as'):
+        obj = obj.place_as
+    fslineno = _pytest._code.getfslineno(obj)
+    assert isinstance(fslineno[1], int), obj
+    return fslineno
+
+
+def getimfunc(func):
+    try:
+        return func.__func__
+    except AttributeError:
+        try:
+            return func.im_func
+        except AttributeError:
+            return func
+
+
+def safe_getattr(object, name, default):
+    """ Like getattr but return default upon any Exception.
+
+    Attribute access can potentially fail for 'evil' Python objects.
+    See issue #214.
+    """
+    try:
+        return getattr(object, name, default)
+    except Exception:
+        return default
+
+
+def _is_unittest_unexpected_success_a_failure():
+    """Return if the test suite should fail if a @expectedFailure unittest test PASSES.
+
+    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
+        Changed in version 3.4: Returns False if there were any
+        unexpectedSuccesses from tests marked with the expectedFailure() decorator.
+    """
+    return sys.version_info >= (3, 4)
+
+
+if _PY3:
+    def safe_str(v):
+        """returns v as string"""
+        return str(v)
+else:
+    def safe_str(v):
+        """returns v as string, converting to ascii if necessary"""
+        try:
+            return str(v)
+        except UnicodeError:
+            if not isinstance(v, unicode):
+                v = unicode(v)
+            errors = 'replace'
+            return v.encode('utf-8', errors)
+
+
+COLLECT_FAKEMODULE_ATTRIBUTES = (
+    'Collector',
+    'Module',
+    'Generator',
+    'Function',
+    'Instance',
+    'Session',
+    'Item',
+    'Class',
+    'File',
+    '_fillfuncargs',
+)
+
+
+def _setup_collect_fakemodule():
+    from types import ModuleType
+    import pytest
+    pytest.collect = ModuleType('pytest.collect')
+    pytest.collect.__all__ = []  # used for setns
+    for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
+        setattr(pytest.collect, attr, getattr(pytest, attr))
+
+
+if _PY2:
+    from py.io import TextIO as CaptureIO
+else:
+    import io
+
+    class CaptureIO(io.TextIOWrapper):
+        def __init__(self):
+            super(CaptureIO, self).__init__(
+                io.BytesIO(),
+                encoding='UTF-8', newline='', write_through=True,
+            )
+
+        def getvalue(self):
+            return self.buffer.getvalue().decode('UTF-8')
+
+class FuncargnamesCompatAttr(object):
+    """ helper class so that Metafunc, Function and FixtureRequest
+    don't need to each define the "funcargnames" compatibility attribute.
+    """
+    @property
+    def funcargnames(self):
+        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+        return self.fixturenames
--- a/third_party/python/pytest/_pytest/config.py
+++ b/third_party/python/pytest/_pytest/config.py
@@ -1,35 +1,45 @@
 """ command line options, ini-file and conftest.py processing. """
+from __future__ import absolute_import, division, print_function
 import argparse
 import shlex
 import traceback
 import types
 import warnings
 
 import py
 # DON't import pytest here because it causes import cycle troubles
-import sys, os
+import sys
+import os
 import _pytest._code
 import _pytest.hookspec  # the extension point definitions
+import _pytest.assertion
 from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
+from _pytest.compat import safe_str
 
 hookimpl = HookimplMarker("pytest")
 hookspec = HookspecMarker("pytest")
 
 # pytest startup
 #
 
 
 class ConftestImportFailure(Exception):
     def __init__(self, path, excinfo):
         Exception.__init__(self, path, excinfo)
         self.path = path
         self.excinfo = excinfo
 
+    def __str__(self):
+        etype, evalue, etb = self.excinfo
+        formatted = traceback.format_tb(etb)
+        # The level of the tracebacks we want to print is hand crafted :(
+        return repr(evalue) + '\n' + ''.join(formatted[2:])
+
 
 def main(args=None, plugins=None):
     """ return exit code, after performing an in-process test run.
 
     :arg args: list of command line arguments.
 
     :arg plugins: list of plugin objects to be auto-registered during
                   initialization.
@@ -40,37 +50,68 @@ def main(args=None, plugins=None):
         except ConftestImportFailure as e:
             tw = py.io.TerminalWriter(sys.stderr)
             for line in traceback.format_exception(*e.excinfo):
                 tw.line(line.rstrip(), red=True)
             tw.line("ERROR: could not load %s\n" % (e.path), red=True)
             return 4
         else:
             try:
-                config.pluginmanager.check_pending()
                 return config.hook.pytest_cmdline_main(config=config)
             finally:
                 config._ensure_unconfigure()
     except UsageError as e:
         for msg in e.args:
             sys.stderr.write("ERROR: %s\n" %(msg,))
         return 4
 
 class cmdline:  # compatibility namespace
     main = staticmethod(main)
 
+
 class UsageError(Exception):
     """ error in pytest usage or invocation"""
 
+
+class PrintHelp(Exception):
+    """Raised when pytest should print its help to skip the rest of the
+    argument parsing and validation."""
+    pass
+
+
+def filename_arg(path, optname):
+    """ Argparse type validator for filename arguments.
+
+    :path: path of filename
+    :optname: name of the option
+    """
+    if os.path.isdir(path):
+        raise UsageError("{0} must be a filename, given: {1}".format(optname, path))
+    return path
+
+
+def directory_arg(path, optname):
+    """Argparse type validator for directory arguments.
+
+    :path: path of directory
+    :optname: name of the option
+    """
+    if not os.path.isdir(path):
+        raise UsageError("{0} must be a directory, given: {1}".format(optname, path))
+    return path
+
+
 _preinit = []
 
 default_plugins = (
-     "mark main terminal runner python pdb unittest capture skipping "
-     "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
-     "junitxml resultlog doctest cacheprovider").split()
+     "mark main terminal runner python fixtures debugging unittest capture skipping "
+     "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
+     "junitxml resultlog doctest cacheprovider freeze_support "
+     "setuponly setupplan warnings").split()
+
 
 builtin_plugins = set(default_plugins)
 builtin_plugins.add("pytester")
 
 
 def _preloadplugins():
     assert not _preinit
     _preinit.append(get_config())
@@ -92,76 +133,85 @@ def get_plugin_manager():
     already loaded.
 
     This function can be used by integration with other tools, like hooking
     into pytest to run tests into an IDE.
     """
     return get_config().pluginmanager
 
 def _prepareconfig(args=None, plugins=None):
+    warning = None
     if args is None:
         args = sys.argv[1:]
     elif isinstance(args, py.path.local):
         args = [str(args)]
     elif not isinstance(args, (tuple, list)):
         if not isinstance(args, str):
             raise ValueError("not a string or argument list: %r" % (args,))
         args = shlex.split(args, posix=sys.platform != "win32")
+        from _pytest import deprecated
+        warning = deprecated.MAIN_STR_ARGS
     config = get_config()
     pluginmanager = config.pluginmanager
     try:
         if plugins:
             for plugin in plugins:
                 if isinstance(plugin, py.builtin._basestring):
                     pluginmanager.consider_pluginarg(plugin)
                 else:
                     pluginmanager.register(plugin)
+        if warning:
+            config.warn('C1', warning)
         return pluginmanager.hook.pytest_cmdline_parse(
                 pluginmanager=pluginmanager, args=args)
     except BaseException:
         config._ensure_unconfigure()
         raise
 
 
 class PytestPluginManager(PluginManager):
     """
-    Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific
+    Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific
     functionality:
 
     * loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and
       ``pytest_plugins`` global variables found in plugins being loaded;
     * ``conftest.py`` loading during start-up;
     """
     def __init__(self):
         super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
         self._conftest_plugins = set()
 
         # state related to local conftest plugins
         self._path2confmods = {}
         self._conftestpath2mod = {}
         self._confcutdir = None
         self._noconftest = False
+        self._duplicatepaths = set()
 
         self.add_hookspecs(_pytest.hookspec)
         self.register(self)
         if os.environ.get('PYTEST_DEBUG'):
             err = sys.stderr
             encoding = getattr(err, 'encoding', 'utf8')
             try:
                 err = py.io.dupfile(err, encoding=encoding)
             except Exception:
                 pass
             self.trace.root.setwriter(err.write)
             self.enable_tracing()
 
+        # Config._consider_importhook will set a real object if required.
+        self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
+
     def addhooks(self, module_or_class):
         """
         .. deprecated:: 2.8
 
-        Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead.
+        Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>` instead.
         """
         warning = dict(code="I2",
                        fslocation=_pytest._code.getfslineno(sys._getframe(1)),
                        nodeid=None,
                        message="use pluginmanager.add_hookspecs instead of "
                                "deprecated addhooks() method.")
         self._warn(warning)
         return self.add_hookspecs(module_or_class)
@@ -204,16 +254,19 @@ class PytestPluginManager(PluginManager)
                                    "argument" % (hook.name))
             self._warn(warning)
 
     def register(self, plugin, name=None):
         ret = super(PytestPluginManager, self).register(plugin, name)
         if ret:
             self.hook.pytest_plugin_registered.call_historic(
                       kwargs=dict(plugin=plugin, manager=self))
+
+            if isinstance(plugin, types.ModuleType):
+                self.consider_module(plugin)
         return ret
 
     def getplugin(self, name):
         # support deprecated naming because plugins (xdist e.g.) use it
         return self.get_plugin(name)
 
     def hasplugin(self, name):
         """Return True if the plugin with the given name is registered."""
@@ -348,62 +401,78 @@ class PytestPluginManager(PluginManager)
             name = arg[3:]
             self.set_blocked(name)
             if not name.startswith("pytest_"):
                 self.set_blocked("pytest_" + name)
         else:
             self.import_plugin(arg)
 
     def consider_conftest(self, conftestmodule):
-        if self.register(conftestmodule, name=conftestmodule.__file__):
-            self.consider_module(conftestmodule)
+        self.register(conftestmodule, name=conftestmodule.__file__)
 
     def consider_env(self):
         self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
 
     def consider_module(self, mod):
-        self._import_plugin_specs(getattr(mod, "pytest_plugins", None))
+        self._import_plugin_specs(getattr(mod, 'pytest_plugins', []))
 
     def _import_plugin_specs(self, spec):
-        if spec:
-            if isinstance(spec, str):
-                spec = spec.split(",")
-            for import_spec in spec:
-                self.import_plugin(import_spec)
+        plugins = _get_plugin_specs_as_list(spec)
+        for import_spec in plugins:
+            self.import_plugin(import_spec)
 
     def import_plugin(self, modname):
         # most often modname refers to builtin modules, e.g. "pytester",
         # "terminal" or "capture".  Those plugins are registered under their
         # basename for historic purposes but must be imported with the
         # _pytest prefix.
-        assert isinstance(modname, str)
+        assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname
+        modname = str(modname)
         if self.get_plugin(modname) is not None:
             return
         if modname in builtin_plugins:
             importspec = "_pytest." + modname
         else:
             importspec = modname
+        self.rewrite_hook.mark_rewrite(importspec)
         try:
             __import__(importspec)
         except ImportError as e:
-            new_exc = ImportError('Error importing plugin "%s": %s' % (modname, e))
+            new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0])))
             # copy over name and path attributes
             for attr in ('name', 'path'):
                 if hasattr(e, attr):
                     setattr(new_exc, attr, getattr(e, attr))
             raise new_exc
         except Exception as e:
             import pytest
             if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
                 raise
             self._warn("skipped plugin %r: %s" %((modname, e.msg)))
         else:
             mod = sys.modules[importspec]
             self.register(mod, modname)
-            self.consider_module(mod)
+
+
+def _get_plugin_specs_as_list(specs):
+    """
+    Parses a list of "plugin specs" and returns a list of plugin names.
+
+    Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in
+    which case it is returned as a list. Specs can also be `None` in which case an
+    empty list is returned.
+    """
+    if specs is not None:
+        if isinstance(specs, str):
+            specs = specs.split(',') if specs else []
+        if not isinstance(specs, (list, tuple)):
+            raise UsageError("Plugin specs must be a ','-separated string or a "
+                             "list/tuple of strings for plugin names. Given: %r" % specs)
+        return list(specs)
+    return []
 
 
 class Parser:
     """ Parser for command line arguments and ini-file values.
 
     :ivar extra_info: dict of generic param -> value to display in case
         there's an error processing the command line arguments.
     """
@@ -532,68 +601,66 @@ class ArgumentError(Exception):
     def __str__(self):
         if self.option_id:
             return "option %s: %s" % (self.option_id, self.msg)
         else:
             return self.msg
 
 
 class Argument:
-    """class that mimics the necessary behaviour of optparse.Option """
+    """class that mimics the necessary behaviour of optparse.Option
+
+    it's currently a least-effort implementation
+    and ignoring choices and integer prefixes
+    https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
+    """
     _typ_map = {
         'int': int,
         'string': str,
-        }
-    # enable after some grace period for plugin writers
-    TYPE_WARN = False
+        'float': float,
+        'complex': complex,
+    }
 
     def __init__(self, *names, **attrs):
         """store parms in private vars for use in add_argument"""
         self._attrs = attrs
         self._short_opts = []
         self._long_opts = []
         self.dest = attrs.get('dest')
-        if self.TYPE_WARN:
-            try:
-                help = attrs['help']
-                if '%default' in help:
-                    warnings.warn(
-                        'pytest now uses argparse. "%default" should be'
-                        ' changed to "%(default)s" ',
-                        FutureWarning,
-                        stacklevel=3)
-            except KeyError:
-                pass
+        if '%default' in (attrs.get('help') or ''):
+            warnings.warn(
+                'pytest now uses argparse. "%default" should be'
+                ' changed to "%(default)s" ',
+                DeprecationWarning,
+                stacklevel=3)
         try:
             typ = attrs['type']
         except KeyError:
             pass
         else:
             # this might raise a keyerror as well, don't want to catch that
             if isinstance(typ, py.builtin._basestring):
                 if typ == 'choice':
-                    if self.TYPE_WARN:
-                        warnings.warn(
-                            'type argument to addoption() is a string %r.'
-                            ' For parsearg this is optional and when supplied '
-                            ' should be a type.'
-                            ' (options: %s)' % (typ, names),
-                            FutureWarning,
-                            stacklevel=3)
+                    warnings.warn(
+                        'type argument to addoption() is a string %r.'
+                        ' For parsearg this is optional and when supplied'
+                        ' should be a type.'
+                        ' (options: %s)' % (typ, names),
+                        DeprecationWarning,
+                        stacklevel=3)
                     # argparse expects a type here take it from
                     # the type of the first element
                     attrs['type'] = type(attrs['choices'][0])
                 else:
-                    if self.TYPE_WARN:
-                        warnings.warn(
-                            'type argument to addoption() is a string %r.'
-                            ' For parsearg this should be a type.'
-                            ' (options: %s)' % (typ, names),
-                            FutureWarning,
-                            stacklevel=3)
+                    warnings.warn(
+                        'type argument to addoption() is a string %r.'
+                        ' For parsearg this should be a type.'
+                        ' (options: %s)' % (typ, names),
+                        DeprecationWarning,
+                        stacklevel=3)
                     attrs['type'] = Argument._typ_map[typ]
                 # used in test_parseopt -> test_parse_defaultgetter
                 self.type = attrs['type']
             else:
                 self.type = typ
         try:
             # attribute existence is tested in Config._processopt
             self.default = attrs['default']
@@ -650,30 +717,27 @@ class Argument:
                 if not (opt[0:2] == "--" and opt[2] != "-"):
                     raise ArgumentError(
                         "invalid long option string %r: "
                         "must start with --, followed by non-dash" % opt,
                         self)
                 self._long_opts.append(opt)
 
     def __repr__(self):
-        retval = 'Argument('
+        args = []
         if self._short_opts:
-            retval += '_short_opts: ' + repr(self._short_opts) + ', '
+            args += ['_short_opts: ' + repr(self._short_opts)]
         if self._long_opts:
-            retval += '_long_opts: ' + repr(self._long_opts) + ', '
-        retval += 'dest: ' + repr(self.dest) + ', '
+            args += ['_long_opts: ' + repr(self._long_opts)]
+        args += ['dest: ' + repr(self.dest)]
         if hasattr(self, 'type'):
-            retval += 'type: ' + repr(self.type) + ', '
+            args += ['type: ' + repr(self.type)]
         if hasattr(self, 'default'):
-            retval += 'default: ' + repr(self.default) + ', '
-        if retval[-2:] == ', ':  # always long enough to test ("Argument(" )
-            retval = retval[:-2]
-        retval += ')'
-        return retval
+            args += ['default: ' + repr(self.default)]
+        return 'Argument({0})'.format(', '.join(args))
 
 
 class OptionGroup:
     def __init__(self, name, description="", parser=None):
         self.name = name
         self.description = description
         self.options = []
         self.parser = parser
@@ -681,16 +745,20 @@ class OptionGroup:
     def addoption(self, *optnames, **attrs):
         """ add an option to this group.
 
         if a shortened version of a long option is specified it will
         be suppressed in the help. addoption('--twowords', '--two-words')
         results in help showing '--two-words' only, but --twowords gets
         accepted **and** the automatic destination is in args.twowords
         """
+        conflict = set(optnames).intersection(
+            name for opt in self.options for name in opt.names())
+        if conflict:
+            raise ValueError("option names %s already added" % conflict)
         option = Argument(*optnames, **attrs)
         self._addoption_instance(option, shortupper=False)
 
     def _addoption(self, *optnames, **attrs):
         option = Argument(*optnames, **attrs)
         self._addoption_instance(option, shortupper=True)
 
     def _addoption_instance(self, option, shortupper=False):
@@ -767,17 +835,17 @@ class DropShorterLongHelpFormatter(argpa
                    len(short_long[shortened]) < len(xxoption):
                     short_long[shortened] = xxoption
         # now short_long has been filled out to the longest with dashes
         # **and** we keep the right option ordering from add_argument
         for option in options: #
             if len(option) == 2 or option[2] == ' ':
                 return_list.append(option)
             if option[2:] == short_long.get(option.replace('-', '')):
-                return_list.append(option.replace(' ', '='))
+                return_list.append(option.replace(' ', '=', 1))
         action._formatted_action_invocation = ', '.join(return_list)
         return action._formatted_action_invocation
 
 
 
 def _ensure_removed_sysmodule(modname):
     try:
         del sys.modules[modname]
@@ -792,19 +860,21 @@ class CmdOptions(object):
         return "<CmdOptions %r>" %(self.__dict__,)
     def copy(self):
         return CmdOptions(self.__dict__)
 
 class Notset:
     def __repr__(self):
         return "<NOTSET>"
 
+
 notset = Notset()
 FILE_OR_DIR = 'file_or_dir'
 
+
 class Config(object):
     """ access to configuration values, pluginmanager and plugin hooks.  """
 
     def __init__(self, pluginmanager):
         #: access to command line option as attributes.
         #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
         self.option = CmdOptions()
         _a = FILE_OR_DIR
@@ -812,24 +882,27 @@ class Config(object):
             usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
             processopt=self._processopt,
         )
         #: a pluginmanager instance
         self.pluginmanager = pluginmanager
         self.trace = self.pluginmanager.trace.root.get("config")
         self.hook = self.pluginmanager.hook
         self._inicache = {}
+        self._override_ini = ()
         self._opt2dest = {}
         self._cleanup = []
         self._warn = self.pluginmanager._warn
         self.pluginmanager.register(self, "pytestconfig")
         self._configured = False
+
         def do_setns(dic):
             import pytest
             setns(pytest, dic)
+
         self.hook.pytest_namespace.call_historic(do_setns, {})
         self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
 
     def add_cleanup(self, func):
         """ Add a function to be called when the config object gets out of
         use (usually coninciding with pytest_unconfigure)."""
         self._cleanup.append(func)
 
@@ -842,21 +915,21 @@ class Config(object):
         if self._configured:
             self._configured = False
             self.hook.pytest_unconfigure(config=self)
             self.hook.pytest_configure._call_history = []
         while self._cleanup:
             fin = self._cleanup.pop()
             fin()
 
-    def warn(self, code, message, fslocation=None):
+    def warn(self, code, message, fslocation=None, nodeid=None):
         """ generate a warning for this test session. """
         self.hook.pytest_logwarning.call_historic(kwargs=dict(
             code=code, message=message,
-            fslocation=fslocation, nodeid=None))
+            fslocation=fslocation, nodeid=nodeid))
 
     def get_terminal_writer(self):
         return self.pluginmanager.get_plugin("terminalreporter")._tw
 
     def pytest_cmdline_parse(self, pluginmanager, args):
         # REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
         self.parse(args)
         return self
@@ -903,37 +976,104 @@ class Config(object):
                 setattr(self.option, opt.dest, opt.default)
 
     @hookimpl(trylast=True)
     def pytest_load_initial_conftests(self, early_config):
         self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
 
     def _initini(self, args):
         ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
-        r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args)
+        r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn)
         self.rootdir, self.inifile, self.inicfg = r
         self._parser.extra_info['rootdir'] = self.rootdir
         self._parser.extra_info['inifile'] = self.inifile
         self.invocation_dir = py.path.local()
         self._parser.addini('addopts', 'extra command line options', 'args')
         self._parser.addini('minversion', 'minimally required pytest version')
+        self._override_ini = ns.override_ini or ()
+
+    def _consider_importhook(self, args):
+        """Install the PEP 302 import hook if using assertion re-writing.
+
+        Needs to parse the --assert=<mode> option from the commandline
+        and find all the installed plugins to mark them for re-writing
+        by the importhook.
+        """
+        ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
+        mode = ns.assertmode
+        if mode == 'rewrite':
+            try:
+                hook = _pytest.assertion.install_importhook(self)
+            except SystemError:
+                mode = 'plain'
+            else:
+                self._mark_plugins_for_rewrite(hook)
+        self._warn_about_missing_assertion(mode)
+
+    def _mark_plugins_for_rewrite(self, hook):
+        """
+        Given an importhook, mark for rewrite any top-level
+        modules or packages in the distribution package for
+        all pytest plugins.
+        """
+        import pkg_resources
+        self.pluginmanager.rewrite_hook = hook
+
+        # 'RECORD' available for plugins installed normally (pip install)
+        # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)
+        # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa
+        # so it shouldn't be an issue
+        metadata_files = 'RECORD', 'SOURCES.txt'
+
+        package_files = (
+            entry.split(',')[0]
+            for entrypoint in pkg_resources.iter_entry_points('pytest11')
+            for metadata in metadata_files
+            for entry in entrypoint.dist._get_metadata(metadata)
+        )
+
+        for fn in package_files:
+            is_simple_module = os.sep not in fn and fn.endswith('.py')
+            is_package = fn.count(os.sep) == 1 and fn.endswith('__init__.py')
+            if is_simple_module:
+                module_name, ext = os.path.splitext(fn)
+                hook.mark_rewrite(module_name)
+            elif is_package:
+                package_name = os.path.dirname(fn)
+                hook.mark_rewrite(package_name)
+
+    def _warn_about_missing_assertion(self, mode):
+        try:
+            assert False
+        except AssertionError:
+            pass
+        else:
+            if mode == 'plain':
+                sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED"
+                                 " and FAILING TESTS WILL PASS.  Are you"
+                                 " using python -O?")
+            else:
+                sys.stderr.write("WARNING: assertions not in test modules or"
+                                 " plugins will be ignored"
+                                 " because assert statements are not executed "
+                                 "by the underlying Python interpreter "
+                                 "(are you using python -O?)\n")
 
     def _preparse(self, args, addopts=True):
         self._initini(args)
         if addopts:
             args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
             args[:] = self.getini("addopts") + args
         self._checkversion()
+        self._consider_importhook(args)
         self.pluginmanager.consider_preparse(args)
-        try:
-            self.pluginmanager.load_setuptools_entrypoints("pytest11")
-        except ImportError as e:
-            self.warn("I2", "could not load setuptools entry import: %s" % (e,))
+        self.pluginmanager.load_setuptools_entrypoints('pytest11')
         self.pluginmanager.consider_env()
         self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
+        confcutdir = self.known_args_namespace.confcutdir
         if self.known_args_namespace.confcutdir is None and self.inifile:
             confcutdir = py.path.local(self.inifile).dirname
             self.known_args_namespace.confcutdir = confcutdir
         try:
             self.hook.pytest_load_initial_conftests(early_config=self,
                     args=args, parser=self._parser)
         except ConftestImportFailure:
             e = sys.exc_info()[1]
@@ -961,57 +1101,63 @@ class Config(object):
         assert not hasattr(self, 'args'), (
                 "can only parse cmdline args at most once per Config object")
         self._origargs = args
         self.hook.pytest_addhooks.call_historic(
                                   kwargs=dict(pluginmanager=self.pluginmanager))
         self._preparse(args, addopts=addopts)
         # XXX deprecated hook:
         self.hook.pytest_cmdline_preparse(config=self, args=args)
-        args = self._parser.parse_setoption(args, self.option, namespace=self.option)
-        if not args:
-            cwd = os.getcwd()
-            if cwd == self.rootdir:
-                args = self.getini('testpaths')
+        self._parser.after_preparse = True
+        try:
+            args = self._parser.parse_setoption(args, self.option, namespace=self.option)
             if not args:
-                args = [cwd]
-        self.args = args
+                cwd = os.getcwd()
+                if cwd == self.rootdir:
+                    args = self.getini('testpaths')
+                if not args:
+                    args = [cwd]
+            self.args = args
+        except PrintHelp:
+            pass
 
     def addinivalue_line(self, name, line):
         """ add a line to an ini-file option. The option must have been
         declared but might not yet be set in which case the line becomes
         the first line in its value. """
         x = self.getini(name)
         assert isinstance(x, list)
         x.append(line) # modifies the cached list inline
 
     def getini(self, name):
         """ return configuration value from an :ref:`ini file <inifiles>`. If the
         specified name hasn't been registered through a prior
-        :py:func:`parser.addini <pytest.config.Parser.addini>`
+        :py:func:`parser.addini <_pytest.config.Parser.addini>`
         call (usually from a plugin), a ValueError is raised. """
         try:
             return self._inicache[name]
         except KeyError:
             self._inicache[name] = val = self._getini(name)
             return val
 
     def _getini(self, name):
         try:
             description, type, default = self._parser._inidict[name]
         except KeyError:
             raise ValueError("unknown configuration value: %r" %(name,))
-        try:
-            value = self.inicfg[name]
-        except KeyError:
-            if default is not None:
-                return default
-            if type is None:
-                return ''
-            return []
+        value = self._get_override_ini_value(name)
+        if value is None:
+            try:
+                value = self.inicfg[name]
+            except KeyError:
+                if default is not None:
+                    return default
+                if type is None:
+                    return ''
+                return []
         if type == "pathlist":
             dp = py.path.local(self.inicfg.config.path).dirpath()
             l = []
             for relpath in shlex.split(value):
                 l.append(dp.join(relpath, abs=True))
             return l
         elif type == "args":
             return shlex.split(value)
@@ -1032,16 +1178,32 @@ class Config(object):
         l = []
         for relroot in relroots:
             if not isinstance(relroot, py.path.local):
                 relroot = relroot.replace("/", py.path.local.sep)
                 relroot = modpath.join(relroot, abs=True)
             l.append(relroot)
         return l
 
+    def _get_override_ini_value(self, name):
+        value = None
+        # override_ini is a list of lists, to support both -o foo1=bar1 foo2=bar2
+        # and -o foo1=bar1 -o foo2=bar2 options
+        # always use the last item if multiple value set for same ini-name,
+        # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
+        for ini_config_list in self._override_ini:
+            for ini_config in ini_config_list:
+                try:
+                    (key, user_ini_value) = ini_config.split("=", 1)
+                except ValueError:
+                    raise UsageError("-o/--override-ini expects option=value style.")
+                if key == name:
+                    value = user_ini_value
+        return value
+
     def getoption(self, name, default=notset, skip=False):
         """ return command line option value.
 
         :arg name: name of the option.  You may also specify
             the literal ``--OPT`` option instead of the "dest" option name.
         :arg default: default value if no option of that name exists.
         :arg skip: if True raise pytest.skip if option does not exists
             or has a None value.
@@ -1069,79 +1231,122 @@ class Config(object):
         return self.getoption(name, skip=True)
 
 def exists(path, ignore=EnvironmentError):
     try:
         return path.check()
     except ignore:
         return False
 
-def getcfg(args, inibasenames):
+def getcfg(args, warnfunc=None):
+    """
+    Search the list of arguments for a valid ini-file for pytest,
+    and return a tuple of (rootdir, inifile, cfg-dict).
+
+    note: warnfunc is an optional function used to warn
+        about ini-files that use deprecated features.
+        This parameter should be removed when pytest
+        adopts standard deprecation warnings (#1804).
+    """
+    from _pytest.deprecated import SETUP_CFG_PYTEST
+    inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
     args = [x for x in args if not str(x).startswith("-")]
     if not args:
         args = [py.path.local()]
     for arg in args:
         arg = py.path.local(arg)
         for base in arg.parts(reverse=True):
             for inibasename in inibasenames:
                 p = base.join(inibasename)
                 if exists(p):
                     iniconfig = py.iniconfig.IniConfig(p)
                     if 'pytest' in iniconfig.sections:
+                        if inibasename == 'setup.cfg' and warnfunc:
+                            warnfunc('C1', SETUP_CFG_PYTEST)
                         return base, p, iniconfig['pytest']
+                    if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections:
+                        return base, p, iniconfig['tool:pytest']
                     elif inibasename == "pytest.ini":
                         # allowed to be empty
                         return base, p, {}
     return None, None, None
 
 
-def get_common_ancestor(args):
-    # args are what we get after early command line parsing (usually
-    # strings, but can be py.path.local objects as well)
+def get_common_ancestor(paths):
     common_ancestor = None
-    for arg in args:
-        if str(arg)[0] == "-":
+    for path in paths:
+        if not path.exists():
             continue
-        p = py.path.local(arg)
         if common_ancestor is None:
-            common_ancestor = p
+            common_ancestor = path
         else:
-            if p.relto(common_ancestor) or p == common_ancestor:
+            if path.relto(common_ancestor) or path == common_ancestor:
                 continue
-            elif common_ancestor.relto(p):
-                common_ancestor = p
+            elif common_ancestor.relto(path):
+                common_ancestor = path
             else:
-                shared = p.common(common_ancestor)
+                shared = path.common(common_ancestor)
                 if shared is not None:
                     common_ancestor = shared
     if common_ancestor is None:
         common_ancestor = py.path.local()
-    elif not common_ancestor.isdir():
+    elif common_ancestor.isfile():
         common_ancestor = common_ancestor.dirpath()
     return common_ancestor
 
 
-def determine_setup(inifile, args):
+def get_dirs_from_args(args):
+    def is_option(x):
+        return str(x).startswith('-')
+
+    def get_file_part_from_node_id(x):
+        return str(x).split('::')[0]
+
+    def get_dir_from_path(path):
+        if path.isdir():
+            return path
+        return py.path.local(path.dirname)
+
+    # These look like paths but may not exist
+    possible_paths = (
+        py.path.local(get_file_part_from_node_id(arg))
+        for arg in args
+        if not is_option(arg)
+    )
+
+    return [
+        get_dir_from_path(path)
+        for path in possible_paths
+        if path.exists()
+    ]
+
+
+def determine_setup(inifile, args, warnfunc=None):
+    dirs = get_dirs_from_args(args)
     if inifile:
         iniconfig = py.iniconfig.IniConfig(inifile)
         try:
             inicfg = iniconfig["pytest"]
         except KeyError:
             inicfg = None
-        rootdir = get_common_ancestor(args)
+        rootdir = get_common_ancestor(dirs)
     else:
-        ancestor = get_common_ancestor(args)
-        rootdir, inifile, inicfg = getcfg(
-            [ancestor], ["pytest.ini", "tox.ini", "setup.cfg"])
+        ancestor = get_common_ancestor(dirs)
+        rootdir, inifile, inicfg = getcfg([ancestor], warnfunc=warnfunc)
         if rootdir is None:
             for rootdir in ancestor.parts(reverse=True):
                 if rootdir.join("setup.py").exists():
                     break
             else:
-                rootdir = ancestor
+                rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc)
+                if rootdir is None:
+                    rootdir = get_common_ancestor([py.path.local(), ancestor])
+                    is_fs_root = os.path.splitdrive(str(rootdir))[1] == os.sep
+                    if is_fs_root:
+                        rootdir = ancestor
     return rootdir, inifile, inicfg or {}
 
 
 def setns(obj, dic):
     import pytest
     for name, value in dic.items():
         if isinstance(value, dict):
             mod = getattr(obj, name, None)
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/debugging.py
@@ -0,0 +1,123 @@
+""" interactive debugging with PDB, the Python Debugger. """
+from __future__ import absolute_import, division, print_function
+import pdb
+import sys
+
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group._addoption(
+        '--pdb', dest="usepdb", action="store_true",
+        help="start the interactive Python debugger on errors.")
+    group._addoption(
+        '--pdbcls', dest="usepdb_cls", metavar="modulename:classname",
+        help="start a custom interactive Python debugger on errors. "
+             "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb")
+
+
+def pytest_configure(config):
+    if config.getvalue("usepdb_cls"):
+        modname, classname = config.getvalue("usepdb_cls").split(":")
+        __import__(modname)
+        pdb_cls = getattr(sys.modules[modname], classname)
+    else:
+        pdb_cls = pdb.Pdb
+
+    if config.getvalue("usepdb"):
+        config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
+
+    old = (pdb.set_trace, pytestPDB._pluginmanager)
+
+    def fin():
+        pdb.set_trace, pytestPDB._pluginmanager = old
+        pytestPDB._config = None
+        pytestPDB._pdb_cls = pdb.Pdb
+
+    pdb.set_trace = pytestPDB.set_trace
+    pytestPDB._pluginmanager = config.pluginmanager
+    pytestPDB._config = config
+    pytestPDB._pdb_cls = pdb_cls
+    config._cleanup.append(fin)
+
+class pytestPDB:
+    """ Pseudo PDB that defers to the real pdb. """
+    _pluginmanager = None
+    _config = None
+    _pdb_cls = pdb.Pdb
+
+    @classmethod
+    def set_trace(cls):
+        """ invoke PDB set_trace debugging, dropping any IO capturing. """
+        import _pytest.config
+        frame = sys._getframe().f_back
+        if cls._pluginmanager is not None:
+            capman = cls._pluginmanager.getplugin("capturemanager")
+            if capman:
+                capman.suspendcapture(in_=True)
+            tw = _pytest.config.create_terminal_writer(cls._config)
+            tw.line()
+            tw.sep(">", "PDB set_trace (IO-capturing turned off)")
+            cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config)
+        cls._pdb_cls().set_trace(frame)
+
+
+class PdbInvoke:
+    def pytest_exception_interact(self, node, call, report):
+        capman = node.config.pluginmanager.getplugin("capturemanager")
+        if capman:
+            out, err = capman.suspendcapture(in_=True)
+            sys.stdout.write(out)
+            sys.stdout.write(err)
+        _enter_pdb(node, call.excinfo, report)
+
+    def pytest_internalerror(self, excrepr, excinfo):
+        for line in str(excrepr).split("\n"):
+            sys.stderr.write("INTERNALERROR> %s\n" % line)
+            sys.stderr.flush()
+        tb = _postmortem_traceback(excinfo)
+        post_mortem(tb)
+
+
+def _enter_pdb(node, excinfo, rep):
+    # XXX we re-use the TerminalReporter's terminalwriter
+    # because this seems to avoid some encoding related troubles
+    # for not completely clear reasons.
+    tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
+    tw.line()
+    tw.sep(">", "traceback")
+    rep.toterminal(tw)
+    tw.sep(">", "entering PDB")
+    tb = _postmortem_traceback(excinfo)
+    post_mortem(tb)
+    rep._pdbshown = True
+    return rep
+
+
+def _postmortem_traceback(excinfo):
+    # A doctest.UnexpectedException is not useful for post_mortem.
+    # Use the underlying exception instead:
+    from doctest import UnexpectedException
+    if isinstance(excinfo.value, UnexpectedException):
+        return excinfo.value.exc_info[2]
+    else:
+        return excinfo._excinfo[2]
+
+
+def _find_last_non_hidden_frame(stack):
+    i = max(0, len(stack) - 1)
+    while i and stack[i][0].f_locals.get("__tracebackhide__", False):
+        i -= 1
+    return i
+
+
+def post_mortem(t):
+    class Pdb(pytestPDB._pdb_cls):
+        def get_stack(self, f, t):
+            stack, i = pdb.Pdb.get_stack(self, f, t)
+            if f is None:
+                i = _find_last_non_hidden_frame(stack)
+            return stack, i
+    p = Pdb()
+    p.reset()
+    p.interaction(None, t)
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/deprecated.py
@@ -0,0 +1,24 @@
+"""
+This module contains deprecation messages and bits of code used elsewhere in the codebase
+that are planned to be removed in the next pytest release.
+
+Keeping it in a central location makes it easy to track what is deprecated and should
+be removed when the time comes.
+"""
+from __future__ import absolute_import, division, print_function
+
+MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \
+                      'pass a list of arguments instead.'
+
+YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0'
+
+FUNCARG_PREFIX = (
+    '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated '
+    'and scheduled to be removed in pytest 4.0.  '
+    'Please remove the prefix and use the @pytest.fixture decorator instead.')
+
+SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool:pytest] instead.'
+
+GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue"
+
+RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0'
--- a/third_party/python/pytest/_pytest/doctest.py
+++ b/third_party/python/pytest/_pytest/doctest.py
@@ -1,27 +1,46 @@
 """ discover and run doctests in modules and test files."""
-from __future__ import absolute_import
+from __future__ import absolute_import, division, print_function
 
 import traceback
 
 import pytest
-from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo
-from _pytest.python import FixtureRequest
+from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr
+from _pytest.fixtures import FixtureRequest
 
 
+DOCTEST_REPORT_CHOICE_NONE = 'none'
+DOCTEST_REPORT_CHOICE_CDIFF = 'cdiff'
+DOCTEST_REPORT_CHOICE_NDIFF = 'ndiff'
+DOCTEST_REPORT_CHOICE_UDIFF = 'udiff'
+DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = 'only_first_failure'
+
+DOCTEST_REPORT_CHOICES = (
+    DOCTEST_REPORT_CHOICE_NONE,
+    DOCTEST_REPORT_CHOICE_CDIFF,
+    DOCTEST_REPORT_CHOICE_NDIFF,
+    DOCTEST_REPORT_CHOICE_UDIFF,
+    DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
+)
 
 def pytest_addoption(parser):
     parser.addini('doctest_optionflags', 'option flags for doctests',
         type="args", default=["ELLIPSIS"])
+    parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8")
     group = parser.getgroup("collect")
     group.addoption("--doctest-modules",
         action="store_true", default=False,
         help="run doctests in all .py modules",
         dest="doctestmodules")
+    group.addoption("--doctest-report",
+        type=str.lower, default="udiff",
+        help="choose another output format for diffs on doctest failure",
+        choices=DOCTEST_REPORT_CHOICES,
+        dest="doctestreport")
     group.addoption("--doctest-glob",
         action="append", default=[], metavar="pat",
         help="doctests file matching pattern, default: test*.txt",
         dest="doctestglob")
     group.addoption("--doctest-ignore-import-errors",
         action="store_true", default=False,
         help="ignore doctest ImportErrors",
         dest="doctest_ignore_import_errors")
@@ -54,28 +73,29 @@ class ReprFailDoctest(TerminalRepr):
 
     def toterminal(self, tw):
         for line in self.lines:
             tw.line(line)
         self.reprlocation.toterminal(tw)
 
 
 class DoctestItem(pytest.Item):
-
     def __init__(self, name, parent, runner=None, dtest=None):
         super(DoctestItem, self).__init__(name, parent)
         self.runner = runner
         self.dtest = dtest
         self.obj = None
         self.fixture_request = None
 
     def setup(self):
         if self.dtest is not None:
             self.fixture_request = _setup_fixtures(self)
-            globs = dict(getfixture=self.fixture_request.getfuncargvalue)
+            globs = dict(getfixture=self.fixture_request.getfixturevalue)
+            for name, value in self.fixture_request.getfixturevalue('doctest_namespace').items():
+                globs[name] = value
             self.dtest.globs.update(globs)
 
     def runtest(self):
         _check_all_skipped(self.dtest)
         self.runner.run(self.dtest)
 
     def repr_failure(self, excinfo):
         import doctest
@@ -87,33 +107,33 @@ class DoctestItem(pytest.Item):
             filename = test.filename
             if test.lineno is None:
                 lineno = None
             else:
                 lineno = test.lineno + example.lineno + 1
             message = excinfo.type.__name__
             reprlocation = ReprFileLocation(filename, lineno, message)
             checker = _get_checker()
-            REPORT_UDIFF = doctest.REPORT_UDIFF
+            report_choice = _get_report_choice(self.config.getoption("doctestreport"))
             if lineno is not None:
                 lines = doctestfailure.test.docstring.splitlines(False)
                 # add line numbers to the left of the error message
                 lines = ["%03d %s" % (i + test.lineno + 1, x)
                          for (i, x) in enumerate(lines)]
                 # trim docstring error lines to 10
                 lines = lines[example.lineno - 9:example.lineno + 1]
             else:
                 lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
                 indent = '>>>'
                 for line in example.source.splitlines():
                     lines.append('??? %s %s' % (indent, line))
                     indent = '...'
             if excinfo.errisinstance(doctest.DocTestFailure):
                 lines += checker.output_difference(example,
-                        doctestfailure.got, REPORT_UDIFF).split("\n")
+                        doctestfailure.got, report_choice).split("\n")
             else:
                 inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
                 lines += ["UNEXPECTED EXCEPTION: %s" %
                             repr(inner_excinfo.value)]
                 lines += traceback.format_exception(*excinfo.value.exc_info)
             return ReprFailDoctest(reprlocation, lines)
         else:
             return super(DoctestItem, self).repr_failure(excinfo)
@@ -138,40 +158,39 @@ def _get_flag_lookup():
 def get_optionflags(parent):
     optionflags_str = parent.config.getini("doctest_optionflags")
     flag_lookup_table = _get_flag_lookup()
     flag_acc = 0
     for flag in optionflags_str:
         flag_acc |= flag_lookup_table[flag]
     return flag_acc
 
-
-class DoctestTextfile(DoctestItem, pytest.Module):
+class DoctestTextfile(pytest.Module):
+    obj = None
 
-    def runtest(self):
+    def collect(self):
         import doctest
-        fixture_request = _setup_fixtures(self)
 
         # inspired by doctest.testfile; ideally we would use it directly,
         # but it doesn't support passing a custom checker
-        text = self.fspath.read()
+        encoding = self.config.getini("doctest_encoding")
+        text = self.fspath.read_text(encoding)
         filename = str(self.fspath)
         name = self.fspath.basename
-        globs = dict(getfixture=fixture_request.getfuncargvalue)
-        if '__name__' not in globs:
-            globs['__name__'] = '__main__'
+        globs = {'__name__': '__main__'}
 
         optionflags = get_optionflags(self)
         runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                      checker=_get_checker())
+        _fix_spoof_python2(runner, encoding)
 
         parser = doctest.DocTestParser()
         test = parser.get_doctest(text, globs, name, filename, 0)
-        _check_all_skipped(test)
-        runner.run(test)
+        if test.examples:
+            yield DoctestItem(test.name, self, runner, test)
 
 
 def _check_all_skipped(test):
     """raises pytest.skip() if all examples in the given DocTest have the SKIP
     option set.
     """
     import doctest
     all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
@@ -192,16 +211,17 @@ class DoctestModule(pytest.Module):
                     pytest.skip('unable to import module %r' % self.fspath)
                 else:
                     raise
         # uses internal doctest module parsing mechanism
         finder = doctest.DocTestFinder()
         optionflags = get_optionflags(self)
         runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                      checker=_get_checker())
+
         for test in finder.find(module, module.__name__):
             if test.examples:  # skip empty doctests
                 yield DoctestItem(test.name, self, runner, test)
 
 
 def _setup_fixtures(doctest_item):
     """
     Used by DoctestTextfile and DoctestItem to setup fixture information.
@@ -283,8 +303,58 @@ def _get_allow_unicode_flag():
 
 
 def _get_allow_bytes_flag():
     """
     Registers and returns the ALLOW_BYTES flag.
     """
     import doctest
     return doctest.register_optionflag('ALLOW_BYTES')
+
+
+def _get_report_choice(key):
+    """
+    This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid
+    importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
+    """
+    import doctest
+
+    return {
+        DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
+        DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
+        DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
+        DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
+        DOCTEST_REPORT_CHOICE_NONE: 0,
+    }[key]
+
+
+def _fix_spoof_python2(runner, encoding):
+    """
+    Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This
+    should patch only doctests for text files because they don't have a way to declare their
+    encoding. Doctests in docstrings from Python modules don't have the same problem given that
+    Python already decoded the strings.
+    
+    This fixes the problem related in issue #2434.
+    """
+    from _pytest.compat import _PY2
+    if not _PY2:
+        return
+
+    from doctest import _SpoofOut
+
+    class UnicodeSpoof(_SpoofOut):
+
+        def getvalue(self):
+            result = _SpoofOut.getvalue(self)
+            if encoding:
+                result = result.decode(encoding)
+            return result
+
+    runner._fakeout = UnicodeSpoof()
+
+
+@pytest.fixture(scope='session')
+def doctest_namespace():
+    """
+    Inject names into the doctest namespace.
+    """
+    return dict()
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/fixtures.py
@@ -0,0 +1,1129 @@
+from __future__ import absolute_import, division, print_function
+import sys
+
+from py._code.code import FormattedExcinfo
+
+import py
+import warnings
+
+import inspect
+import _pytest
+from _pytest._code.code import TerminalRepr
+from _pytest.compat import (
+    NOTSET, exc_clear, _format_args,
+    getfslineno, get_real_func,
+    is_generator, isclass, getimfunc,
+    getlocation, getfuncargnames,
+    safe_getattr,
+)
+from _pytest.runner import fail
+from _pytest.compat import FuncargnamesCompatAttr
+
+def pytest_sessionstart(session):
+    import _pytest.python
+    scopename2class.update({
+        'class': _pytest.python.Class,
+        'module': _pytest.python.Module,
+        'function': _pytest.main.Item,
+    })
+    session._fixturemanager = FixtureManager(session)
+
+
+scopename2class = {}
+
+
+scope2props = dict(session=())
+scope2props["module"] = ("fspath", "module")
+scope2props["class"] = scope2props["module"] + ("cls",)
+scope2props["instance"] = scope2props["class"] + ("instance", )
+scope2props["function"] = scope2props["instance"] + ("function", "keywords")
+
+def scopeproperty(name=None, doc=None):
+    def decoratescope(func):
+        scopename = name or func.__name__
+
+        def provide(self):
+            if func.__name__ in scope2props[self.scope]:
+                return func(self)
+            raise AttributeError("%s not available in %s-scoped context" % (
+                scopename, self.scope))
+
+        return property(provide, None, None, func.__doc__)
+    return decoratescope
+
+
+def get_scope_node(node, scope):
+    cls = scopename2class.get(scope)
+    if cls is None:
+        if scope == "session":
+            return node.session
+        raise ValueError("unknown scope")
+    return node.getparent(cls)
+
+
+def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
+    # this function will transform all collected calls to a functions
+    # if they use direct funcargs (i.e. direct parametrization)
+    # because we want later test execution to be able to rely on
+    # an existing FixtureDef structure for all arguments.
+    # XXX we can probably avoid this algorithm  if we modify CallSpec2
+    # to directly care for creating the fixturedefs within its methods.
+    if not metafunc._calls[0].funcargs:
+        return # this function call does not have direct parametrization
+    # collect funcargs of all callspecs into a list of values
+    arg2params = {}
+    arg2scope = {}
+    for callspec in metafunc._calls:
+        for argname, argvalue in callspec.funcargs.items():
+            assert argname not in callspec.params
+            callspec.params[argname] = argvalue
+            arg2params_list = arg2params.setdefault(argname, [])
+            callspec.indices[argname] = len(arg2params_list)
+            arg2params_list.append(argvalue)
+            if argname not in arg2scope:
+                scopenum = callspec._arg2scopenum.get(argname,
+                                                      scopenum_function)
+                arg2scope[argname] = scopes[scopenum]
+        callspec.funcargs.clear()
+
+    # register artificial FixtureDef's so that later at test execution
+    # time we can rely on a proper FixtureDef to exist for fixture setup.
+    arg2fixturedefs = metafunc._arg2fixturedefs
+    for argname, valuelist in arg2params.items():
+        # if we have a scope that is higher than function we need
+        # to make sure we only ever create an according fixturedef on
+        # a per-scope basis. We thus store and cache the fixturedef on the
+        # node related to the scope.
+        scope = arg2scope[argname]
+        node = None
+        if scope != "function":
+            node = get_scope_node(collector, scope)
+            if node is None:
+                assert scope == "class" and isinstance(collector, _pytest.python.Module)
+                # use module-level collector for class-scope (for now)
+                node = collector
+        if node and argname in node._name2pseudofixturedef:
+            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
+        else:
+            fixturedef =  FixtureDef(fixturemanager, '', argname,
+                           get_direct_param_fixture_func,
+                           arg2scope[argname],
+                           valuelist, False, False)
+            arg2fixturedefs[argname] = [fixturedef]
+            if node is not None:
+                node._name2pseudofixturedef[argname] = fixturedef
+
+
+
+def getfixturemarker(obj):
+    """ return fixturemarker or None if it doesn't exist or raised
+    exceptions."""
+    try:
+        return getattr(obj, "_pytestfixturefunction", None)
+    except Exception:
+        # some objects raise errors like request (from flask import request)
+        # we don't expect them to be fixture functions
+        return None
+
+
+
+def get_parametrized_fixture_keys(item, scopenum):
+    """ return list of keys for all parametrized arguments which match
+    the specified scope. """
+    assert scopenum < scopenum_function  # function
+    try:
+        cs = item.callspec
+    except AttributeError:
+        pass
+    else:
+        # cs.indices.items() is random order of argnames but
+        # then again different functions (items) can change order of
+        # arguments so it doesn't matter much probably
+        for argname, param_index in cs.indices.items():
+            if cs._arg2scopenum[argname] != scopenum:
+                continue
+            if scopenum == 0:    # session
+                key = (argname, param_index)
+            elif scopenum == 1:  # module
+                key = (argname, param_index, item.fspath)
+            elif scopenum == 2:  # class
+                key = (argname, param_index, item.fspath, item.cls)
+            yield key
+
+
+# algorithm for sorting on a per-parametrized resource setup basis
+# it is called for scopenum==0 (session) first and performs sorting
+# down to the lower scopes such as to minimize number of "high scope"
+# setups and teardowns
+
+def reorder_items(items):
+    argkeys_cache = {}
+    for scopenum in range(0, scopenum_function):
+        argkeys_cache[scopenum] = d = {}
+        for item in items:
+            keys = set(get_parametrized_fixture_keys(item, scopenum))
+            if keys:
+                d[item] = keys
+    return reorder_items_atscope(items, set(), argkeys_cache, 0)
+
+def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
+    if scopenum >= scopenum_function or len(items) < 3:
+        return items
+    items_done = []
+    while 1:
+        items_before, items_same, items_other, newignore = \
+                slice_items(items, ignore, argkeys_cache[scopenum])
+        items_before = reorder_items_atscope(
+                            items_before, ignore, argkeys_cache,scopenum+1)
+        if items_same is None:
+            # nothing to reorder in this scope
+            assert items_other is None
+            return items_done + items_before
+        items_done.extend(items_before)
+        items = items_same + items_other
+        ignore = newignore
+
+
+def slice_items(items, ignore, scoped_argkeys_cache):
+    # we pick the first item which uses a fixture instance in the
+    # requested scope and which we haven't seen yet.  We slice the input
+    # items list into a list of items_nomatch, items_same and
+    # items_other
+    if scoped_argkeys_cache:  # do we need to do work at all?
+        it = iter(items)
+        # first find a slicing key
+        for i, item in enumerate(it):
+            argkeys = scoped_argkeys_cache.get(item)
+            if argkeys is not None:
+                argkeys = argkeys.difference(ignore)
+                if argkeys:  # found a slicing key
+                    slicing_argkey = argkeys.pop()
+                    items_before = items[:i]
+                    items_same = [item]
+                    items_other = []
+                    # now slice the remainder of the list
+                    for item in it:
+                        argkeys = scoped_argkeys_cache.get(item)
+                        if argkeys and slicing_argkey in argkeys and \
+                            slicing_argkey not in ignore:
+                            items_same.append(item)
+                        else:
+                            items_other.append(item)
+                    newignore = ignore.copy()
+                    newignore.add(slicing_argkey)
+                    return (items_before, items_same, items_other, newignore)
+    return items, None, None, None
+
+
+def fillfixtures(function):
+    """ fill missing funcargs for a test function. """
+    try:
+        request = function._request
+    except AttributeError:
+        # XXX this special code path is only expected to execute
+        # with the oejskit plugin.  It uses classes with funcargs
+        # and we thus have to work a bit to allow this.
+        fm = function.session._fixturemanager
+        fi = fm.getfixtureinfo(function.parent, function.obj, None)
+        function._fixtureinfo = fi
+        request = function._request = FixtureRequest(function)
+        request._fillfixtures()
+        # prune out funcargs for jstests
+        newfuncargs = {}
+        for name in fi.argnames:
+            newfuncargs[name] = function.funcargs[name]
+        function.funcargs = newfuncargs
+    else:
+        request._fillfixtures()
+
+
+
+def get_direct_param_fixture_func(request):
+    return request.param
+
+class FuncFixtureInfo:
+    def __init__(self, argnames, names_closure, name2fixturedefs):
+        self.argnames = argnames
+        self.names_closure = names_closure
+        self.name2fixturedefs = name2fixturedefs
+
+
+class FixtureRequest(FuncargnamesCompatAttr):
+    """ A request for a fixture from a test or fixture function.
+
+    A request object gives access to the requesting test context
+    and has an optional ``param`` attribute in case
+    the fixture is parametrized indirectly.
+    """
+
+    def __init__(self, pyfuncitem):
+        self._pyfuncitem = pyfuncitem
+        #: fixture for which this request is being performed
+        self.fixturename = None
+        #: Scope string, one of "function", "class", "module", "session"
+        self.scope = "function"
+        self._fixture_values = {}  # argname -> fixture value
+        self._fixture_defs = {}  # argname -> FixtureDef
+        fixtureinfo = pyfuncitem._fixtureinfo
+        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
+        self._arg2index = {}
+        self._fixturemanager = pyfuncitem.session._fixturemanager
+
+    @property
+    def fixturenames(self):
+        # backward incompatible note: now a readonly property
+        return list(self._pyfuncitem._fixtureinfo.names_closure)
+
+    @property
+    def node(self):
+        """ underlying collection node (depends on current request scope)"""
+        return self._getscopeitem(self.scope)
+
+
+    def _getnextfixturedef(self, argname):
+        fixturedefs = self._arg2fixturedefs.get(argname, None)
+        if fixturedefs is None:
+            # we arrive here because of a dynamic call to
+            # getfixturevalue(argname) usage which was naturally
+            # not known at parsing/collection time
+            parentid = self._pyfuncitem.parent.nodeid
+            fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
+            self._arg2fixturedefs[argname] = fixturedefs
+        # fixturedefs list is immutable so we maintain a decreasing index
+        index = self._arg2index.get(argname, 0) - 1
+        if fixturedefs is None or (-index > len(fixturedefs)):
+            raise FixtureLookupError(argname, self)
+        self._arg2index[argname] = index
+        return fixturedefs[index]
+
+    @property
+    def config(self):
+        """ the pytest config object associated with this request. """
+        return self._pyfuncitem.config
+
+
+    @scopeproperty()
+    def function(self):
+        """ test function object if the request has a per-function scope. """
+        return self._pyfuncitem.obj
+
+    @scopeproperty("class")
+    def cls(self):
+        """ class (can be None) where the test function was collected. """
+        clscol = self._pyfuncitem.getparent(_pytest.python.Class)
+        if clscol:
+            return clscol.obj
+
+    @property
+    def instance(self):
+        """ instance (can be None) on which test function was collected. """
+        # unittest support hack, see _pytest.unittest.TestCaseFunction
+        try:
+            return self._pyfuncitem._testcase
+        except AttributeError:
+            function = getattr(self, "function", None)
+            if function is not None:
+                return py.builtin._getimself(function)
+
+    @scopeproperty()
+    def module(self):
+        """ python module object where the test function was collected. """
+        return self._pyfuncitem.getparent(_pytest.python.Module).obj
+
+    @scopeproperty()
+    def fspath(self):
+        """ the file system path of the test module which collected this test. """
+        return self._pyfuncitem.fspath
+
+    @property
+    def keywords(self):
+        """ keywords/markers dictionary for the underlying node. """
+        return self.node.keywords
+
+    @property
+    def session(self):
+        """ pytest session object. """
+        return self._pyfuncitem.session
+
+    def addfinalizer(self, finalizer):
+        """ add finalizer/teardown function to be called after the
+        last test within the requesting test context finished
+        execution. """
+        # XXX usually this method is shadowed by fixturedef specific ones
+        self._addfinalizer(finalizer, scope=self.scope)
+
+    def _addfinalizer(self, finalizer, scope):
+        colitem = self._getscopeitem(scope)
+        self._pyfuncitem.session._setupstate.addfinalizer(
+            finalizer=finalizer, colitem=colitem)
+
+    def applymarker(self, marker):
+        """ Apply a marker to a single test function invocation.
+        This method is useful if you don't want to have a keyword/marker
+        on all function invocations.
+
+        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
+            created by a call to ``pytest.mark.NAME(...)``.
+        """
+        try:
+            self.node.keywords[marker.markname] = marker
+        except AttributeError:
+            raise ValueError(marker)
+
+    def raiseerror(self, msg):
+        """ raise a FixtureLookupError with the given message. """
+        raise self._fixturemanager.FixtureLookupError(None, self, msg)
+
+    def _fillfixtures(self):
+        item = self._pyfuncitem
+        fixturenames = getattr(item, "fixturenames", self.fixturenames)
+        for argname in fixturenames:
+            if argname not in item.funcargs:
+                item.funcargs[argname] = self.getfixturevalue(argname)
+
+    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
+        """ (deprecated) Return a testing resource managed by ``setup`` &
+        ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
+        ``teardown`` function will be called so that subsequent calls to
+        ``setup`` would recreate the resource.  With pytest-2.3 you often
+        do not need ``cached_setup()`` as you can directly declare a scope
+        on a fixture function and register a finalizer through
+        ``request.addfinalizer()``.
+
+        :arg teardown: function receiving a previously setup resource.
+        :arg setup: a no-argument function creating a resource.
+        :arg scope: a string value out of ``function``, ``class``, ``module``
+            or ``session`` indicating the caching lifecycle of the resource.
+        :arg extrakey: added to internal caching key of (funcargname, scope).
+        """
+        if not hasattr(self.config, '_setupcache'):
+            self.config._setupcache = {} # XXX weakref?
+        cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
+        cache = self.config._setupcache
+        try:
+            val = cache[cachekey]
+        except KeyError:
+            self._check_scope(self.fixturename, self.scope, scope)
+            val = setup()
+            cache[cachekey] = val
+            if teardown is not None:
+                def finalizer():
+                    del cache[cachekey]
+                    teardown(val)
+                self._addfinalizer(finalizer, scope=scope)
+        return val
+
+    def getfixturevalue(self, argname):
+        """ Dynamically run a named fixture function.
+
+        Declaring fixtures via function argument is recommended where possible.
+        But if you can only decide whether to use another fixture at test
+        setup time, you may use this function to retrieve it inside a fixture
+        or test function body.
+        """
+        return self._get_active_fixturedef(argname).cached_result[0]
+
+    def getfuncargvalue(self, argname):
+        """ Deprecated, use getfixturevalue. """
+        from _pytest import deprecated
+        warnings.warn(
+            deprecated.GETFUNCARGVALUE,
+            DeprecationWarning)
+        return self.getfixturevalue(argname)
+
+    def _get_active_fixturedef(self, argname):
+        try:
+            return self._fixture_defs[argname]
+        except KeyError:
+            try:
+                fixturedef = self._getnextfixturedef(argname)
+            except FixtureLookupError:
+                if argname == "request":
+                    class PseudoFixtureDef:
+                        cached_result = (self, [0], None)
+                        scope = "function"
+                    return PseudoFixtureDef
+                raise
+        # remove indent to prevent the python3 exception
+        # from leaking into the call
+        result = self._getfixturevalue(fixturedef)
+        self._fixture_values[argname] = result
+        self._fixture_defs[argname] = fixturedef
+        return fixturedef
+
+    def _get_fixturestack(self):
+        current = self
+        l = []
+        while 1:
+            fixturedef = getattr(current, "_fixturedef", None)
+            if fixturedef is None:
+                l.reverse()
+                return l
+            l.append(fixturedef)
+            current = current._parent_request
+
+    def _getfixturevalue(self, fixturedef):
+        # prepare a subrequest object before calling fixture function
+        # (latter managed by fixturedef)
+        argname = fixturedef.argname
+        funcitem = self._pyfuncitem
+        scope = fixturedef.scope
+        try:
+            param = funcitem.callspec.getparam(argname)
+        except (AttributeError, ValueError):
+            param = NOTSET
+            param_index = 0
+            if fixturedef.params is not None:
+                frame = inspect.stack()[3]
+                frameinfo = inspect.getframeinfo(frame[0])
+                source_path = frameinfo.filename
+                source_lineno = frameinfo.lineno
+                source_path = py.path.local(source_path)
+                if source_path.relto(funcitem.config.rootdir):
+                    source_path = source_path.relto(funcitem.config.rootdir)
+                msg = (
+                    "The requested fixture has no parameter defined for the "
+                    "current test.\n\nRequested fixture '{0}' defined in:\n{1}"
+                    "\n\nRequested here:\n{2}:{3}".format(
+                        fixturedef.argname,
+                        getlocation(fixturedef.func, funcitem.config.rootdir),
+                        source_path,
+                        source_lineno,
+                    )
+                )
+                fail(msg)
+        else:
+            # indices might not be set if old-style metafunc.addcall() was used
+            param_index = funcitem.callspec.indices.get(argname, 0)
+            # if a parametrize invocation set a scope it will override
+            # the static scope defined with the fixture function
+            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
+            if paramscopenum is not None:
+                scope = scopes[paramscopenum]
+
+        subrequest = SubRequest(self, scope, param, param_index, fixturedef)
+
+        # check if a higher-level scoped fixture accesses a lower level one
+        subrequest._check_scope(argname, self.scope, scope)
+
+        # clear sys.exc_info before invoking the fixture (python bug?)
+        # if its not explicitly cleared it will leak into the call
+        exc_clear()
+        try:
+            # call the fixture function
+            val = fixturedef.execute(request=subrequest)
+        finally:
+            # if fixture function failed it might have registered finalizers
+            self.session._setupstate.addfinalizer(fixturedef.finish,
+                                                  subrequest.node)
+        return val
+
+    def _check_scope(self, argname, invoking_scope, requested_scope):
+        if argname == "request":
+            return
+        if scopemismatch(invoking_scope, requested_scope):
+            # try to report something helpful
+            lines = self._factorytraceback()
+            fail("ScopeMismatch: You tried to access the %r scoped "
+                 "fixture %r with a %r scoped request object, "
+                 "involved factories\n%s" % (
+                    (requested_scope, argname, invoking_scope, "\n".join(lines))),
+                pytrace=False)
+
+    def _factorytraceback(self):
+        lines = []
+        for fixturedef in self._get_fixturestack():
+            factory = fixturedef.func
+            fs, lineno = getfslineno(factory)
+            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
+            args = _format_args(factory)
+            lines.append("%s:%d:  def %s%s" % (
+                p, lineno, factory.__name__, args))
+        return lines
+
+    def _getscopeitem(self, scope):
+        if scope == "function":
+            # this might also be a non-function Item despite its attribute name
+            return self._pyfuncitem
+        node = get_scope_node(self._pyfuncitem, scope)
+        if node is None and scope == "class":
+            # fallback to function item itself
+            node = self._pyfuncitem
+        assert node
+        return node
+
+    def __repr__(self):
+        return "<FixtureRequest for %r>" %(self.node)
+
+
+class SubRequest(FixtureRequest):
+    """ a sub request for handling getting a fixture from a
+    test function/fixture. """
+    def __init__(self, request, scope, param, param_index, fixturedef):
+        self._parent_request = request
+        self.fixturename = fixturedef.argname
+        if param is not NOTSET:
+            self.param = param
+        self.param_index = param_index
+        self.scope = scope
+        self._fixturedef = fixturedef
+        self.addfinalizer = fixturedef.addfinalizer
+        self._pyfuncitem = request._pyfuncitem
+        self._fixture_values  = request._fixture_values
+        self._fixture_defs = request._fixture_defs
+        self._arg2fixturedefs = request._arg2fixturedefs
+        self._arg2index = request._arg2index
+        self._fixturemanager = request._fixturemanager
+
+    def __repr__(self):
+        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
+
+
+class ScopeMismatchError(Exception):
+    """ A fixture function tries to use a different fixture function
+    which has a lower scope (e.g. a Session one calls a function one)
+    """
+
+
+scopes = "session module class function".split()
+scopenum_function = scopes.index("function")
+
+
+def scopemismatch(currentscope, newscope):
+    return scopes.index(newscope) > scopes.index(currentscope)
+
+
+def scope2index(scope, descr, where=None):
+    """Look up the index of ``scope`` and raise a descriptive value error
+    if not defined.
+    """
+    try:
+        return scopes.index(scope)
+    except ValueError:
+        raise ValueError(
+            "{0} {1}has an unsupported scope value '{2}'".format(
+                descr, 'from {0} '.format(where) if where else '',
+                scope)
+        )
+
+
+class FixtureLookupError(LookupError):
+    """ could not return a requested Fixture (missing or invalid). """
+    def __init__(self, argname, request, msg=None):
+        self.argname = argname
+        self.request = request
+        self.fixturestack = request._get_fixturestack()
+        self.msg = msg
+
+    def formatrepr(self):
+        tblines = []
+        addline = tblines.append
+        stack = [self.request._pyfuncitem.obj]
+        stack.extend(map(lambda x: x.func, self.fixturestack))
+        msg = self.msg
+        if msg is not None:
+            # the last fixture raised an error, let's present
+            # it at the requesting side
+            stack = stack[:-1]
+        for function in stack:
+            fspath, lineno = getfslineno(function)
+            try:
+                lines, _ = inspect.getsourcelines(get_real_func(function))
+            except (IOError, IndexError, TypeError):
+                error_msg = "file %s, line %s: source code not available"
+                addline(error_msg % (fspath, lineno+1))
+            else:
+                addline("file %s, line %s" % (fspath, lineno+1))
+                for i, line in enumerate(lines):
+                    line = line.rstrip()
+                    addline("  " + line)
+                    if line.lstrip().startswith('def'):
+                        break
+
+        if msg is None:
+            fm = self.request._fixturemanager
+            available = []
+            parentid = self.request._pyfuncitem.parent.nodeid
+            for name, fixturedefs in fm._arg2fixturedefs.items():
+                faclist = list(fm._matchfactories(fixturedefs, parentid))
+                if faclist and name not in available:
+                    available.append(name)
+            msg = "fixture %r not found" % (self.argname,)
+            msg += "\n available fixtures: %s" %(", ".join(sorted(available)),)
+            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
+
+        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
+
+
+class FixtureLookupErrorRepr(TerminalRepr):
+    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
+        self.tblines = tblines
+        self.errorstring = errorstring
+        self.filename = filename
+        self.firstlineno = firstlineno
+        self.argname = argname
+
+    def toterminal(self, tw):
+        # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
+        for tbline in self.tblines:
+            tw.line(tbline.rstrip())
+        lines = self.errorstring.split("\n")
+        if lines:
+            tw.line('{0}       {1}'.format(FormattedExcinfo.fail_marker,
+                                           lines[0].strip()), red=True)
+            for line in lines[1:]:
+                tw.line('{0}       {1}'.format(FormattedExcinfo.flow_marker,
+                                               line.strip()), red=True)
+        tw.line()
+        tw.line("%s:%d" % (self.filename, self.firstlineno+1))
+
+
+def fail_fixturefunc(fixturefunc, msg):
+    fs, lineno = getfslineno(fixturefunc)
+    location = "%s:%s" % (fs, lineno+1)
+    source = _pytest._code.Source(fixturefunc)
+    fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
+         pytrace=False)
+
+
+def call_fixture_func(fixturefunc, request, kwargs):
+    yieldctx = is_generator(fixturefunc)
+    if yieldctx:
+        it = fixturefunc(**kwargs)
+        res = next(it)
+
+        def teardown():
+            try:
+                next(it)
+            except StopIteration:
+                pass
+            else:
+                fail_fixturefunc(fixturefunc,
+                    "yield_fixture function has more than one 'yield'")
+
+        request.addfinalizer(teardown)
+    else:
+        res = fixturefunc(**kwargs)
+    return res
+
+
+class FixtureDef:
+    """ A container for a factory definition. """
+    def __init__(self, fixturemanager, baseid, argname, func, scope, params,
+                 unittest=False, ids=None):
+        self._fixturemanager = fixturemanager
+        self.baseid = baseid or ''
+        self.has_location = baseid is not None
+        self.func = func
+        self.argname = argname
+        self.scope = scope
+        self.scopenum = scope2index(
+            scope or "function",
+            descr='fixture {0}'.format(func.__name__),
+            where=baseid
+        )
+        self.params = params
+        startindex = unittest and 1 or None
+        self.argnames = getfuncargnames(func, startindex=startindex)
+        self.unittest = unittest
+        self.ids = ids
+        self._finalizer = []
+
+    def addfinalizer(self, finalizer):
+        self._finalizer.append(finalizer)
+
+    def finish(self):
+        exceptions = []
+        try:
+            while self._finalizer:
+                try:
+                    func = self._finalizer.pop()
+                    func()
+                except:
+                    exceptions.append(sys.exc_info())
+            if exceptions:
+                e = exceptions[0]
+                del exceptions  # ensure we don't keep all frames alive because of the traceback
+                py.builtin._reraise(*e)
+
+        finally:
+            ihook = self._fixturemanager.session.ihook
+            ihook.pytest_fixture_post_finalizer(fixturedef=self)
+            # even if finalization fails, we invalidate
+            # the cached fixture value
+            if hasattr(self, "cached_result"):
+                del self.cached_result
+
+    def execute(self, request):
+        # get required arguments and register our own finish()
+        # with their finalization
+        for argname in self.argnames:
+            fixturedef = request._get_active_fixturedef(argname)
+            if argname != "request":
+                fixturedef.addfinalizer(self.finish)
+
+        my_cache_key = request.param_index
+        cached_result = getattr(self, "cached_result", None)
+        if cached_result is not None:
+            result, cache_key, err = cached_result
+            if my_cache_key == cache_key:
+                if err is not None:
+                    py.builtin._reraise(*err)
+                else:
+                    return result
+            # we have a previous but differently parametrized fixture instance
+            # so we need to tear it down before creating a new one
+            self.finish()
+            assert not hasattr(self, "cached_result")
+
+        ihook = self._fixturemanager.session.ihook
+        return ihook.pytest_fixture_setup(fixturedef=self, request=request)
+
+    def __repr__(self):
+        return ("<FixtureDef name=%r scope=%r baseid=%r >" %
+                (self.argname, self.scope, self.baseid))
+
+def pytest_fixture_setup(fixturedef, request):
+    """ Execution of fixture setup. """
+    kwargs = {}
+    for argname in fixturedef.argnames:
+        fixdef = request._get_active_fixturedef(argname)
+        result, arg_cache_key, exc = fixdef.cached_result
+        request._check_scope(argname, request.scope, fixdef.scope)
+        kwargs[argname] = result
+
+    fixturefunc = fixturedef.func
+    if fixturedef.unittest:
+        if request.instance is not None:
+            # bind the unbound method to the TestCase instance
+            fixturefunc = fixturedef.func.__get__(request.instance)
+    else:
+        # the fixture function needs to be bound to the actual
+        # request.instance so that code working with "fixturedef" behaves
+        # as expected.
+        if request.instance is not None:
+            fixturefunc = getimfunc(fixturedef.func)
+            if fixturefunc != fixturedef.func:
+                fixturefunc = fixturefunc.__get__(request.instance)
+    my_cache_key = request.param_index
+    try:
+        result = call_fixture_func(fixturefunc, request, kwargs)
+    except Exception:
+        fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
+        raise
+    fixturedef.cached_result = (result, my_cache_key, None)
+    return result
+
+
+class FixtureFunctionMarker:
+    def __init__(self, scope, params, autouse=False, ids=None, name=None):
+        self.scope = scope
+        self.params = params
+        self.autouse = autouse
+        self.ids = ids
+        self.name = name
+
+    def __call__(self, function):
+        if isclass(function):
+            raise ValueError(
+                    "class fixtures not supported (may be in the future)")
+        function._pytestfixturefunction = self
+        return function
+
+
+
+def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
+    """ (return a) decorator to mark a fixture factory function.
+
+    This decorator can be used (with or without parameters) to define a
+    fixture function.  The name of the fixture function can later be
+    referenced to cause its invocation ahead of running tests: test
+    modules or classes can use the pytest.mark.usefixtures(fixturename)
+    marker.  Test functions can directly use fixture names as input
+    arguments in which case the fixture instance returned from the fixture
+    function will be injected.
+
+    :arg scope: the scope for which this fixture is shared, one of
+                "function" (default), "class", "module" or "session".
+
+    :arg params: an optional list of parameters which will cause multiple
+                invocations of the fixture function and all of the tests
+                using it.
+
+    :arg autouse: if True, the fixture func is activated for all tests that
+                can see it.  If False (the default) then an explicit
+                reference is needed to activate the fixture.
+
+    :arg ids: list of string ids each corresponding to the params
+                so that they are part of the test id. If no ids are provided
+                they will be generated automatically from the params.
+
+    :arg name: the name of the fixture. This defaults to the name of the
+                decorated function. If a fixture is used in the same module in
+                which it is defined, the function name of the fixture will be
+                shadowed by the function arg that requests the fixture; one way
+                to resolve this is to name the decorated function
+                ``fixture_<fixturename>`` and then use
+                ``@pytest.fixture(name='<fixturename>')``.
+
+    Fixtures can optionally provide their values to test functions using a ``yield`` statement,
+    instead of ``return``. In this case, the code block after the ``yield`` statement is executed
+    as teardown code regardless of the test outcome. A fixture function must yield exactly once.
+    """
+    if callable(scope) and params is None and autouse == False:
+        # direct decoration
+        return FixtureFunctionMarker(
+                "function", params, autouse, name=name)(scope)
+    if params is not None and not isinstance(params, (list, tuple)):
+        params = list(params)
+    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
+
+
+def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
+    """ (return a) decorator to mark a yield-fixture factory function.
+
+    .. deprecated:: 3.0
+        Use :py:func:`pytest.fixture` directly instead.
+    """
+    if callable(scope) and params is None and not autouse:
+        # direct decoration
+        return FixtureFunctionMarker(
+                "function", params, autouse, ids=ids, name=name)(scope)
+    else:
+        return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
+
+
+defaultfuncargprefixmarker = fixture()
+
+
+@fixture(scope="session")
+def pytestconfig(request):
+    """ the pytest config object with access to command line opts."""
+    return request.config
+
+
+class FixtureManager:
+    """
+    pytest fixtures definitions and information is stored and managed
+    from this class.
+
+    During collection fm.parsefactories() is called multiple times to parse
+    fixture function definitions into FixtureDef objects and internal
+    data structures.
+
+    During collection of test functions, metafunc-mechanics instantiate
+    a FuncFixtureInfo object which is cached per node/func-name.
+    This FuncFixtureInfo object is later retrieved by Function nodes
+    which themselves offer a fixturenames attribute.
+
+    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
+    relevant for a particular function.  An initial list of fixtures is
+    assembled like this:
+
+    - ini-defined usefixtures
+    - autouse-marked fixtures along the collection chain up from the function
+    - usefixtures markers at module/class/function level
+    - test function funcargs
+
+    Subsequently the funcfixtureinfo.fixturenames attribute is computed
+    as the closure of the fixtures needed to setup the initial fixtures,
+    i. e. fixtures needed by fixture functions themselves are appended
+    to the fixturenames list.
+
+    Upon the test-setup phases all fixturenames are instantiated, retrieved
+    by a lookup of their FuncFixtureInfo.
+    """
+
+    _argprefix = "pytest_funcarg__"
+    FixtureLookupError = FixtureLookupError
+    FixtureLookupErrorRepr = FixtureLookupErrorRepr
+
+    def __init__(self, session):
+        self.session = session
+        self.config = session.config
+        self._arg2fixturedefs = {}
+        self._holderobjseen = set()
+        self._arg2finish = {}
+        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
+        session.config.pluginmanager.register(self, "funcmanage")
+
+
+    def getfixtureinfo(self, node, func, cls, funcargs=True):
+        if funcargs and not hasattr(node, "nofuncargs"):
+            if cls is not None:
+                startindex = 1
+            else:
+                startindex = None
+            argnames = getfuncargnames(func, startindex)
+        else:
+            argnames = ()
+        usefixtures = getattr(func, "usefixtures", None)
+        initialnames = argnames
+        if usefixtures is not None:
+            initialnames = usefixtures.args + initialnames
+        fm = node.session._fixturemanager
+        names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
+                                                              node)
+        return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
+
+    def pytest_plugin_registered(self, plugin):
+        nodeid = None
+        try:
+            p = py.path.local(plugin.__file__)
+        except AttributeError:
+            pass
+        else:
+            # construct the base nodeid which is later used to check
+            # what fixtures are visible for particular tests (as denoted
+            # by their test id)
+            if p.basename.startswith("conftest.py"):
+                nodeid = p.dirpath().relto(self.config.rootdir)
+                if p.sep != "/":
+                    nodeid = nodeid.replace(p.sep, "/")
+        self.parsefactories(plugin, nodeid)
+
+    def _getautousenames(self, nodeid):
+        """ return a tuple of fixture names to be used. """
+        autousenames = []
+        for baseid, basenames in self._nodeid_and_autousenames:
+            if nodeid.startswith(baseid):
+                if baseid:
+                    i = len(baseid)
+                    nextchar = nodeid[i:i+1]
+                    if nextchar and nextchar not in ":/":
+                        continue
+                autousenames.extend(basenames)
+        # make sure autousenames are sorted by scope, scopenum 0 is session
+        autousenames.sort(
+            key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
+        return autousenames
+
+    def getfixtureclosure(self, fixturenames, parentnode):
+        # collect the closure of all fixtures , starting with the given
+        # fixturenames as the initial set.  As we have to visit all
+        # factory definitions anyway, we also return a arg2fixturedefs
+        # mapping so that the caller can reuse it and does not have
+        # to re-discover fixturedefs again for each fixturename
+        # (discovering matching fixtures for a given name/node is expensive)
+
+        parentid = parentnode.nodeid
+        fixturenames_closure = self._getautousenames(parentid)
+
+        def merge(otherlist):
+            for arg in otherlist:
+                if arg not in fixturenames_closure:
+                    fixturenames_closure.append(arg)
+
+        merge(fixturenames)
+        arg2fixturedefs = {}
+        lastlen = -1
+        while lastlen != len(fixturenames_closure):
+            lastlen = len(fixturenames_closure)
+            for argname in fixturenames_closure:
+                if argname in arg2fixturedefs:
+                    continue
+                fixturedefs = self.getfixturedefs(argname, parentid)
+                if fixturedefs:
+                    arg2fixturedefs[argname] = fixturedefs
+                    merge(fixturedefs[-1].argnames)
+        return fixturenames_closure, arg2fixturedefs
+
+    def pytest_generate_tests(self, metafunc):
+        for argname in metafunc.fixturenames:
+            faclist = metafunc._arg2fixturedefs.get(argname)
+            if faclist:
+                fixturedef = faclist[-1]
+                if fixturedef.params is not None:
+                    func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
+                    # skip directly parametrized arguments
+                    argnames = func_params[0]
+                    if not isinstance(argnames, (tuple, list)):
+                        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+                    if argname not in func_params and argname not in argnames:
+                        metafunc.parametrize(argname, fixturedef.params,
+                                             indirect=True, scope=fixturedef.scope,
+                                             ids=fixturedef.ids)
+            else:
+                continue  # will raise FixtureLookupError at setup time
+
+    def pytest_collection_modifyitems(self, items):
+        # separate parametrized setups
+        items[:] = reorder_items(items)
+
+    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
+        if nodeid is not NOTSET:
+            holderobj = node_or_obj
+        else:
+            holderobj = node_or_obj.obj
+            nodeid = node_or_obj.nodeid
+        if holderobj in self._holderobjseen:
+            return
+        self._holderobjseen.add(holderobj)
+        autousenames = []
+        for name in dir(holderobj):
+            # The attribute can be an arbitrary descriptor, so the attribute
+            # access below can raise. safe_getatt() ignores such exceptions.
+            obj = safe_getattr(holderobj, name, None)
+            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
+            # or are "@pytest.fixture" marked
+            marker = getfixturemarker(obj)
+            if marker is None:
+                if not name.startswith(self._argprefix):
+                    continue
+                if not callable(obj):
+                    continue
+                marker = defaultfuncargprefixmarker
+                from _pytest import deprecated
+                self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
+                name = name[len(self._argprefix):]
+            elif not isinstance(marker, FixtureFunctionMarker):
+                # magic globals  with __getattr__ might have got us a wrong
+                # fixture attribute
+                continue
+            else:
+                if marker.name:
+                    name = marker.name
+                msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
+                      'and be decorated with @pytest.fixture:\n%s' % name
+                assert not name.startswith(self._argprefix), msg
+
+            fixture_def = FixtureDef(self, nodeid, name, obj,
+                                     marker.scope, marker.params,
+                                     unittest=unittest, ids=marker.ids)
+
+            faclist = self._arg2fixturedefs.setdefault(name, [])
+            if fixture_def.has_location:
+                faclist.append(fixture_def)
+            else:
+                # fixturedefs with no location are at the front
+                # so this inserts the current fixturedef after the
+                # existing fixturedefs from external plugins but
+                # before the fixturedefs provided in conftests.
+                i = len([f for f in faclist if not f.has_location])
+                faclist.insert(i, fixture_def)
+            if marker.autouse:
+                autousenames.append(name)
+
+        if autousenames:
+            self._nodeid_and_autousenames.append((nodeid or '', autousenames))
+
+    def getfixturedefs(self, argname, nodeid):
+        """
+        Gets a list of fixtures which are applicable to the given node id.
+
+        :param str argname: name of the fixture to search for
+        :param str nodeid: full node id of the requesting test.
+        :return: list[FixtureDef]
+        """
+        try:
+            fixturedefs = self._arg2fixturedefs[argname]
+        except KeyError:
+            return None
+        else:
+            return tuple(self._matchfactories(fixturedefs, nodeid))
+
+    def _matchfactories(self, fixturedefs, nodeid):
+        for fixturedef in fixturedefs:
+            if nodeid.startswith(fixturedef.baseid):
+                yield fixturedef
+
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/freeze_support.py
@@ -0,0 +1,44 @@
+"""
+Provides a function to report all internal modules for using freezing tools
+pytest
+"""
+from __future__ import absolute_import, division, print_function
+
+
+
+def freeze_includes():
+    """
+    Returns a list of module names used by py.test that should be
+    included by cx_freeze.
+    """
+    import py
+    import _pytest
+    result = list(_iter_all_modules(py))
+    result += list(_iter_all_modules(_pytest))
+    return result
+
+
+def _iter_all_modules(package, prefix=''):
+    """
+    Iterates over the names of all modules that can be found in the given
+    package, recursively.
+    Example:
+        _iter_all_modules(_pytest) ->
+            ['_pytest.assertion.newinterpret',
+             '_pytest.capture',
+             '_pytest.core',
+             ...
+            ]
+    """
+    import os
+    import pkgutil
+    if type(package) is not str:
+        path, prefix = package.__path__[0], package.__name__ + '.'
+    else:
+        path = package
+    for _, name, is_package in pkgutil.iter_modules([path]):
+        if is_package:
+            for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
+                yield prefix + m
+        else:
+            yield prefix + name
deleted file mode 100755
--- a/third_party/python/pytest/_pytest/genscript.py
+++ /dev/null
@@ -1,132 +0,0 @@
-""" (deprecated) generate a single-file self-contained version of pytest """
-import os
-import sys
-import pkgutil
-
-import py
-import _pytest
-
-
-
-def find_toplevel(name):
-    for syspath in sys.path:
-        base = py.path.local(syspath)
-        lib = base/name
-        if lib.check(dir=1):
-            return lib
-        mod = base.join("%s.py" % name)
-        if mod.check(file=1):
-            return mod
-    raise LookupError(name)
-
-def pkgname(toplevel, rootpath, path):
-    parts = path.parts()[len(rootpath.parts()):]
-    return '.'.join([toplevel] + [x.purebasename for x in parts])
-
-def pkg_to_mapping(name):
-    toplevel = find_toplevel(name)
-    name2src = {}
-    if toplevel.check(file=1): # module
-        name2src[toplevel.purebasename] = toplevel.read()
-    else: # package
-        for pyfile in toplevel.visit('*.py'):
-            pkg = pkgname(name, toplevel, pyfile)
-            name2src[pkg] = pyfile.read()
-        # with wheels py source code might be not be installed
-        # and the resulting genscript is useless, just bail out.
-        assert name2src, "no source code found for %r at %r" %(name, toplevel)
-    return name2src
-
-def compress_mapping(mapping):
-    import base64, pickle, zlib
-    data = pickle.dumps(mapping, 2)
-    data = zlib.compress(data, 9)
-    data = base64.encodestring(data)
-    data = data.decode('ascii')
-    return data
-
-
-def compress_packages(names):
-    mapping = {}
-    for name in names:
-        mapping.update(pkg_to_mapping(name))
-    return compress_mapping(mapping)
-
-def generate_script(entry, packages):
-    data = compress_packages(packages)
-    tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
-    exe = tmpl.read()
-    exe = exe.replace('@SOURCES@', data)
-    exe = exe.replace('@ENTRY@', entry)
-    return exe
-
-
-def pytest_addoption(parser):
-    group = parser.getgroup("debugconfig")
-    group.addoption("--genscript", action="store", default=None,
-        dest="genscript", metavar="path",
-        help="create standalone pytest script at given target path.")
-
-def pytest_cmdline_main(config):
-    import _pytest.config
-    genscript = config.getvalue("genscript")
-    if genscript:
-        tw = _pytest.config.create_terminal_writer(config)
-        tw.line("WARNING: usage of genscript is deprecated.",
-                red=True)
-        deps =  ['py', '_pytest', 'pytest']  # pluggy is vendored
-        if sys.version_info < (2,7):
-            deps.append("argparse")
-            tw.line("generated script will run on python2.6-python3.3++")
-        else:
-            tw.line("WARNING: generated script will not run on python2.6 "
-                    "due to 'argparse' dependency. Use python2.6 "
-                    "to generate a python2.6 compatible script", red=True)
-        script = generate_script(
-            'import pytest; raise SystemExit(pytest.cmdline.main())',
-            deps,
-        )
-        genscript = py.path.local(genscript)
-        genscript.write(script)
-        tw.line("generated pytest standalone script: %s" % genscript,
-                bold=True)
-        return 0
-
-
-def pytest_namespace():
-    return {'freeze_includes': freeze_includes}
-
-
-def freeze_includes():
-    """
-    Returns a list of module names used by py.test that should be
-    included by cx_freeze.
-    """
-    result = list(_iter_all_modules(py))
-    result += list(_iter_all_modules(_pytest))
-    return result
-
-
-def _iter_all_modules(package, prefix=''):
-    """
-    Iterates over the names of all modules that can be found in the given
-    package, recursively.
-
-    Example:
-        _iter_all_modules(_pytest) ->
-            ['_pytest.assertion.newinterpret',
-             '_pytest.capture',
-             '_pytest.core',
-             ...
-            ]
-    """
-    if type(package) is not str:
-        path, prefix = package.__path__[0], package.__name__ + '.'
-    else:
-        path = package
-    for _, name, is_package in pkgutil.iter_modules([path]):
-        if is_package:
-            for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
-                yield prefix + m
-        else:
-            yield prefix + name
--- a/third_party/python/pytest/_pytest/helpconfig.py
+++ b/third_party/python/pytest/_pytest/helpconfig.py
@@ -1,30 +1,69 @@
 """ version info, help messages, tracing configuration.  """
+from __future__ import absolute_import, division, print_function
+
 import py
 import pytest
+from _pytest.config import PrintHelp
 import os, sys
+from argparse import Action
+
+
+class HelpAction(Action):
+    """This is an argparse Action that will raise an exception in
+    order to skip the rest of the argument parsing when --help is passed.
+    This prevents argparse from quitting due to missing required arguments
+    when any are defined, for example by ``pytest_addoption``.
+    This is similar to the way that the builtin argparse --help option is
+    implemented by raising SystemExit.
+    """
+
+    def __init__(self,
+                 option_strings,
+                 dest=None,
+                 default=False,
+                 help=None):
+        super(HelpAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            const=True,
+            default=default,
+            nargs=0,
+            help=help)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, self.const)
+
+        # We should only skip the rest of the parsing after preparse is done
+        if getattr(parser._parser, 'after_preparse', False):
+            raise PrintHelp
+
 
 def pytest_addoption(parser):
     group = parser.getgroup('debugconfig')
     group.addoption('--version', action="store_true",
             help="display pytest lib version and import information.")
-    group._addoption("-h", "--help", action="store_true", dest="help",
+    group._addoption("-h", "--help", action=HelpAction, dest="help",
             help="show help message and configuration info")
     group._addoption('-p', action="append", dest="plugins", default = [],
                metavar="name",
                help="early-load given plugin (multi-allowed). "
                     "To avoid loading of plugins, use the `no:` prefix, e.g. "
                     "`no:doctest`.")
     group.addoption('--traceconfig', '--trace-config',
                action="store_true", default=False,
                help="trace considerations of conftest.py files."),
     group.addoption('--debug',
                action="store_true", dest="debug", default=False,
                help="store internal tracing debug information in 'pytestdebug.log'.")
+    group._addoption(
+        '-o', '--override-ini', nargs='*', dest="override_ini",
+        action="append",
+        help="override config option with option=value style, e.g. `-o xfail_strict=True`.")
 
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_cmdline_parse():
     outcome = yield
     config = outcome.get_result()
     if config.option.debug:
         path = os.path.abspath("pytestdebug.log")
@@ -32,22 +71,24 @@ def pytest_cmdline_parse():
         debugfile.write("versions pytest-%s, py-%s, "
                 "python-%s\ncwd=%s\nargs=%s\n\n" %(
             pytest.__version__, py.__version__,
             ".".join(map(str, sys.version_info)),
             os.getcwd(), config._origargs))
         config.trace.root.setwriter(debugfile.write)
         undo_tracing = config.pluginmanager.enable_tracing()
         sys.stderr.write("writing pytestdebug information to %s\n" % path)
+
         def unset_tracing():
             debugfile.close()
             sys.stderr.write("wrote pytestdebug information to %s\n" %
                              debugfile.name)
             config.trace.root.setwriter(None)
             undo_tracing()
+
         config.add_cleanup(unset_tracing)
 
 def pytest_cmdline_main(config):
     if config.option.version:
         p = py.path.local(pytest.__file__)
         sys.stderr.write("This is pytest version %s, imported from %s\n" %
             (pytest.__version__, p))
         plugininfo = getpluginversioninfo(config)
@@ -62,19 +103,18 @@ def pytest_cmdline_main(config):
         return 0
 
 def showhelp(config):
     reporter = config.pluginmanager.get_plugin('terminalreporter')
     tw = reporter._tw
     tw.write(config._parser.optparser.format_help())
     tw.line()
     tw.line()
-    #tw.sep( "=", "config file settings")
-    tw.line("[pytest] ini-options in the next "
-            "pytest.ini|tox.ini|setup.cfg file:")
+    tw.line("[pytest] ini-options in the first "
+            "pytest.ini|tox.ini|setup.cfg file found:")
     tw.line()
 
     for name in config._parser._ininames:
         help, type, default = config._parser._inidict[name]
         if type is None:
             type = "string"
         spec = "%s (%s)" % (name, type)
         line = "  %-24s %s" %(spec, help)
@@ -87,18 +127,18 @@ def showhelp(config):
         ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
         ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
     ]
     for name, help in vars:
         tw.line("  %-24s %s" % (name, help))
     tw.line()
     tw.line()
 
-    tw.line("to see available markers type: py.test --markers")
-    tw.line("to see available fixtures type: py.test --fixtures")
+    tw.line("to see available markers type: pytest --markers")
+    tw.line("to see available fixtures type: pytest --fixtures")
     tw.line("(shown according to specified file_or_dir or current dir "
             "if not specified)")
 
     for warningreport in reporter.stats.get('warnings', []):
         tw.line("warning : " + warningreport.message, red=True)
     return
 
 
--- a/third_party/python/pytest/_pytest/hookspec.py
+++ b/third_party/python/pytest/_pytest/hookspec.py
@@ -11,17 +11,19 @@ hookspec = HookspecMarker("pytest")
 @hookspec(historic=True)
 def pytest_addhooks(pluginmanager):
     """called at plugin registration time to allow adding new hooks via a call to
     pluginmanager.add_hookspecs(module_or_class, prefix)."""
 
 
 @hookspec(historic=True)
 def pytest_namespace():
-    """return dict of name->object to be made globally available in
+    """
+    DEPRECATED: this hook causes direct monkeypatching on pytest, its use is strongly discouraged
+    return dict of name->object to be made globally available in
     the pytest namespace.  This hook is called at plugin registration
     time.
     """
 
 @hookspec(historic=True)
 def pytest_plugin_registered(plugin, manager):
     """ a new pytest plugin got registered. """
 
@@ -29,17 +31,17 @@ def pytest_plugin_registered(plugin, man
 @hookspec(historic=True)
 def pytest_addoption(parser):
     """register argparse-style options and ini-style config values,
     called once at the beginning of a test run.
 
     .. note::
 
         This function should be implemented only in plugins or ``conftest.py``
-        files situated at the tests root directory due to how py.test
+        files situated at the tests root directory due to how pytest
         :ref:`discovers plugins during startup <pluginorder>`.
 
     :arg parser: To add command line options, call
         :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
         To add ini-file values call :py:func:`parser.addini(...)
         <_pytest.config.Parser.addini>`.
 
     Options can later be accessed through the
@@ -66,56 +68,66 @@ def pytest_configure(config):
 # -------------------------------------------------------------------------
 # Bootstrapping hooks called for plugins registered early enough:
 # internal and 3rd party plugins as well as directly
 # discoverable conftest.py local plugins.
 # -------------------------------------------------------------------------
 
 @hookspec(firstresult=True)
 def pytest_cmdline_parse(pluginmanager, args):
-    """return initialized config object, parsing the specified args. """
+    """return initialized config object, parsing the specified args.
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_cmdline_preparse(config, args):
     """(deprecated) modify command line arguments before option parsing. """
 
 @hookspec(firstresult=True)
 def pytest_cmdline_main(config):
     """ called for performing the main command line action. The default
-    implementation will invoke the configure hooks and runtest_mainloop. """
+    implementation will invoke the configure hooks and runtest_mainloop.
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_load_initial_conftests(early_config, parser, args):
     """ implements the loading of initial conftest files ahead
     of command line option parsing. """
 
 
 # -------------------------------------------------------------------------
 # collection hooks
 # -------------------------------------------------------------------------
 
 @hookspec(firstresult=True)
 def pytest_collection(session):
-    """ perform the collection protocol for the given session. """
+    """ perform the collection protocol for the given session.
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_collection_modifyitems(session, config, items):
     """ called after collection has been performed, may filter or re-order
     the items in-place."""
 
 def pytest_collection_finish(session):
     """ called after collection has been performed and modified. """
 
 @hookspec(firstresult=True)
 def pytest_ignore_collect(path, config):
     """ return True to prevent considering this path for collection.
     This hook is consulted for all files and directories prior to calling
     more specific hooks.
+
+    Stops at first non-None result, see :ref:`firstresult`
     """
 
 @hookspec(firstresult=True)
 def pytest_collect_directory(path, parent):
-    """ called before traversing a directory for collection files. """
+    """ called before traversing a directory for collection files.
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_collect_file(path, parent):
     """ return collection Node or None for the given path. Any new node
     needs to have the specified ``parent`` as a parent."""
 
 # logging hooks for collection
 def pytest_collectstart(collector):
     """ collector starts collecting. """
@@ -126,49 +138,66 @@ def pytest_itemcollected(item):
 def pytest_collectreport(report):
     """ collector finished collecting. """
 
 def pytest_deselected(items):
     """ called for test items deselected by keyword. """
 
 @hookspec(firstresult=True)
 def pytest_make_collect_report(collector):
-    """ perform ``collector.collect()`` and return a CollectReport. """
+    """ perform ``collector.collect()`` and return a CollectReport.
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 # -------------------------------------------------------------------------
 # Python test function related hooks
 # -------------------------------------------------------------------------
 
 @hookspec(firstresult=True)
 def pytest_pycollect_makemodule(path, parent):
     """ return a Module collector or None for the given path.
     This hook will be called for each matching test module path.
     The pytest_collect_file hook needs to be used if you want to
     create test modules for files that do not match as a test module.
-    """
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 @hookspec(firstresult=True)
 def pytest_pycollect_makeitem(collector, name, obj):
-    """ return custom item/collector for a python object in a module, or None.  """
+    """ return custom item/collector for a python object in a module, or None.
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 @hookspec(firstresult=True)
 def pytest_pyfunc_call(pyfuncitem):
-    """ call underlying test function. """
+    """ call underlying test function.
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_generate_tests(metafunc):
     """ generate (multiple) parametrized calls to a test function."""
 
+@hookspec(firstresult=True)
+def pytest_make_parametrize_id(config, val, argname):
+    """Return a user-friendly string representation of the given ``val`` that will be used
+    by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
+    The parameter name is available as ``argname``, if required.
+
+    Stops at first non-None result, see :ref:`firstresult` """
+
 # -------------------------------------------------------------------------
 # generic runtest related hooks
 # -------------------------------------------------------------------------
 
 @hookspec(firstresult=True)
 def pytest_runtestloop(session):
     """ called for performing the main runtest loop
-    (after collection finished). """
+    (after collection finished).
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_itemstart(item, node):
     """ (deprecated, use pytest_runtest_logstart). """
 
 @hookspec(firstresult=True)
 def pytest_runtest_protocol(item, nextitem):
     """ implements the runtest_setup/call/teardown protocol for
     the given test item, including capturing exceptions and calling
@@ -176,17 +205,19 @@ def pytest_runtest_protocol(item, nextit
 
     :arg item: test item for which the runtest protocol is performed.
 
     :arg nextitem: the scheduled-to-be-next test item (or None if this
                    is the end my friend).  This argument is passed on to
                    :py:func:`pytest_runtest_teardown`.
 
     :return boolean: True if no further hook implementations should be invoked.
-    """
+
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_runtest_logstart(nodeid, location):
     """ signal the start of running a single test item. """
 
 def pytest_runtest_setup(item):
     """ called before ``pytest_runtest_call(item)``. """
 
 def pytest_runtest_call(item):
@@ -199,79 +230,106 @@ def pytest_runtest_teardown(item, nextit
                    test item is scheduled).  This argument can be used to
                    perform exact teardowns, i.e. calling just enough finalizers
                    so that nextitem only needs to call setup-functions.
     """
 
 @hookspec(firstresult=True)
 def pytest_runtest_makereport(item, call):
     """ return a :py:class:`_pytest.runner.TestReport` object
-    for the given :py:class:`pytest.Item` and
+    for the given :py:class:`pytest.Item <_pytest.main.Item>` and
     :py:class:`_pytest.runner.CallInfo`.
-    """
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 def pytest_runtest_logreport(report):
     """ process a test setup/call/teardown report relating to
     the respective phase of executing a test. """
 
 # -------------------------------------------------------------------------
+# Fixture related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_fixture_setup(fixturedef, request):
+    """ performs fixture setup execution.
+
+    Stops at first non-None result, see :ref:`firstresult` """
+
+def pytest_fixture_post_finalizer(fixturedef):
+    """ called after fixture teardown, but before the cache is cleared so
+    the fixture result cache ``fixturedef.cached_result`` can
+    still be accessed."""
+
+# -------------------------------------------------------------------------
 # test session related hooks
 # -------------------------------------------------------------------------
 
 def pytest_sessionstart(session):
     """ before session.main() is called. """
 
 def pytest_sessionfinish(session, exitstatus):
     """ whole test run finishes. """
 
 def pytest_unconfigure(config):
     """ called before test process is exited.  """
 
 
 # -------------------------------------------------------------------------
-# hooks for customising the assert methods
+# hooks for customizing the assert methods
 # -------------------------------------------------------------------------
 
 def pytest_assertrepr_compare(config, op, left, right):
     """return explanation for comparisons in failing assert expressions.
 
     Return None for no custom explanation, otherwise return a list
     of strings.  The strings will be joined by newlines but any newlines
     *in* a string will be escaped.  Note that all but the first line will
-    be indented sligthly, the intention is for the first line to be a summary.
+    be indented slightly, the intention is for the first line to be a summary.
     """
 
 # -------------------------------------------------------------------------
 # hooks for influencing reporting (invoked from _pytest_terminal)
 # -------------------------------------------------------------------------
 
 def pytest_report_header(config, startdir):
-    """ return a string to be displayed as header info for terminal reporting."""
+    """ return a string to be displayed as header info for terminal reporting.
+
+    .. note::
+
+        This function should be implemented only in plugins or ``conftest.py``
+        files situated at the tests root directory due to how pytest
+        :ref:`discovers plugins during startup <pluginorder>`.
+    """
 
 @hookspec(firstresult=True)
 def pytest_report_teststatus(report):
-    """ return result-category, shortletter and verbose word for reporting."""
+    """ return result-category, shortletter and verbose word for reporting.
 
-def pytest_terminal_summary(terminalreporter):
+    Stops at first non-None result, see :ref:`firstresult` """
+
+def pytest_terminal_summary(terminalreporter, exitstatus):
     """ add additional section in terminal summary reporting.  """
 
 
 @hookspec(historic=True)
 def pytest_logwarning(message, code, nodeid, fslocation):
     """ process a warning specified by a message, a code string,
     a nodeid and fslocation (both of which may be None
     if the warning is not tied to a partilar node/location)."""
 
 # -------------------------------------------------------------------------
 # doctest hooks
 # -------------------------------------------------------------------------
 
 @hookspec(firstresult=True)
 def pytest_doctest_prepare_content(content):
-    """ return processed content for a given doctest"""
+    """ return processed content for a given doctest
+
+    Stops at first non-None result, see :ref:`firstresult` """
 
 # -------------------------------------------------------------------------
 # error handling and internal debugging hooks
 # -------------------------------------------------------------------------
 
 def pytest_internalerror(excrepr, excinfo):
     """ called for internal errors. """
 
--- a/third_party/python/pytest/_pytest/junitxml.py
+++ b/third_party/python/pytest/_pytest/junitxml.py
@@ -1,37 +1,42 @@
 """
     report test results in JUnit-XML format,
     for use with Jenkins and build integration servers.
 
 
 Based on initial code from Ross Lawley.
+
+Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
+src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
 """
-# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
-# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+from __future__ import absolute_import, division, print_function
 
+import functools
 import py
 import os
 import re
 import sys
 import time
 import pytest
+from _pytest.config import filename_arg
 
 # Python 2.X and 3.X compatibility
 if sys.version_info[0] < 3:
     from codecs import open
 else:
     unichr = chr
     unicode = str
     long = int
 
 
 class Junit(py.xml.Namespace):
     pass
 
+
 # We need to get the subset of the invalid unicode ranges according to
 # XML 1.0 which are valid in this python build.  Hence we calculate
 # this dynamically instead of hardcoding it.  The spec range of valid
 # chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
 #                    | [#x10000-#x10FFFF]
 _legal_chars = (0x09, 0x0A, 0x0d)
 _legal_ranges = (
     (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
@@ -97,43 +102,41 @@ class _NodeReporter(object):
             classnames.insert(0, self.xml.prefix)
         attrs = {
             "classname": ".".join(classnames),
             "name": bin_xml_escape(names[-1]),
             "file": testreport.location[0],
         }
         if testreport.location[1] is not None:
             attrs["line"] = testreport.location[1]
+        if hasattr(testreport, "url"):
+            attrs["url"] = testreport.url
         self.attrs = attrs
 
     def to_xml(self):
         testcase = Junit.testcase(time=self.duration, **self.attrs)
         testcase.append(self.make_properties_node())
         for node in self.nodes:
             testcase.append(node)
         return testcase
 
     def _add_simple(self, kind, message, data=None):
         data = bin_xml_escape(data)
         node = kind(data, message=message)
         self.append(node)
 
-    def _write_captured_output(self, report):
+    def write_captured_output(self, report):
         for capname in ('out', 'err'):
-            allcontent = ""
-            for name, content in report.get_sections("Captured std%s" %
-                                                     capname):
-                allcontent += content
-            if allcontent:
+            content = getattr(report, 'capstd' + capname)
+            if content:
                 tag = getattr(Junit, 'system-' + capname)
-                self.append(tag(bin_xml_escape(allcontent)))
+                self.append(tag(bin_xml_escape(content)))
 
     def append_pass(self, report):
         self.add_stats('passed')
-        self._write_captured_output(report)
 
     def append_failure(self, report):
         # msg = str(report.longrepr.reprtraceback.extraline)
         if hasattr(report, "wasxfail"):
             self._add_simple(
                 Junit.skipped,
                 "xfail-marked test passes unexpectedly")
         else:
@@ -142,57 +145,59 @@ class _NodeReporter(object):
             elif isinstance(report.longrepr, (unicode, str)):
                 message = report.longrepr
             else:
                 message = str(report.longrepr)
             message = bin_xml_escape(message)
             fail = Junit.failure(message=message)
             fail.append(bin_xml_escape(report.longrepr))
             self.append(fail)
-        self._write_captured_output(report)
 
     def append_collect_error(self, report):
         # msg = str(report.longrepr.reprtraceback.extraline)
         self.append(Junit.error(bin_xml_escape(report.longrepr),
                                 message="collection failure"))
 
     def append_collect_skipped(self, report):
         self._add_simple(
             Junit.skipped, "collection skipped", report.longrepr)
 
     def append_error(self, report):
+        if getattr(report, 'when', None) == 'teardown':
+            msg = "test teardown failure"
+        else:
+            msg = "test setup failure"
         self._add_simple(
-            Junit.error, "test setup failure", report.longrepr)
-        self._write_captured_output(report)
+            Junit.error, msg, report.longrepr)
 
     def append_skipped(self, report):
         if hasattr(report, "wasxfail"):
             self._add_simple(
                 Junit.skipped, "expected test failure", report.wasxfail
             )
         else:
             filename, lineno, skipreason = report.longrepr
             if skipreason.startswith("Skipped: "):
                 skipreason = bin_xml_escape(skipreason[9:])
             self.append(
                 Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
                               type="pytest.skip",
                               message=skipreason))
-        self._write_captured_output(report)
+        self.write_captured_output(report)
 
     def finalize(self):
         data = self.to_xml().unicode(indent=0)
         self.__dict__.clear()
         self.to_xml = lambda: py.xml.raw(data)
 
 
 @pytest.fixture
 def record_xml_property(request):
-    """Fixture that adds extra xml properties to the tag for the calling test.
-    The fixture is callable with (name, value), with value being automatically
+    """Add extra xml properties to the tag for the calling test.
+    The fixture is callable with ``(name, value)``, with value being automatically
     xml-encoded.
     """
     request.node.warn(
         code='C3',
         message='record_xml_property is an experimental feature',
     )
     xml = getattr(request.config, "_xml", None)
     if xml is not None:
@@ -207,31 +212,33 @@ def record_xml_property(request):
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting")
     group.addoption(
         '--junitxml', '--junit-xml',
         action="store",
         dest="xmlpath",
         metavar="path",
+        type=functools.partial(filename_arg, optname="--junitxml"),
         default=None,
         help="create junit-xml style report file at given path.")
     group.addoption(
         '--junitprefix', '--junit-prefix',
         action="store",
         metavar="str",
         default=None,
         help="prepend prefix to classnames in junit-xml output")
+    parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest")
 
 
 def pytest_configure(config):
     xmlpath = config.option.xmlpath
     # prevent opening xmllog on slave nodes (xdist)
     if xmlpath and not hasattr(config, 'slaveinput'):
-        config._xml = LogXML(xmlpath, config.option.junitprefix)
+        config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name"))
         config.pluginmanager.register(config._xml)
 
 
 def pytest_unconfigure(config):
     xml = getattr(config, '_xml', None)
     if xml:
         del config._xml
         config.pluginmanager.unregister(xml)
@@ -248,28 +255,33 @@ def mangle_test_address(address):
     names[0] = names[0].replace("/", '.')
     names[0] = _py_ext_re.sub("", names[0])
     # put any params back
     names[-1] += possible_open_bracket + params
     return names
 
 
 class LogXML(object):
-    def __init__(self, logfile, prefix):
+    def __init__(self, logfile, prefix, suite_name="pytest"):
         logfile = os.path.expanduser(os.path.expandvars(logfile))
         self.logfile = os.path.normpath(os.path.abspath(logfile))
         self.prefix = prefix
+        self.suite_name = suite_name
         self.stats = dict.fromkeys([
             'error',
             'passed',
             'failure',
             'skipped',
         ], 0)
         self.node_reporters = {}  # nodeid -> _NodeReporter
         self.node_reporters_ordered = []
+        self.global_properties = []
+        # List of reports that failed on call but teardown is pending.
+        self.open_reports = []
+        self.cnt_double_fail_tests = 0
 
     def finalize(self, report):
         nodeid = getattr(report, 'nodeid', report)
         # local hack to handle xdist report order
         slavenode = getattr(report, 'node', None)
         reporter = self.node_reporters.pop((nodeid, slavenode))
         if reporter is not None:
             reporter.finalize()
@@ -279,19 +291,22 @@ class LogXML(object):
         # local hack to handle xdist report order
         slavenode = getattr(report, 'node', None)
 
         key = nodeid, slavenode
 
         if key in self.node_reporters:
             # TODO: breasks for --dist=each
             return self.node_reporters[key]
+
         reporter = _NodeReporter(nodeid, self)
+
         self.node_reporters[key] = reporter
         self.node_reporters_ordered.append(reporter)
+
         return reporter
 
     def add_stats(self, key):
         if key in self.stats:
             self.stats[key] += 1
 
     def _opentestcase(self, report):
         reporter = self.node_reporter(report)
@@ -316,32 +331,64 @@ class LogXML(object):
         possible call order in xdist:
             -> setup node1
             -> call node1
             -> setup node2
             -> call node2
             -> teardown node2
             -> teardown node1
         """
+        close_report = None
         if report.passed:
             if report.when == "call":  # ignore setup/teardown
                 reporter = self._opentestcase(report)
                 reporter.append_pass(report)
         elif report.failed:
+            if report.when == "teardown":
+                # The following vars are needed when xdist plugin is used
+                report_wid = getattr(report, "worker_id", None)
+                report_ii = getattr(report, "item_index", None)
+                close_report = next(
+                    (rep for rep in self.open_reports
+                     if (rep.nodeid == report.nodeid and
+                         getattr(rep, "item_index", None) == report_ii and
+                         getattr(rep, "worker_id", None) == report_wid
+                         )
+                     ), None)
+                if close_report:
+                    # We need to open new testcase in case we have failure in
+                    # call and error in teardown in order to follow junit
+                    # schema
+                    self.finalize(close_report)
+                    self.cnt_double_fail_tests += 1
             reporter = self._opentestcase(report)
             if report.when == "call":
                 reporter.append_failure(report)
+                self.open_reports.append(report)
             else:
                 reporter.append_error(report)
         elif report.skipped:
             reporter = self._opentestcase(report)
             reporter.append_skipped(report)
         self.update_testcase_duration(report)
         if report.when == "teardown":
+            reporter = self._opentestcase(report)
+            reporter.write_captured_output(report)
             self.finalize(report)
+            report_wid = getattr(report, "worker_id", None)
+            report_ii = getattr(report, "item_index", None)
+            close_report = next(
+                (rep for rep in self.open_reports
+                 if (rep.nodeid == report.nodeid and
+                      getattr(rep, "item_index", None) == report_ii and
+                      getattr(rep, "worker_id", None) == report_wid
+                     )
+                 ), None)
+            if close_report:
+                self.open_reports.remove(close_report)
 
     def update_testcase_duration(self, report):
         """accumulates total duration for nodeid from given report and updates
         the Junit.testcase with the new total if already created.
         """
         reporter = self.node_reporter(report)
         reporter.duration += getattr(report, 'duration', 0.0)
 
@@ -364,24 +411,42 @@ class LogXML(object):
     def pytest_sessionfinish(self):
         dirname = os.path.dirname(os.path.abspath(self.logfile))
         if not os.path.isdir(dirname):
             os.makedirs(dirname)
         logfile = open(self.logfile, 'w', encoding='utf-8')
         suite_stop_time = time.time()
         suite_time_delta = suite_stop_time - self.suite_start_time
 
-        numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped']
-
+        numtests = (self.stats['passed'] + self.stats['failure'] +
+                    self.stats['skipped'] + self.stats['error'] -
+                    self.cnt_double_fail_tests)
         logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+
         logfile.write(Junit.testsuite(
+            self._get_global_properties_node(),
             [x.to_xml() for x in self.node_reporters_ordered],
-            name="pytest",
+            name=self.suite_name,
             errors=self.stats['error'],
             failures=self.stats['failure'],
             skips=self.stats['skipped'],
             tests=numtests,
             time="%.3f" % suite_time_delta, ).unicode(indent=0))
         logfile.close()
 
     def pytest_terminal_summary(self, terminalreporter):
         terminalreporter.write_sep("-",
                                    "generated xml file: %s" % (self.logfile))
+
+    def add_global_property(self, name, value):
+        self.global_properties.append((str(name), bin_xml_escape(value)))
+
+    def _get_global_properties_node(self):
+        """Return a Junit node containing custom properties, if any.
+        """
+        if self.global_properties:
+            return Junit.properties(
+                    [
+                        Junit.property(name=name, value=value)
+                        for name, value in self.global_properties
+                    ]
+            )
+        return ''
--- a/third_party/python/pytest/_pytest/main.py
+++ b/third_party/python/pytest/_pytest/main.py
@@ -1,106 +1,120 @@
 """ core implementation of testing process: init, session, runtest loop. """
-import imp
+from __future__ import absolute_import, division, print_function
+
+import functools
 import os
-import re
 import sys
 
 import _pytest
 import _pytest._code
 import py
-import pytest
 try:
     from collections import MutableMapping as MappingMixin
 except ImportError:
     from UserDict import DictMixin as MappingMixin
 
-from _pytest.runner import collect_one_node
+from _pytest.config import directory_arg, UsageError, hookimpl
+from _pytest.runner import collect_one_node, exit
 
 tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
 
 # exitcodes for the command line
 EXIT_OK = 0
 EXIT_TESTSFAILED = 1
 EXIT_INTERRUPTED = 2
 EXIT_INTERNALERROR = 3
 EXIT_USAGEERROR = 4
 EXIT_NOTESTSCOLLECTED = 5
 
-name_re = re.compile("^[a-zA-Z_]\w*$")
 
 def pytest_addoption(parser):
     parser.addini("norecursedirs", "directory patterns to avoid for recursion",
-        type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
+        type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
     parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
         type="args", default=[])
     #parser.addini("dirpatterns",
     #    "patterns specifying possible locations of test files",
     #    type="linelist", default=["**/test_*.txt",
     #            "**/test_*.py", "**/*_test.py"]
     #)
     group = parser.getgroup("general", "running and selection options")
-    group._addoption('-x', '--exitfirst', action="store_true", default=False,
-               dest="exitfirst",
+    group._addoption('-x', '--exitfirst', action="store_const",
+               dest="maxfail", const=1,
                help="exit instantly on first error or failed test."),
     group._addoption('--maxfail', metavar="num",
                action="store", type=int, dest="maxfail", default=0,
                help="exit after first num failures or errors.")
     group._addoption('--strict', action="store_true",
                help="run pytest in strict mode, warnings become errors.")
     group._addoption("-c", metavar="file", type=str, dest="inifilename",
                help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
+    group._addoption("--continue-on-collection-errors", action="store_true",
+               default=False, dest="continue_on_collection_errors",
+               help="Force test execution even if collection errors occur.")
 
     group = parser.getgroup("collect", "collection")
     group.addoption('--collectonly', '--collect-only', action="store_true",
         help="only collect tests, don't execute them."),
     group.addoption('--pyargs', action="store_true",
         help="try to interpret all arguments as python packages.")
     group.addoption("--ignore", action="append", metavar="path",
         help="ignore path during collection (multi-allowed).")
     # when changing this to --conf-cut-dir, config.py Conftest.setinitial
     # needs upgrading as well
     group.addoption('--confcutdir', dest="confcutdir", default=None,
-        metavar="dir",
+        metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
         help="only load conftest.py's relative to specified dir.")
     group.addoption('--noconftest', action="store_true",
         dest="noconftest", default=False,
         help="Don't load any conftest.py files.")
+    group.addoption('--keepduplicates', '--keep-duplicates', action="store_true",
+        dest="keepduplicates", default=False,
+        help="Keep duplicate tests.")
 
     group = parser.getgroup("debugconfig",
         "test session debugging and configuration")
     group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
                help="base temporary directory for this test run.")
 
 
+
 def pytest_namespace():
-    collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
-    return dict(collect=collect)
+    """keeping this one works around a deeper startup issue in pytest
+
+    i tried to find it for a while but the amount of time turned unsustainable,
+    so i put a hack in to revisit later
+    """
+    return {}
+
 
 def pytest_configure(config):
-    pytest.config = config # compatibiltiy
-    if config.option.exitfirst:
-        config.option.maxfail = 1
+    __import__('pytest').config = config # compatibility
+
 
 def wrap_session(config, doit):
     """Skeleton command line program"""
     session = Session(config)
     session.exitstatus = EXIT_OK
     initstate = 0
     try:
         try:
             config._do_configure()
             initstate = 1
             config.hook.pytest_sessionstart(session=session)
             initstate = 2
             session.exitstatus = doit(config, session) or 0
-        except pytest.UsageError:
+        except UsageError:
             raise
         except KeyboardInterrupt:
             excinfo = _pytest._code.ExceptionInfo()
+            if initstate < 2 and isinstance(excinfo.value, exit.Exception):
+                sys.stderr.write('{0}: {1}\n'.format(
+                    excinfo.typename, excinfo.value.msg))
             config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
             session.exitstatus = EXIT_INTERRUPTED
         except:
             excinfo = _pytest._code.ExceptionInfo()
             config.notify_exception(excinfo, config.option)
             session.exitstatus = EXIT_INTERNALERROR
             if excinfo.errisinstance(SystemExit):
                 sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
@@ -110,79 +124,103 @@ def wrap_session(config, doit):
         session.startdir.chdir()
         if initstate >= 2:
             config.hook.pytest_sessionfinish(
                 session=session,
                 exitstatus=session.exitstatus)
         config._ensure_unconfigure()
     return session.exitstatus
 
+
 def pytest_cmdline_main(config):
     return wrap_session(config, _main)
 
+
 def _main(config, session):
     """ default command line protocol for initialization, session,
     running tests and reporting. """
     config.hook.pytest_collection(session=session)
     config.hook.pytest_runtestloop(session=session)
 
     if session.testsfailed:
         return EXIT_TESTSFAILED
     elif session.testscollected == 0:
         return EXIT_NOTESTSCOLLECTED
 
+
 def pytest_collection(session):
     return session.perform_collect()
 
+
 def pytest_runtestloop(session):
+    if (session.testsfailed and
+            not session.config.option.continue_on_collection_errors):
+        raise session.Interrupted(
+            "%d errors during collection" % session.testsfailed)
+
     if session.config.option.collectonly:
         return True
 
-    def getnextitem(i):
-        # this is a function to avoid python2
-        # keeping sys.exc_info set when calling into a test
-        # python2 keeps sys.exc_info till the frame is left
-        try:
-            return session.items[i+1]
-        except IndexError:
-            return None
-
     for i, item in enumerate(session.items):
-        nextitem = getnextitem(i)
+        nextitem = session.items[i+1] if i+1 < len(session.items) else None
         item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
         if session.shouldstop:
             raise session.Interrupted(session.shouldstop)
     return True
 
+
 def pytest_ignore_collect(path, config):
-    p = path.dirpath()
-    ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
+    ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
     ignore_paths = ignore_paths or []
     excludeopt = config.getoption("ignore")
     if excludeopt:
         ignore_paths.extend([py.path.local(x) for x in excludeopt])
-    return path in ignore_paths
+
+    if py.path.local(path) in ignore_paths:
+        return True
+
+    # Skip duplicate paths.
+    keepduplicates = config.getoption("keepduplicates")
+    duplicate_paths = config.pluginmanager._duplicatepaths
+    if not keepduplicates:
+        if path in duplicate_paths:
+            return True
+        else:
+            duplicate_paths.add(path)
+
+    return False
+
 
 class FSHookProxy:
     def __init__(self, fspath, pm, remove_mods):
         self.fspath = fspath
         self.pm = pm
         self.remove_mods = remove_mods
 
     def __getattr__(self, name):
         x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
         self.__dict__[name] = x
         return x
 
-def compatproperty(name):
-    def fget(self):
-        # deprecated - use pytest.name
-        return getattr(pytest, name)
+class _CompatProperty(object):
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, owner):
+        if obj is None:
+            return self
 
-    return property(fget)
+        # TODO: reenable in the features branch
+        # warnings.warn(
+        #     "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format(
+        #         name=self.name, owner=type(owner).__name__),
+        #     PendingDeprecationWarning, stacklevel=2)
+        return getattr(__import__('pytest'), self.name)
+
+
 
 class NodeKeywords(MappingMixin):
     def __init__(self, node):
         self.node = node
         self.parent = node.parent
         self._markers = {node.name: True}
 
     def __getitem__(self, key):
@@ -244,45 +282,46 @@ class Node(object):
         # used for storing artificial fixturedefs for direct parametrization
         self._name2pseudofixturedef = {}
 
     @property
     def ihook(self):
         """ fspath sensitive hook proxy used to call pytest hooks"""
         return self.session.gethookproxy(self.fspath)
 
-    Module = compatproperty("Module")
-    Class = compatproperty("Class")
-    Instance = compatproperty("Instance")
-    Function = compatproperty("Function")
-    File = compatproperty("File")
-    Item = compatproperty("Item")
+    Module = _CompatProperty("Module")
+    Class = _CompatProperty("Class")
+    Instance = _CompatProperty("Instance")
+    Function = _CompatProperty("Function")
+    File = _CompatProperty("File")
+    Item = _CompatProperty("Item")
 
     def _getcustomclass(self, name):
-        cls = getattr(self, name)
-        if cls != getattr(pytest, name):
-            py.log._apiwarn("2.0", "use of node.%s is deprecated, "
-                "use pytest_pycollect_makeitem(...) to create custom "
-                "collection nodes" % name)
+        maybe_compatprop = getattr(type(self), name)
+        if isinstance(maybe_compatprop, _CompatProperty):
+            return getattr(__import__('pytest'), name)
+        else:
+            cls = getattr(self, name)
+            # TODO: reenable in the features branch
+            # warnings.warn("use of node.%s is deprecated, "
+            #    "use pytest_pycollect_makeitem(...) to create custom "
+            #    "collection nodes" % name, category=DeprecationWarning)
         return cls
 
     def __repr__(self):
         return "<%s %r>" %(self.__class__.__name__,
                            getattr(self, 'name', None))
 
     def warn(self, code, message):
         """ generate a warning with the given code and message for this
         item. """
         assert isinstance(code, str)
         fslocation = getattr(self, "location", None)
         if fslocation is None:
             fslocation = getattr(self, "fspath", None)
-        else:
-            fslocation = "%s:%s" % fslocation[:2]
-
         self.ihook.pytest_logwarning.call_historic(kwargs=dict(
             code=code, message=message,
             nodeid=self.nodeid, fslocation=fslocation))
 
     # methods for ordering nodes
     @property
     def nodeid(self):
         """ a ::-separated string denoting its collection tree address. """
@@ -333,19 +372,19 @@ class Node(object):
         chain.reverse()
         return chain
 
     def add_marker(self, marker):
         """ dynamically add a marker object to the node.
 
         ``marker`` can be a string or pytest.mark.* instance.
         """
-        from _pytest.mark import MarkDecorator
+        from _pytest.mark import MarkDecorator, MARK_GEN
         if isinstance(marker, py.builtin._basestring):
-            marker = MarkDecorator(marker)
+            marker = getattr(MARK_GEN, marker)
         elif not isinstance(marker, MarkDecorator):
             raise ValueError("is not a string or pytest.mark.* Marker")
         self.keywords[marker.name] = marker
 
     def get_marker(self, name):
         """ get a marker object from this node or None if
         the node doesn't have a marker with that name. """
         val = self.keywords.get(name, None)
@@ -387,28 +426,37 @@ class Node(object):
     def _repr_failure_py(self, excinfo, style=None):
         fm = self.session._fixturemanager
         if excinfo.errisinstance(fm.FixtureLookupError):
             return excinfo.value.formatrepr()
         tbfilter = True
         if self.config.option.fulltrace:
             style="long"
         else:
+            tb = _pytest._code.Traceback([excinfo.traceback[-1]])
             self._prunetraceback(excinfo)
+            if len(excinfo.traceback) == 0:
+                excinfo.traceback = tb
             tbfilter = False  # prunetraceback already does it
             if style == "auto":
                 style = "long"
         # XXX should excinfo.getrepr record all data and toterminal() process it?
         if style is None:
             if self.config.option.tbstyle == "short":
                 style = "short"
             else:
                 style = "long"
 
-        return excinfo.getrepr(funcargs=True,
+        try:
+            os.getcwd()
+            abspath = False
+        except OSError:
+            abspath = True
+
+        return excinfo.getrepr(funcargs=True, abspath=abspath,
                                showlocals=self.config.option.showlocals,
                                style=style, tbfilter=tbfilter)
 
     repr_failure = _repr_failure_py
 
 class Collector(Node):
     """ Collector instances create children through collect()
         and thus iteratively build a tree.
@@ -425,20 +473,16 @@ class Collector(Node):
 
     def repr_failure(self, excinfo):
         """ represent a collection failure. """
         if excinfo.errisinstance(self.CollectError):
             exc = excinfo.value
             return str(exc.args[0])
         return self._repr_failure_py(excinfo, style="short")
 
-    def _memocollect(self):
-        """ internal helper method to cache results of calling collect(). """
-        return self._memoizedcall('_collected', lambda: list(self.collect()))
-
     def _prunetraceback(self, excinfo):
         if hasattr(self, 'fspath'):
             traceback = excinfo.traceback
             ntraceback = traceback.cut(path=self.fspath)
             if ntraceback == traceback:
                 ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
             excinfo.traceback = ntraceback.filter()
 
@@ -505,69 +549,64 @@ class Interrupted(KeyboardInterrupt):
     __module__ = 'builtins' # for py3
 
 class Session(FSCollector):
     Interrupted = Interrupted
 
     def __init__(self, config):
         FSCollector.__init__(self, config.rootdir, parent=None,
                              config=config, session=self)
-        self._fs2hookproxy = {}
         self.testsfailed = 0
         self.testscollected = 0
         self.shouldstop = False
         self.trace = config.trace.root.get("collection")
         self._norecursepatterns = config.getini("norecursedirs")
         self.startdir = py.path.local()
         self.config.pluginmanager.register(self, name="session")
 
     def _makeid(self):
         return ""
 
-    @pytest.hookimpl(tryfirst=True)
+    @hookimpl(tryfirst=True)
     def pytest_collectstart(self):
         if self.shouldstop:
             raise self.Interrupted(self.shouldstop)
 
-    @pytest.hookimpl(tryfirst=True)
+    @hookimpl(tryfirst=True)
     def pytest_runtest_logreport(self, report):
         if report.failed and not hasattr(report, 'wasxfail'):
             self.testsfailed += 1
             maxfail = self.config.getvalue("maxfail")
             if maxfail and self.testsfailed >= maxfail:
                 self.shouldstop = "stopping after %d failures" % (
                     self.testsfailed)
     pytest_collectreport = pytest_runtest_logreport
 
     def isinitpath(self, path):
         return path in self._initialpaths
 
     def gethookproxy(self, fspath):
-        try:
-            return self._fs2hookproxy[fspath]
-        except KeyError:
-            # check if we have the common case of running
-            # hooks with all conftest.py filesall conftest.py
-            pm = self.config.pluginmanager
-            my_conftestmodules = pm._getconftestmodules(fspath)
-            remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
-            if remove_mods:
-                # one or more conftests are not in use at this fspath
-                proxy = FSHookProxy(fspath, pm, remove_mods)
-            else:
-                # all plugis are active for this fspath
-                proxy = self.config.hook
-
-            self._fs2hookproxy[fspath] = proxy
-            return proxy
+        # check if we have the common case of running
+        # hooks with all conftest.py files
+        pm = self.config.pluginmanager
+        my_conftestmodules = pm._getconftestmodules(fspath)
+        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
+        if remove_mods:
+            # one or more conftests are not in use at this fspath
+            proxy = FSHookProxy(fspath, pm, remove_mods)
+        else:
+            # all plugins are active for this fspath
+            proxy = self.config.hook
+        return proxy
 
     def perform_collect(self, args=None, genitems=True):
         hook = self.config.hook
         try:
             items = self._perform_collect(args, genitems)
+            self.config.pluginmanager.check_pending()
             hook.pytest_collection_modifyitems(session=self,
                 config=self.config, items=items)
         finally:
             hook.pytest_collection_finish(session=self)
         self.testscollected = len(items)
         return items
 
     def _perform_collect(self, args, genitems):
@@ -586,18 +625,18 @@ class Session(FSCollector):
         rep = collect_one_node(self)
         self.ihook.pytest_collectreport(report=rep)
         self.trace.root.indent -= 1
         if self._notfound:
             errors = []
             for arg, exc in self._notfound:
                 line = "(no name %r in any of %r)" % (arg, exc.args[0])
                 errors.append("not found: %s\n%s" % (arg, line))
-                #XXX: test this
-            raise pytest.UsageError(*errors)
+                # XXX: test this
+            raise UsageError(*errors)
         if not genitems:
             return rep.result
         else:
             if rep.passed:
                 for node in rep.result:
                     self.items.extend(self.genitems(node))
             return items
 
@@ -615,17 +654,17 @@ class Session(FSCollector):
                 self._notfound.append((arg, sys.exc_info()[1]))
 
             self.trace.root.indent -= 1
 
     def _collect(self, arg):
         names = self._parsearg(arg)
         path = names.pop(0)
         if path.check(dir=1):
-            assert not names, "invalid arg %r" %(arg,)
+            assert not names, "invalid arg %r" % (arg,)
             for path in path.visit(fil=lambda x: x.check(file=1),
                                    rec=self._recurse, bf=True, sort=True):
                 for x in self._collectfile(path):
                     yield x
         else:
             assert path.check(file=1)
             for x in self.matchnodes(self._collectfile(path), names):
                 yield x
@@ -644,54 +683,51 @@ class Session(FSCollector):
         for pat in self._norecursepatterns:
             if path.check(fnmatch=pat):
                 return False
         ihook = self.gethookproxy(path)
         ihook.pytest_collect_directory(path=path, parent=self)
         return True
 
     def _tryconvertpyarg(self, x):
-        mod = None
-        path = [os.path.abspath('.')] + sys.path
-        for name in x.split('.'):
-            # ignore anything that's not a proper name here
-            # else something like --pyargs will mess up '.'
-            # since imp.find_module will actually sometimes work for it
-            # but it's supposed to be considered a filesystem path
-            # not a package
-            if name_re.match(name) is None:
-                return x
-            try:
-                fd, mod, type_ = imp.find_module(name, path)
-            except ImportError:
-                return x
-            else:
-                if fd is not None:
-                    fd.close()
+        """Convert a dotted module name to path.
 
-            if type_[2] != imp.PKG_DIRECTORY:
-                path = [os.path.dirname(mod)]
-            else:
-                path = [mod]
-        return mod
+        """
+        import pkgutil
+        try:
+            loader = pkgutil.find_loader(x)
+        except ImportError:
+            return x
+        if loader is None:
+            return x
+        # This method is sometimes invoked when AssertionRewritingHook, which
+        # does not define a get_filename method, is already in place:
+        try:
+            path = loader.get_filename(x)
+        except AttributeError:
+            # Retrieve path from AssertionRewritingHook:
+            path = loader.modules[x][0].co_filename
+        if loader.is_package(x):
+            path = os.path.dirname(path)
+        return path
 
     def _parsearg(self, arg):
         """ return (fspath, names) tuple after checking the file exists. """
-        arg = str(arg)
+        parts = str(arg).split("::")
         if self.config.option.pyargs:
-            arg = self._tryconvertpyarg(arg)
-        parts = str(arg).split("::")
+            parts[0] = self._tryconvertpyarg(parts[0])
         relpath = parts[0].replace("/", os.sep)
         path = self.config.invocation_dir.join(relpath, abs=True)
         if not path.check():
             if self.config.option.pyargs:
-                msg = "file or package not found: "
+                raise UsageError(
+                    "file or package not found: " + arg +
+                    " (missing __init__.py?)")
             else:
-                msg = "file not found: "
-            raise pytest.UsageError(msg + arg)
+                raise UsageError("file not found: " + arg)
         parts[0] = path
         return parts
 
     def matchnodes(self, matching, names):
         self.trace("matchnodes", matching, names)
         self.trace.root.indent += 1
         nodes = self._matchnodes(matching, names)
         num = len(nodes)
@@ -704,41 +740,45 @@ class Session(FSCollector):
     def _matchnodes(self, matching, names):
         if not matching or not names:
             return matching
         name = names[0]
         assert name
         nextnames = names[1:]
         resultnodes = []
         for node in matching:
-            if isinstance(node, pytest.Item):
+            if isinstance(node, Item):
                 if not names:
                     resultnodes.append(node)
                 continue
-            assert isinstance(node, pytest.Collector)
+            assert isinstance(node, Collector)
             rep = collect_one_node(node)
             if rep.passed:
                 has_matched = False
                 for x in rep.result:
                     # TODO: remove parametrized workaround once collection structure contains parametrization
                     if x.name == name or x.name.split("[")[0] == name:
                         resultnodes.extend(self.matchnodes([x], nextnames))
                         has_matched = True
                 # XXX accept IDs that don't have "()" for class instances
                 if not has_matched and len(rep.result) == 1 and x.name == "()":
                     nextnames.insert(0, name)
                     resultnodes.extend(self.matchnodes([x], nextnames))
-            node.ihook.pytest_collectreport(report=rep)
+            else:
+                # report collection failures here to avoid failing to run some test
+                # specified in the command line because the module could not be
+                # imported (#134)
+                node.ihook.pytest_collectreport(report=rep)
         return resultnodes
 
     def genitems(self, node):
         self.trace("genitems", node)
-        if isinstance(node, pytest.Item):
+        if isinstance(node, Item):
             node.ihook.pytest_itemcollected(item=node)
             yield node
         else:
-            assert isinstance(node, pytest.Collector)
+            assert isinstance(node, Collector)
             rep = collect_one_node(node)
             if rep.passed:
                 for subnode in rep.result:
                     for x in self.genitems(subnode):
                         yield x
             node.ihook.pytest_collectreport(report=rep)
--- a/third_party/python/pytest/_pytest/mark.py
+++ b/third_party/python/pytest/_pytest/mark.py
@@ -1,30 +1,89 @@
 """ generic mechanism for marking and selecting python functions. """
+from __future__ import absolute_import, division, print_function
+
 import inspect
+from collections import namedtuple
+from operator import attrgetter
+from .compat import imap
+
+
+def alias(name):
+    return property(attrgetter(name), doc='alias for ' + name)
+
+
+class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')):
+    @classmethod
+    def param(cls, *values, **kw):
+        marks = kw.pop('marks', ())
+        if isinstance(marks, MarkDecorator):
+            marks = marks,
+        else:
+            assert isinstance(marks, (tuple, list, set))
+
+        def param_extract_id(id=None):
+            return id
+
+        id = param_extract_id(**kw)
+        return cls(values, marks, id)
+
+    @classmethod
+    def extract_from(cls, parameterset, legacy_force_tuple=False):
+        """
+        :param parameterset:
+            a legacy style parameterset that may or may not be a tuple,
+            and may or may not be wrapped into a mess of mark objects
+
+        :param legacy_force_tuple:
+            enforce tuple wrapping so single argument tuple values
+            don't get decomposed and break tests
+
+        """
+
+        if isinstance(parameterset, cls):
+            return parameterset
+        if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple:
+            return cls.param(parameterset)
+
+        newmarks = []
+        argval = parameterset
+        while isinstance(argval, MarkDecorator):
+            newmarks.append(MarkDecorator(Mark(
+                argval.markname, argval.args[:-1], argval.kwargs)))
+            argval = argval.args[-1]
+        assert not isinstance(argval, ParameterSet)
+        if legacy_force_tuple:
+            argval = argval,
+
+        return cls(argval, marks=newmarks, id=None)
+
+    @property
+    def deprecated_arg_dict(self):
+        return dict((mark.name, mark) for mark in self.marks)
 
 
 class MarkerError(Exception):
 
     """Error in use of a pytest marker/attribute."""
 
 
-def pytest_namespace():
-    return {'mark': MarkGenerator()}
+def param(*values, **kw):
+    return ParameterSet.param(*values, **kw)
 
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group._addoption(
         '-k',
         action="store", dest="keyword", default='', metavar="EXPRESSION",
         help="only run tests which match the given substring expression. "
              "An expression is a python evaluatable expression "
              "where all names are substring-matched against test names "
-             "and their parent classes. Example: -k 'test_method or test "
+             "and their parent classes. Example: -k 'test_method or test_"
              "other' matches all test functions and classes whose name "
              "contains 'test_method' or 'test_other'. "
              "Additionally keywords are matched to classes and functions "
              "containing extra names in their 'extra_keyword_matches' set, "
              "as well as functions which have names assigned directly to them."
     )
 
     group._addoption(
@@ -49,27 +108,29 @@ def pytest_cmdline_main(config):
         tw = _pytest.config.create_terminal_writer(config)
         for line in config.getini("markers"):
             name, rest = line.split(":", 1)
             tw.write("@pytest.mark.%s:" % name, bold=True)
             tw.line(rest)
             tw.line()
         config._ensure_unconfigure()
         return 0
+
+
 pytest_cmdline_main.tryfirst = True
 
 
 def pytest_collection_modifyitems(items, config):
     keywordexpr = config.option.keyword.lstrip()
     matchexpr = config.option.markexpr
     if not keywordexpr and not matchexpr:
         return
     # pytest used to allow "-" for negating
     # but today we just allow "-" at the beginning, use "not" instead
-    # we probably remove "-" alltogether soon
+    # we probably remove "-" altogether soon
     if keywordexpr.startswith("-"):
         keywordexpr = "not " + keywordexpr[1:]
     selectuntil = False
     if keywordexpr[-1:] == ":":
         selectuntil = True
         keywordexpr = keywordexpr[:-1]
 
     remaining = []
@@ -155,54 +216,61 @@ def matchkeyword(colitem, keywordexpr):
         # special case to allow for simple "-k pass" and "-k 1.3"
         return mapping[keywordexpr]
     elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
         return not mapping[keywordexpr[4:]]
     return eval(keywordexpr, {}, mapping)
 
 
 def pytest_configure(config):
-    import pytest
+    config._old_mark_config = MARK_GEN._config
     if config.option.strict:
-        pytest.mark._config = config
+        MARK_GEN._config = config
+
+
+def pytest_unconfigure(config):
+    MARK_GEN._config = getattr(config, '_old_mark_config', None)
 
 
 class MarkGenerator:
     """ Factory for :class:`MarkDecorator` objects - exposed as
     a ``pytest.mark`` singleton instance.  Example::
 
          import pytest
          @pytest.mark.slowtest
          def test_function():
             pass
 
     will set a 'slowtest' :class:`MarkInfo` object
     on the ``test_function`` object. """
+    _config = None
+
 
     def __getattr__(self, name):
         if name[0] == "_":
             raise AttributeError("Marker name must NOT start with underscore")
-        if hasattr(self, '_config'):
+        if self._config is not None:
             self._check(name)
-        return MarkDecorator(name)
+        return MarkDecorator(Mark(name, (), {}))
 
     def _check(self, name):
         try:
             if name in self._markers:
                 return
         except AttributeError:
             pass
         self._markers = l = set()
         for line in self._config.getini("markers"):
             beginning = line.split(":", 1)
             x = beginning[0].split("(", 1)[0]
             l.add(x)
         if name not in self._markers:
             raise AttributeError("%r not a registered marker" % (name,))
 
+
 def istestfunc(func):
     return hasattr(func, "__call__") and \
         getattr(func, "__name__", "<lambda>") != "<lambda>"
 
 class MarkDecorator:
     """ A decorator for test functions and test classes.  When applied
     it will create :class:`MarkInfo` objects which may be
     :ref:`retrieved by hooks as item keywords <excontrolskip>`.
@@ -230,29 +298,33 @@ class MarkDecorator:
          MarkDecorator's content updated with the arguments passed to this
          call.
 
     Note: The rules above prevent MarkDecorator objects from storing only a
     single function or class reference as their positional argument with no
     additional keyword or positional arguments.
 
     """
-    def __init__(self, name, args=None, kwargs=None):
-        self.name = name
-        self.args = args or ()
-        self.kwargs = kwargs or {}
+    def __init__(self, mark):
+        assert isinstance(mark, Mark), repr(mark)
+        self.mark = mark
+
+    name = alias('mark.name')
+    args = alias('mark.args')
+    kwargs = alias('mark.kwargs')
 
     @property
     def markname(self):
         return self.name # for backward-compat (2.4.1 had this attr)
 
+    def __eq__(self, other):
+        return self.mark == other.mark
+
     def __repr__(self):
-        d = self.__dict__.copy()
-        name = d.pop('name')
-        return "<MarkDecorator %r %r>" % (name, d)
+        return "<MarkDecorator %r>" % (self.mark,)
 
     def __call__(self, *args, **kwargs):
         """ if passed a single callable argument: decorate it with mark info.
             otherwise add *args/**kwargs in-place to mark information. """
         if args and not kwargs:
             func = args[0]
             is_class = inspect.isclass(func)
             if len(args) == 1 and (istestfunc(func) or is_class):
@@ -265,47 +337,55 @@ class MarkDecorator:
                         # from a superclass by accident
                         mark_list = mark_list + [self]
                         func.pytestmark = mark_list
                     else:
                         func.pytestmark = [self]
                 else:
                     holder = getattr(func, self.name, None)
                     if holder is None:
-                        holder = MarkInfo(
-                            self.name, self.args, self.kwargs
-                        )
+                        holder = MarkInfo(self.mark)
                         setattr(func, self.name, holder)
                     else:
-                        holder.add(self.args, self.kwargs)
+                        holder.add_mark(self.mark)
                 return func
-        kw = self.kwargs.copy()
-        kw.update(kwargs)
-        args = self.args + args
-        return self.__class__(self.name, args=args, kwargs=kw)
+
+        mark = Mark(self.name, args, kwargs)
+        return self.__class__(self.mark.combined_with(mark))
+
+
+
 
 
-class MarkInfo:
+class Mark(namedtuple('Mark', 'name, args, kwargs')):
+
+    def combined_with(self, other):
+        assert self.name == other.name
+        return Mark(
+            self.name, self.args + other.args,
+            dict(self.kwargs, **other.kwargs))
+
+
+class MarkInfo(object):
     """ Marking object created by :class:`MarkDecorator` instances. """
-    def __init__(self, name, args, kwargs):
-        #: name of attribute
-        self.name = name
-        #: positional argument list, empty if none specified
-        self.args = args
-        #: keyword argument dictionary, empty if nothing specified
-        self.kwargs = kwargs.copy()
-        self._arglist = [(args, kwargs.copy())]
+    def __init__(self, mark):
+        assert isinstance(mark, Mark), repr(mark)
+        self.combined = mark
+        self._marks = [mark]
+
+    name = alias('combined.name')
+    args = alias('combined.args')
+    kwargs = alias('combined.kwargs')
 
     def __repr__(self):
-        return "<MarkInfo %r args=%r kwargs=%r>" % (
-            self.name, self.args, self.kwargs
-        )
+        return "<MarkInfo {0!r}>".format(self.combined)
 
-    def add(self, args, kwargs):
+    def add_mark(self, mark):
         """ add a MarkInfo with the given args and kwargs. """
-        self._arglist.append((args, kwargs))
-        self.args += args
-        self.kwargs.update(kwargs)
+        self._marks.append(mark)
+        self.combined = self.combined.combined_with(mark)
 
     def __iter__(self):
         """ yield MarkInfo objects each relating to a marking-call. """
-        for args, kwargs in self._arglist:
-            yield MarkInfo(self.name, args, kwargs)
+        return imap(MarkInfo, self._marks)
+
+
+MARK_GEN = MarkGenerator()
--- a/third_party/python/pytest/_pytest/monkeypatch.py
+++ b/third_party/python/pytest/_pytest/monkeypatch.py
@@ -1,39 +1,43 @@
 """ monkeypatching and mocking functionality.  """
+from __future__ import absolute_import, division, print_function
 
-import os, sys
+import os
+import sys
 import re
 
 from py.builtin import _basestring
+from _pytest.fixtures import fixture
 
 RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
 
 
-def pytest_funcarg__monkeypatch(request):
-    """The returned ``monkeypatch`` funcarg provides these
+@fixture
+def monkeypatch():
+    """The returned ``monkeypatch`` fixture provides these
     helper methods to modify objects, dictionaries or os.environ::
 
         monkeypatch.setattr(obj, name, value, raising=True)
         monkeypatch.delattr(obj, name, raising=True)
         monkeypatch.setitem(mapping, name, value)
         monkeypatch.delitem(obj, name, raising=True)
         monkeypatch.setenv(name, value, prepend=False)
         monkeypatch.delenv(name, value, raising=True)
         monkeypatch.syspath_prepend(path)
         monkeypatch.chdir(path)
 
     All modifications will be undone after the requesting
-    test function has finished. The ``raising``
+    test function or fixture has finished. The ``raising``
     parameter determines if a KeyError or AttributeError
     will be raised if the set/deletion operation has no target.
     """
-    mpatch = monkeypatch()
-    request.addfinalizer(mpatch.undo)
-    return mpatch
+    mpatch = MonkeyPatch()
+    yield mpatch
+    mpatch.undo()
 
 
 def resolve(name):
     # simplified from zope.dottedname
     parts = name.split('.')
 
     used = parts.pop(0)
     found = __import__(used)
@@ -88,18 +92,19 @@ def derive_importpath(import_path, raisi
 class Notset:
     def __repr__(self):
         return "<notset>"
 
 
 notset = Notset()
 
 
-class monkeypatch:
-    """ Object keeping a record of setattr/item/env/syspath changes. """
+class MonkeyPatch:
+    """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
+    """
 
     def __init__(self):
         self._setattr = []
         self._setitem = []
         self._cwd = None
         self._savesyspath = None
 
     def setattr(self, target, name, value=notset, raising=True):
@@ -215,20 +220,20 @@ class monkeypatch:
             path.chdir()
         else:
             os.chdir(path)
 
     def undo(self):
         """ Undo previous changes.  This call consumes the
         undo stack. Calling it a second time has no effect unless
         you do more monkeypatching after the undo call.
-        
+
         There is generally no need to call `undo()`, since it is
         called automatically during tear-down.
-        
+
         Note that the same `monkeypatch` fixture is used across a
         single test function invocation. If `monkeypatch` is used both by
         the test function itself and one of the test fixtures,
         calling `undo()` will undo all of the changes made in
         both functions.
         """
         for obj, name, value in reversed(self._setattr):
             if value is not notset:
--- a/third_party/python/pytest/_pytest/nose.py
+++ b/third_party/python/pytest/_pytest/nose.py
@@ -1,42 +1,43 @@
 """ run test suites written for nose. """
+from __future__ import absolute_import, division, print_function
 
 import sys
 
 import py
-import pytest
-from _pytest import unittest
+from _pytest import unittest, runner, python
+from _pytest.config import hookimpl
 
 
 def get_skip_exceptions():
     skip_classes = set()
     for module_name in ('unittest', 'unittest2', 'nose'):
         mod = sys.modules.get(module_name)
         if hasattr(mod, 'SkipTest'):
             skip_classes.add(mod.SkipTest)
     return tuple(skip_classes)
 
 
 def pytest_runtest_makereport(item, call):
     if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
         # let's substitute the excinfo with a pytest.skip one
-        call2 = call.__class__(lambda:
-                    pytest.skip(str(call.excinfo.value)), call.when)
+        call2 = call.__class__(
+            lambda: runner.skip(str(call.excinfo.value)), call.when)
         call.excinfo = call2.excinfo
 
 
-@pytest.hookimpl(trylast=True)
+@hookimpl(trylast=True)
 def pytest_runtest_setup(item):
     if is_potential_nosetest(item):
-        if isinstance(item.parent, pytest.Generator):
+        if isinstance(item.parent, python.Generator):
             gen = item.parent
             if not hasattr(gen, '_nosegensetup'):
                 call_optional(gen.obj, 'setup')
-                if isinstance(gen.parent, pytest.Instance):
+                if isinstance(gen.parent, python.Instance):
                     call_optional(gen.parent.obj, 'setup')
                 gen._nosegensetup = True
         if not call_optional(item.obj, 'setup'):
             # call module level setup if there is no object level one
             call_optional(item.parent.obj, 'setup')
         #XXX this implies we only call teardown when setup worked
         item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
 
@@ -45,24 +46,24 @@ def teardown_nose(item):
         if not call_optional(item.obj, 'teardown'):
             call_optional(item.parent.obj, 'teardown')
         #if hasattr(item.parent, '_nosegensetup'):
         #    #call_optional(item._nosegensetup, 'teardown')
         #    del item.parent._nosegensetup
 
 
 def pytest_make_collect_report(collector):
-    if isinstance(collector, pytest.Generator):
+    if isinstance(collector, python.Generator):
         call_optional(collector.obj, 'setup')
 
 
 def is_potential_nosetest(item):
     # extra check needed since we do not do nose style setup/teardown
     # on direct unittest style classes
-    return isinstance(item, pytest.Function) and \
+    return isinstance(item, python.Function) and \
         not isinstance(item, unittest.TestCaseFunction)
 
 
 def call_optional(obj, name):
     method = getattr(obj, name, None)
     isfixture = hasattr(method, "_pytestfixturefunction")
     if method is not None and not isfixture and py.builtin.callable(method):
         # If there's any problems allow the exception to raise rather than
--- a/third_party/python/pytest/_pytest/pastebin.py
+++ b/third_party/python/pytest/_pytest/pastebin.py
@@ -1,55 +1,62 @@
 """ submit failure or test session information to a pastebin service. """
+from __future__ import absolute_import, division, print_function
+
 import pytest
 import sys
 import tempfile
 
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting")
     group._addoption('--pastebin', metavar="mode",
         action='store', dest="pastebin", default=None,
         choices=['failed', 'all'],
         help="send failed|all info to bpaste.net pastebin service.")
 
+
 @pytest.hookimpl(trylast=True)
 def pytest_configure(config):
     import py
     if config.option.pastebin == "all":
         tr = config.pluginmanager.getplugin('terminalreporter')
         # if no terminal reporter plugin is present, nothing we can do here;
         # this can happen when this function executes in a slave node
         # when using pytest-xdist, for example
         if tr is not None:
             # pastebin file will be utf-8 encoded binary file
             config._pastebinfile = tempfile.TemporaryFile('w+b')
             oldwrite = tr._tw.write
+
             def tee_write(s, **kwargs):
                 oldwrite(s, **kwargs)
                 if py.builtin._istext(s):
                     s = s.encode('utf-8')
                 config._pastebinfile.write(s)
+
             tr._tw.write = tee_write
 
+
 def pytest_unconfigure(config):
     if hasattr(config, '_pastebinfile'):
         # get terminal contents and delete file
         config._pastebinfile.seek(0)
         sessionlog = config._pastebinfile.read()
         config._pastebinfile.close()
         del config._pastebinfile
         # undo our patching in the terminal reporter
         tr = config.pluginmanager.getplugin('terminalreporter')
         del tr._tw.__dict__['write']
         # write summary
         tr.write_sep("=", "Sending information to Paste Service")
         pastebinurl = create_new_paste(sessionlog)
         tr.write_line("pastebin session-log: %s\n" % pastebinurl)
 
+
 def create_new_paste(contents):
     """
     Creates a new paste using bpaste.net service.
 
     :contents: paste contents as utf-8 encoded bytes
     :returns: url to the pasted contents
     """
     import re
@@ -67,16 +74,17 @@ def create_new_paste(contents):
     url = 'https://bpaste.net'
     response = urlopen(url, data=urlencode(params).encode('ascii')).read()
     m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
     if m:
         return '%s/show/%s' % (url, m.group(1))
     else:
         return 'bad response: ' + response
 
+
 def pytest_terminal_summary(terminalreporter):
     import _pytest.config
     if terminalreporter.config.option.pastebin != "failed":
         return
     tr = terminalreporter
     if 'failed' in tr.stats:
         terminalreporter.write_sep("=", "Sending information to Paste Service")
         for rep in terminalreporter.stats.get('failed'):
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/pdb.py
+++ /dev/null
@@ -1,109 +0,0 @@
-""" interactive debugging with PDB, the Python Debugger. """
-from __future__ import absolute_import
-import pdb
-import sys
-
-import pytest
-
-
-def pytest_addoption(parser):
-    group = parser.getgroup("general")
-    group._addoption('--pdb',
-               action="store_true", dest="usepdb", default=False,
-               help="start the interactive Python debugger on errors.")
-
-def pytest_namespace():
-    return {'set_trace': pytestPDB().set_trace}
-
-def pytest_configure(config):
-    if config.getvalue("usepdb"):
-        config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
-
-    old = (pdb.set_trace, pytestPDB._pluginmanager)
-    def fin():
-        pdb.set_trace, pytestPDB._pluginmanager = old
-        pytestPDB._config = None
-    pdb.set_trace = pytest.set_trace
-    pytestPDB._pluginmanager = config.pluginmanager
-    pytestPDB._config = config
-    config._cleanup.append(fin)
-
-class pytestPDB:
-    """ Pseudo PDB that defers to the real pdb. """
-    _pluginmanager = None
-    _config = None
-
-    def set_trace(self):
-        """ invoke PDB set_trace debugging, dropping any IO capturing. """
-        import _pytest.config
-        frame = sys._getframe().f_back
-        if self._pluginmanager is not None:
-            capman = self._pluginmanager.getplugin("capturemanager")
-            if capman:
-                capman.suspendcapture(in_=True)
-            tw = _pytest.config.create_terminal_writer(self._config)
-            tw.line()
-            tw.sep(">", "PDB set_trace (IO-capturing turned off)")
-            self._pluginmanager.hook.pytest_enter_pdb(config=self._config)
-        pdb.Pdb().set_trace(frame)
-
-
-class PdbInvoke:
-    def pytest_exception_interact(self, node, call, report):
-        capman = node.config.pluginmanager.getplugin("capturemanager")
-        if capman:
-            out, err = capman.suspendcapture(in_=True)
-            sys.stdout.write(out)
-            sys.stdout.write(err)
-        _enter_pdb(node, call.excinfo, report)
-
-    def pytest_internalerror(self, excrepr, excinfo):
-        for line in str(excrepr).split("\n"):
-            sys.stderr.write("INTERNALERROR> %s\n" %line)
-            sys.stderr.flush()
-        tb = _postmortem_traceback(excinfo)
-        post_mortem(tb)
-
-
-def _enter_pdb(node, excinfo, rep):
-    # XXX we re-use the TerminalReporter's terminalwriter
-    # because this seems to avoid some encoding related troubles
-    # for not completely clear reasons.
-    tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
-    tw.line()
-    tw.sep(">", "traceback")
-    rep.toterminal(tw)
-    tw.sep(">", "entering PDB")
-    tb = _postmortem_traceback(excinfo)
-    post_mortem(tb)
-    rep._pdbshown = True
-    return rep
-
-
-def _postmortem_traceback(excinfo):
-    # A doctest.UnexpectedException is not useful for post_mortem.
-    # Use the underlying exception instead:
-    from doctest import UnexpectedException
-    if isinstance(excinfo.value, UnexpectedException):
-        return excinfo.value.exc_info[2]
-    else:
-        return excinfo._excinfo[2]
-
-
-def _find_last_non_hidden_frame(stack):
-    i = max(0, len(stack) - 1)
-    while i and stack[i][0].f_locals.get("__tracebackhide__", False):
-        i -= 1
-    return i
-
-
-def post_mortem(t):
-    class Pdb(pdb.Pdb):
-        def get_stack(self, f, t):
-            stack, i = pdb.Pdb.get_stack(self, f, t)
-            if f is None:
-                i = _find_last_non_hidden_frame(stack)
-            return stack, i
-    p = Pdb()
-    p.reset()
-    p.interaction(None, t)
--- a/third_party/python/pytest/_pytest/pytester.py
+++ b/third_party/python/pytest/_pytest/pytester.py
@@ -1,26 +1,30 @@
 """ (disabled by default) support for testing pytest and pytest plugins. """
+from __future__ import absolute_import, division, print_function
+
 import codecs
 import gc
 import os
 import platform
 import re
 import subprocess
 import sys
 import time
 import traceback
 from fnmatch import fnmatch
 
-from py.builtin import print_
+from weakref import WeakKeyDictionary
 
+from _pytest.capture import MultiCapture, SysCapture
 from _pytest._code import Source
 import py
 import pytest
 from _pytest.main import Session, EXIT_OK
+from _pytest.assertion.rewrite import AssertionRewritingHook
 
 
 def pytest_addoption(parser):
     # group = parser.getgroup("pytester", "pytester (self-tests) options")
     parser.addoption('--lsof',
            action="store_true", dest="lsof", default=False,
            help=("run FD checks if lsof is available"))
 
@@ -79,17 +83,17 @@ class LsofFdLeakChecker(object):
             # cmdexec may raise UnicodeDecodeError on Windows systems
             # with locale other than english:
             # https://bitbucket.org/pytest-dev/py/issues/66
             return False
         else:
             return True
 
     @pytest.hookimpl(hookwrapper=True, tryfirst=True)
-    def pytest_runtest_item(self, item):
+    def pytest_runtest_protocol(self, item):
         lines1 = self.get_open_files()
         yield
         if hasattr(sys, "pypy_version_info"):
             gc.collect()
         lines2 = self.get_open_files()
 
         new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
         leaked_files = [t for t in lines2 if t[0] in new_fds]
@@ -98,17 +102,18 @@ class LsofFdLeakChecker(object):
             error.append("***** %s FD leakage detected" % len(leaked_files))
             error.extend([str(f) for f in leaked_files])
             error.append("*** Before:")
             error.extend([str(f) for f in lines1])
             error.append("*** After:")
             error.extend([str(f) for f in lines2])
             error.append(error[0])
             error.append("*** function %s:%s: %s " % item.location)
-            pytest.fail("\n".join(error), pytrace=False)
+            error.append("See issue #2366")
+            item.warn('', "\n".join(error))
 
 
 # XXX copied from execnet's conftest.py - needs to be merged
 winpymap = {
     'python2.7': r'C:\Python27\python.exe',
     'python2.6': r'C:\Python26\python.exe',
     'python3.1': r'C:\Python31\python.exe',
     'python3.2': r'C:\Python32\python.exe',
@@ -118,25 +123,28 @@ winpymap = {
 }
 
 def getexecutable(name, cache={}):
     try:
         return cache[name]
     except KeyError:
         executable = py.path.local.sysfind(name)
         if executable:
+            import subprocess
+            popen = subprocess.Popen([str(executable), "--version"],
+                universal_newlines=True, stderr=subprocess.PIPE)
+            out, err = popen.communicate()
             if name == "jython":
-                import subprocess
-                popen = subprocess.Popen([str(executable), "--version"],
-                    universal_newlines=True, stderr=subprocess.PIPE)
-                out, err = popen.communicate()
                 if not err or "2.5" not in err:
                     executable = None
                 if "2.5.2" in err:
                     executable = None # http://bugs.jython.org/issue1790
+            elif popen.returncode != 0:
+                # Handle pyenv's 127.
+                executable = None
         cache[name] = executable
         return executable
 
 @pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
                         'pypy', 'pypy3'])
 def anypython(request):
     name = request.param
     executable = getexecutable(name)
@@ -217,25 +225,25 @@ class HookRecorder:
         __tracebackhide__ = True
         i = 0
         entries = list(entries)
         backlocals = sys._getframe(1).f_locals
         while entries:
             name, check = entries.pop(0)
             for ind, call in enumerate(self.calls[i:]):
                 if call._name == name:
-                    print_("NAMEMATCH", name, call)
+                    print("NAMEMATCH", name, call)
                     if eval(check, backlocals, call.__dict__):
-                        print_("CHECKERMATCH", repr(check), "->", call)
+                        print("CHECKERMATCH", repr(check), "->", call)
                     else:
-                        print_("NOCHECKERMATCH", repr(check), "-", call)
+                        print("NOCHECKERMATCH", repr(check), "-", call)
                         continue
                     i += ind + 1
                     break
-                print_("NONAMEMATCH", name, "with", call)
+                print("NONAMEMATCH", name, "with", call)
             else:
                 pytest.fail("could not find %r check %r" % (name, check))
 
     def popcall(self, name):
         __tracebackhide__ = True
         for i, call in enumerate(self.calls):
             if call._name == name:
                 del self.calls[i]
@@ -313,26 +321,27 @@ class HookRecorder:
         self.calls[:] = []
 
 
 @pytest.fixture
 def linecomp(request):
     return LineComp()
 
 
-def pytest_funcarg__LineMatcher(request):
+@pytest.fixture(name='LineMatcher')
+def LineMatcher_fixture(request):
     return LineMatcher
 
 
 @pytest.fixture
 def testdir(request, tmpdir_factory):
     return Testdir(request, tmpdir_factory)
 
 
-rex_outcome = re.compile("(\d+) ([\w-]+)")
+rex_outcome = re.compile(r"(\d+) ([\w-]+)")
 class RunResult:
     """The result of running a command.
 
     Attributes:
 
     :ret: The return value.
     :outlines: List of lines captured from stdout.
     :errlines: List of lines captures from stderr.
@@ -357,32 +366,33 @@ class RunResult:
         for line in reversed(self.outlines):
             if 'seconds' in line:
                 outcomes = rex_outcome.findall(line)
                 if outcomes:
                     d = {}
                     for num, cat in outcomes:
                         d[cat] = int(num)
                     return d
+        raise ValueError("Pytest terminal report not found")
 
     def assert_outcomes(self, passed=0, skipped=0, failed=0):
         """ assert that the specified outcomes appear with the respective
         numbers (0 means it didn't occur) in the text output from a test run."""
         d = self.parseoutcomes()
         assert passed == d.get("passed", 0)
         assert skipped == d.get("skipped", 0)
         assert failed == d.get("failed", 0)
 
 
 
 class Testdir:
-    """Temporary test directory with tools to test/run py.test itself.
+    """Temporary test directory with tools to test/run pytest itself.
 
     This is based on the ``tmpdir`` fixture but provides a number of
-    methods which aid with testing py.test itself.  Unless
+    methods which aid with testing pytest itself.  Unless
     :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
     current working directory.
 
     Attributes:
 
     :tmpdir: The :py:class:`py.path.local` instance of the temporary
        directory.
 
@@ -391,16 +401,17 @@ class Testdir:
        plugins can be added to the list.  The type of items to add to
        the list depend on the method which uses them so refer to them
        for details.
 
     """
 
     def __init__(self, request, tmpdir_factory):
         self.request = request
+        self._mod_collections  = WeakKeyDictionary()
         # XXX remove duplication with tmpdir plugin
         basetmp = tmpdir_factory.ensuretemp("testdir")
         name = request.function.__name__
         for i in range(100):
             try:
                 tmpdir = basetmp.mkdir(name + str(i))
             except py.error.EEXIST:
                 continue
@@ -436,19 +447,20 @@ class Testdir:
 
     def delete_loaded_modules(self):
         """Delete modules that have been loaded during a test.
 
         This allows the interpreter to catch module changes in case
         the module is re-imported.
         """
         for name in set(sys.modules).difference(self._savemodulekeys):
-            # it seems zope.interfaces is keeping some state
-            # (used by twisted related tests)
-            if name != "zope.interface":
+            # some zope modules used by twisted-related tests keeps internal
+            # state and can't be deleted; we had some trouble in the past
+            # with zope.interface for example
+            if not name.startswith("zope"):
                 del sys.modules[name]
 
     def make_hook_recorder(self, pluginmanager):
         """Create a new :py:class:`HookRecorder` for a PluginManager."""
         assert not hasattr(pluginmanager, "reprec")
         pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
         self.request.addfinalizer(reprec.finish_recording)
         return reprec
@@ -458,34 +470,37 @@ class Testdir:
 
         This is done automatically upon instantiation.
 
         """
         old = self.tmpdir.chdir()
         if not hasattr(self, '_olddir'):
             self._olddir = old
 
-    def _makefile(self, ext, args, kwargs):
+    def _makefile(self, ext, args, kwargs, encoding="utf-8"):
         items = list(kwargs.items())
         if args:
             source = py.builtin._totext("\n").join(
                 map(py.builtin._totext, args)) + py.builtin._totext("\n")
             basename = self.request.function.__name__
             items.insert(0, (basename, source))
         ret = None
         for name, value in items:
             p = self.tmpdir.join(name).new(ext=ext)
+            p.dirpath().ensure_dir()
             source = Source(value)
+
             def my_totext(s, encoding="utf-8"):
                 if py.builtin._isbytes(s):
                     s = py.builtin._totext(s, encoding=encoding)
                 return s
+
             source_unicode = "\n".join([my_totext(line) for line in source.lines])
             source = py.builtin._totext(source_unicode)
-            content = source.strip().encode("utf-8") # + "\n"
+            content = source.strip().encode(encoding) # + "\n"
             #content = content.rstrip() + "\n"
             p.write(content, "wb")
             if ret is None:
                 ret = p
         return ret
 
     def makefile(self, ext, *args, **kwargs):
         """Create a new file in the testdir.
@@ -552,17 +567,17 @@ class Testdir:
 
     def mkdir(self, name):
         """Create a new (sub)directory."""
         return self.tmpdir.mkdir(name)
 
     def mkpydir(self, name):
         """Create a new python package.
 
-        This creates a (sub)direcotry with an empty ``__init__.py``
+        This creates a (sub)directory with an empty ``__init__.py``
         file so that is recognised as a python package.
 
         """
         p = self.mkdir(name)
         p.ensure("__init__.py")
         return p
 
     Session = Session
@@ -583,17 +598,17 @@ class Testdir:
         res = session.perform_collect([str(p)], genitems=False)[0]
         config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
         return res
 
     def getpathnode(self, path):
         """Return the collection node of a file.
 
         This is like :py:meth:`getnode` but uses
-        :py:meth:`parseconfigure` to create the (configured) py.test
+        :py:meth:`parseconfigure` to create the (configured) pytest
         Config instance.
 
         :param path: A :py:class:`py.path.local` instance of the file.
 
         """
         config = self.parseconfigure(path)
         session = Session(config)
         x = session.fspath.bestrelpath(path)
@@ -647,48 +662,60 @@ class Testdir:
         """
         p = self.makepyfile(source)
         l = list(cmdlineargs) + [p]
         return self.inline_run(*l)
 
     def inline_genitems(self, *args):
         """Run ``pytest.main(['--collectonly'])`` in-process.
 
-        Retuns a tuple of the collected items and a
+        Returns a tuple of the collected items and a
         :py:class:`HookRecorder` instance.
 
         This runs the :py:func:`pytest.main` function to run all of
-        py.test inside the test process itself like
+        pytest inside the test process itself like
         :py:meth:`inline_run`.  However the return value is a tuple of
         the collection items and a :py:class:`HookRecorder` instance.
 
         """
         rec = self.inline_run("--collect-only", *args)
         items = [x.item for x in rec.getcalls("pytest_itemcollected")]
         return items, rec
 
     def inline_run(self, *args, **kwargs):
         """Run ``pytest.main()`` in-process, returning a HookRecorder.
 
         This runs the :py:func:`pytest.main` function to run all of
-        py.test inside the test process itself.  This means it can
+        pytest inside the test process itself.  This means it can
         return a :py:class:`HookRecorder` instance which gives more
         detailed results from then run then can be done by matching
         stdout/stderr from :py:meth:`runpytest`.
 
         :param args: Any command line arguments to pass to
            :py:func:`pytest.main`.
 
         :param plugin: (keyword-only) Extra plugin instances the
            ``pytest.main()`` instance should use.
 
         :return: A :py:class:`HookRecorder` instance.
+        """
+        # When running py.test inline any plugins active in the main
+        # test process are already imported.  So this disables the
+        # warning which will trigger to say they can no longer be
+        # re-written, which is fine as they are already re-written.
+        orig_warn = AssertionRewritingHook._warn_already_imported
 
-        """
+        def revert():
+            AssertionRewritingHook._warn_already_imported = orig_warn
+
+        self.request.addfinalizer(revert)
+        AssertionRewritingHook._warn_already_imported = lambda *a: None
+
         rec = []
+
         class Collect:
             def pytest_configure(x, config):
                 rec.append(self.make_hook_recorder(config.pluginmanager))
 
         plugins = kwargs.get("plugins") or []
         plugins.append(Collect())
         ret = pytest.main(list(args), plugins=plugins)
         self.delete_loaded_modules()
@@ -708,29 +735,34 @@ class Testdir:
         return reprec
 
     def runpytest_inprocess(self, *args, **kwargs):
         """ Return result of running pytest in-process, providing a similar
         interface to what self.runpytest() provides. """
         if kwargs.get("syspathinsert"):
             self.syspathinsert()
         now = time.time()
-        capture = py.io.StdCapture()
+        capture = MultiCapture(Capture=SysCapture)
+        capture.start_capturing()
         try:
             try:
                 reprec = self.inline_run(*args, **kwargs)
             except SystemExit as e:
+
                 class reprec:
                     ret = e.args[0]
+
             except Exception:
                 traceback.print_exc()
+
                 class reprec:
                     ret = 3
         finally:
-            out, err = capture.reset()
+            out, err = capture.readouterr()
+            capture.stop_capturing()
             sys.stdout.write(out)
             sys.stderr.write(err)
 
         res = RunResult(reprec.ret,
                         out.split("\n"), err.split("\n"),
                         time.time()-now)
         res.reprec = reprec
         return res
@@ -750,19 +782,19 @@ class Testdir:
                 #print ("basedtemp exists: %s" %(args,))
                 break
         else:
             args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
             #print ("added basetemp: %s" %(args,))
         return args
 
     def parseconfig(self, *args):
-        """Return a new py.test Config instance from given commandline args.
+        """Return a new pytest Config instance from given commandline args.
 
-        This invokes the py.test bootstrapping code in _pytest.config
+        This invokes the pytest bootstrapping code in _pytest.config
         to create a new :py:class:`_pytest.core.PluginManager` and
         call the pytest_cmdline_parse hook to create new
         :py:class:`_pytest.config.Config` instance.
 
         If :py:attr:`plugins` has been populated they should be plugin
         modules which will be registered with the PluginManager.
 
         """
@@ -772,32 +804,32 @@ class Testdir:
         config = _pytest.config._prepareconfig(args, self.plugins)
         # we don't know what the test will do with this half-setup config
         # object and thus we make sure it gets unconfigured properly in any
         # case (otherwise capturing could still be active, for example)
         self.request.addfinalizer(config._ensure_unconfigure)
         return config
 
     def parseconfigure(self, *args):
-        """Return a new py.test configured Config instance.
+        """Return a new pytest configured Config instance.
 
         This returns a new :py:class:`_pytest.config.Config` instance
         like :py:meth:`parseconfig`, but also calls the
         pytest_configure hook.
 
         """
         config = self.parseconfig(*args)
         config._do_configure()
         self.request.addfinalizer(config._ensure_unconfigure)
         return config
 
     def getitem(self,  source, funcname="test_func"):
         """Return the test item for a test function.
 
-        This writes the source to a python file and runs py.test's
+        This writes the source to a python file and runs pytest's
         collection on the resulting module, returning the test item
         for the requested function name.
 
         :param source: The module source.
 
         :param funcname: The name of the test function for which the
            Item must be returned.
 
@@ -807,61 +839,64 @@ class Testdir:
             if item.name == funcname:
                 return item
         assert 0, "%r item not found in module:\n%s\nitems: %s" %(
                   funcname, source, items)
 
     def getitems(self,  source):
         """Return all test items collected from the module.
 
-        This writes the source to a python file and runs py.test's
+        This writes the source to a python file and runs pytest's
         collection on the resulting module, returning all test items
         contained within.
 
         """
         modcol = self.getmodulecol(source)
         return self.genitems([modcol])
 
     def getmodulecol(self,  source, configargs=(), withinit=False):
         """Return the module collection node for ``source``.
 
         This writes ``source`` to a file using :py:meth:`makepyfile`
-        and then runs the py.test collection on it, returning the
+        and then runs the pytest collection on it, returning the
         collection node for the test module.
 
         :param source: The source code of the module to collect.
 
         :param configargs: Any extra arguments to pass to
            :py:meth:`parseconfigure`.
 
         :param withinit: Whether to also write a ``__init__.py`` file
-           to the temporarly directory to ensure it is a package.
+           to the temporary directory to ensure it is a package.
 
         """
         kw = {self.request.function.__name__: Source(source).strip()}
         path = self.makepyfile(**kw)
         if withinit:
             self.makepyfile(__init__ = "#")
         self.config = config = self.parseconfigure(path, *configargs)
         node = self.getnode(config, path)
+
         return node
 
     def collect_by_name(self, modcol, name):
         """Return the collection node for name from the module collection.
 
         This will search a module collection node for a collection
         node matching the given name.
 
         :param modcol: A module collection node, see
            :py:meth:`getmodulecol`.
 
         :param name: The name of the node to return.
 
         """
-        for colitem in modcol._memocollect():
+        if modcol not in self._mod_collections:
+            self._mod_collections[modcol] = list(modcol.collect())
+        for colitem in self._mod_collections[modcol]:
             if colitem.name == name:
                 return colitem
 
     def popen(self, cmdargs, stdout, stderr, **kw):
         """Invoke subprocess.Popen.
 
         This calls subprocess.Popen making sure the current working
         directory is the PYTHONPATH.
@@ -886,18 +921,18 @@ class Testdir:
 
         """
         return self._run(*cmdargs)
 
     def _run(self, *cmdargs):
         cmdargs = [str(x) for x in cmdargs]
         p1 = self.tmpdir.join("stdout")
         p2 = self.tmpdir.join("stderr")
-        print_("running:", ' '.join(cmdargs))
-        print_("     in:", str(py.path.local()))
+        print("running:", ' '.join(cmdargs))
+        print("     in:", str(py.path.local()))
         f1 = codecs.open(str(p1), "w", encoding="utf8")
         f2 = codecs.open(str(p2), "w", encoding="utf8")
         try:
             now = time.time()
             popen = self.popen(cmdargs, stdout=f1, stderr=f2,
                 close_fds=(sys.platform != "win32"))
             ret = popen.wait()
         finally:
@@ -913,38 +948,38 @@ class Testdir:
             f2.close()
         self._dump_lines(out, sys.stdout)
         self._dump_lines(err, sys.stderr)
         return RunResult(ret, out, err, time.time()-now)
 
     def _dump_lines(self, lines, fp):
         try:
             for line in lines:
-                py.builtin.print_(line, file=fp)
+                print(line, file=fp)
         except UnicodeEncodeError:
             print("couldn't print to %s because of encoding" % (fp,))
 
     def _getpytestargs(self):
         # we cannot use "(sys.executable,script)"
-        # because on windows the script is e.g. a py.test.exe
+        # because on windows the script is e.g. a pytest.exe
         return (sys.executable, _pytest_fullpath,) # noqa
 
     def runpython(self, script):
         """Run a python script using sys.executable as interpreter.
 
         Returns a :py:class:`RunResult`.
         """
         return self.run(sys.executable, script)
 
     def runpython_c(self, command):
         """Run python -c "command", return a :py:class:`RunResult`."""
         return self.run(sys.executable, "-c", command)
 
     def runpytest_subprocess(self, *args, **kwargs):
-        """Run py.test as a subprocess with given arguments.
+        """Run pytest as a subprocess with given arguments.
 
         Any plugins added to the :py:attr:`plugins` list will added
         using the ``-p`` command line option.  Addtionally
         ``--basetemp`` is used put any temporary files and directories
         in a numbered directory prefixed with "runpytest-" so they do
         not conflict with the normal numberd pytest location for
         temporary files and directories.
 
@@ -962,39 +997,37 @@ class Testdir:
         #    args = ('--confcutdir=.',) + args
         plugins = [x for x in self.plugins if isinstance(x, str)]
         if plugins:
             args = ('-p', plugins[0]) + args
         args = self._getpytestargs() + args
         return self.run(*args)
 
     def spawn_pytest(self, string, expect_timeout=10.0):
-        """Run py.test using pexpect.
+        """Run pytest using pexpect.
 
-        This makes sure to use the right py.test and sets up the
+        This makes sure to use the right pytest and sets up the
         temporary directory locations.
 
         The pexpect child is returned.
 
         """
-        basetemp = self.tmpdir.mkdir("pexpect")
+        basetemp = self.tmpdir.mkdir("temp-pexpect")
         invoke = " ".join(map(str, self._getpytestargs()))
         cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
         return self.spawn(cmd, expect_timeout=expect_timeout)
 
     def spawn(self, cmd, expect_timeout=10.0):
         """Run a command using pexpect.
 
         The pexpect child is returned.
         """
         pexpect = pytest.importorskip("pexpect", "3.0")
         if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
             pytest.skip("pypy-64 bit not supported")
-        if sys.platform == "darwin":
-            pytest.xfail("pexpect does not work reliably on darwin?!")
         if sys.platform.startswith("freebsd"):
             pytest.xfail("pexpect does not work reliably on freebsd")
         logfile = self.tmpdir.join("spawn.out").open("wb")
         child = pexpect.spawn(cmd, logfile=logfile)
         self.request.addfinalizer(logfile.close)
         child.timeout = expect_timeout
         return child
 
@@ -1030,16 +1063,17 @@ class LineMatcher:
 
     The constructor takes a list of lines without their trailing
     newlines, i.e. ``text.splitlines()``.
 
     """
 
     def __init__(self,  lines):
         self.lines = lines
+        self._log_output = []
 
     def str(self):
         """Return the entire original text."""
         return "\n".join(self.lines)
 
     def _getlines(self, lines2):
         if isinstance(lines2, str):
             lines2 = Source(lines2)
@@ -1053,58 +1087,65 @@ class LineMatcher:
         The argument is a list of lines which have to occur in the
         output, in any order.  Each line can contain glob whildcards.
 
         """
         lines2 = self._getlines(lines2)
         for line in lines2:
             for x in self.lines:
                 if line == x or fnmatch(x, line):
-                    print_("matched: ", repr(line))
+                    self._log("matched: ", repr(line))
                     break
             else:
-                raise ValueError("line %r not found in output" % line)
+                self._log("line %r not found in output" % line)
+                raise ValueError(self._log_text)
 
     def get_lines_after(self, fnline):
         """Return all lines following the given line in the text.
 
         The given line can contain glob wildcards.
         """
         for i, line in enumerate(self.lines):
             if fnline == line or fnmatch(line, fnline):
                 return self.lines[i+1:]
         raise ValueError("line %r not found in output" % fnline)
 
+    def _log(self, *args):
+        self._log_output.append(' '.join((str(x) for x in args)))
+
+    @property
+    def _log_text(self):
+        return '\n'.join(self._log_output)
+
     def fnmatch_lines(self, lines2):
         """Search the text for matching lines.
 
         The argument is a list of lines which have to match and can
         use glob wildcards.  If they do not match an pytest.fail() is
         called.  The matches and non-matches are also printed on
         stdout.
 
         """
-        def show(arg1, arg2):
-            py.builtin.print_(arg1, arg2, file=sys.stderr)
         lines2 = self._getlines(lines2)
         lines1 = self.lines[:]
         nextline = None
         extralines = []
         __tracebackhide__ = True
         for line in lines2:
             nomatchprinted = False
             while lines1:
                 nextline = lines1.pop(0)
                 if line == nextline:
-                    show("exact match:", repr(line))
+                    self._log("exact match:", repr(line))
                     break
                 elif fnmatch(nextline, line):
-                    show("fnmatch:", repr(line))
-                    show("   with:", repr(nextline))
+                    self._log("fnmatch:", repr(line))
+                    self._log("   with:", repr(nextline))
                     break
                 else:
                     if not nomatchprinted:
-                        show("nomatch:", repr(line))
+                        self._log("nomatch:", repr(line))
                         nomatchprinted = True
-                    show("    and:", repr(nextline))
+                    self._log("    and:", repr(nextline))
                 extralines.append(nextline)
             else:
-                pytest.fail("remains unmatched: %r, see stderr" % (line,))
+                self._log("remains unmatched: %r" % (line,))
+                pytest.fail(self._log_text)
--- a/third_party/python/pytest/_pytest/python.py
+++ b/third_party/python/pytest/_pytest/python.py
@@ -1,208 +1,81 @@
 """ Python test discovery, setup and run of test functions. """
+from __future__ import absolute_import, division, print_function
+
 import fnmatch
-import functools
 import inspect
-import re
-import types
 import sys
+import os
+import collections
+import math
+from itertools import count
 
 import py
-import pytest
-from _pytest._code.code import TerminalRepr
-from _pytest.mark import MarkDecorator, MarkerError
-
-try:
-    import enum
-except ImportError:  # pragma: no cover
-    # Only available in Python 3.4+ or as a backport
-    enum = None
+from _pytest.mark import MarkerError
+from _pytest.config import hookimpl
 
 import _pytest
 import _pytest._pluggy as pluggy
-
-cutdir2 = py.path.local(_pytest.__file__).dirpath()
-cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
-
-
-NoneType = type(None)
-NOTSET = object()
-isfunction = inspect.isfunction
-isclass = inspect.isclass
-callable = py.builtin.callable
-# used to work around a python2 exception info leak
-exc_clear = getattr(sys, 'exc_clear', lambda: None)
-# The type of re.compile objects is not exposed in Python.
-REGEX_TYPE = type(re.compile(''))
-
-_PY3 = sys.version_info > (3, 0)
-_PY2 = not _PY3
-
+from _pytest import fixtures
+from _pytest import main
+from _pytest.compat import (
+    isclass, isfunction, is_generator, _escape_strings,
+    REGEX_TYPE, STRING_TYPES, NoneType, NOTSET,
+    get_real_func, getfslineno, safe_getattr,
+    safe_str, getlocation, enum,
+)
+from _pytest.runner import fail
 
-if hasattr(inspect, 'signature'):
-    def _format_args(func):
-        return str(inspect.signature(func))
-else:
-    def _format_args(func):
-        return inspect.formatargspec(*inspect.getargspec(func))
-
-if  sys.version_info[:2] == (2, 6):
-    def isclass(object):
-        """ Return true if the object is a class. Overrides inspect.isclass for
-        python 2.6 because it will return True for objects which always return
-        something on __getattr__ calls (see #1035).
-        Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
-        """
-        return isinstance(object, (type, types.ClassType))
-
-def _has_positional_arg(func):
-    return func.__code__.co_argcount
+cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
+cutdir2 = py.path.local(_pytest.__file__).dirpath()
+cutdir3 = py.path.local(py.__file__).dirpath()
 
 
 def filter_traceback(entry):
+    """Return True if a TracebackEntry instance should be removed from tracebacks:
+    * dynamically generated code (no code to show up for it);
+    * internal traceback from pytest or its internal libraries, py and pluggy.
+    """
     # entry.path might sometimes return a str object when the entry
     # points to dynamically generated code
     # see https://bitbucket.org/pytest-dev/py/issues/71
     raw_filename = entry.frame.code.raw.co_filename
     is_generated = '<' in raw_filename and '>' in raw_filename
     if is_generated:
         return False
     # entry.path might point to an inexisting file, in which case it will
     # alsso return a str object. see #1133
     p = py.path.local(entry.path)
-    return p != cutdir1 and not p.relto(cutdir2)
-
-
-def get_real_func(obj):
-    """ gets the real function object of the (possibly) wrapped object by
-    functools.wraps or functools.partial.
-    """
-    while hasattr(obj, "__wrapped__"):
-        obj = obj.__wrapped__
-    if isinstance(obj, functools.partial):
-        obj = obj.func
-    return obj
-
-def getfslineno(obj):
-    # xxx let decorators etc specify a sane ordering
-    obj = get_real_func(obj)
-    if hasattr(obj, 'place_as'):
-        obj = obj.place_as
-    fslineno = _pytest._code.getfslineno(obj)
-    assert isinstance(fslineno[1], int), obj
-    return fslineno
-
-def getimfunc(func):
-    try:
-        return func.__func__
-    except AttributeError:
-        try:
-            return func.im_func
-        except AttributeError:
-            return func
-
-def safe_getattr(object, name, default):
-    """ Like getattr but return default upon any Exception.
-
-    Attribute access can potentially fail for 'evil' Python objects.
-    See issue214
-    """
-    try:
-        return getattr(object, name, default)
-    except Exception:
-        return default
+    return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3)
 
 
-class FixtureFunctionMarker:
-    def __init__(self, scope, params,
-                 autouse=False, yieldctx=False, ids=None):
-        self.scope = scope
-        self.params = params
-        self.autouse = autouse
-        self.yieldctx = yieldctx
-        self.ids = ids
-
-    def __call__(self, function):
-        if isclass(function):
-            raise ValueError(
-                    "class fixtures not supported (may be in the future)")
-        function._pytestfixturefunction = self
-        return function
-
-
-def fixture(scope="function", params=None, autouse=False, ids=None):
-    """ (return a) decorator to mark a fixture factory function.
-
-    This decorator can be used (with or or without parameters) to define
-    a fixture function.  The name of the fixture function can later be
-    referenced to cause its invocation ahead of running tests: test
-    modules or classes can use the pytest.mark.usefixtures(fixturename)
-    marker.  Test functions can directly use fixture names as input
-    arguments in which case the fixture instance returned from the fixture
-    function will be injected.
-
-    :arg scope: the scope for which this fixture is shared, one of
-                "function" (default), "class", "module", "session".
-
-    :arg params: an optional list of parameters which will cause multiple
-                invocations of the fixture function and all of the tests
-                using it.
-
-    :arg autouse: if True, the fixture func is activated for all tests that
-                can see it.  If False (the default) then an explicit
-                reference is needed to activate the fixture.
-
-    :arg ids: list of string ids each corresponding to the params
-       so that they are part of the test id. If no ids are provided
-       they will be generated automatically from the params.
-
-    """
-    if callable(scope) and params is None and autouse == False:
-        # direct decoration
-        return FixtureFunctionMarker(
-                "function", params, autouse)(scope)
-    if params is not None and not isinstance(params, (list, tuple)):
-        params = list(params)
-    return FixtureFunctionMarker(scope, params, autouse, ids=ids)
-
-def yield_fixture(scope="function", params=None, autouse=False, ids=None):
-    """ (return a) decorator to mark a yield-fixture factory function
-    (EXPERIMENTAL).
-
-    This takes the same arguments as :py:func:`pytest.fixture` but
-    expects a fixture function to use a ``yield`` instead of a ``return``
-    statement to provide a fixture.  See
-    http://pytest.org/en/latest/yieldfixture.html for more info.
-    """
-    if callable(scope) and params is None and autouse == False:
-        # direct decoration
-        return FixtureFunctionMarker(
-                "function", params, autouse, yieldctx=True)(scope)
-    else:
-        return FixtureFunctionMarker(scope, params, autouse,
-                                     yieldctx=True, ids=ids)
-
-defaultfuncargprefixmarker = fixture()
 
 def pyobj_property(name):
     def get(self):
-        node = self.getparent(getattr(pytest, name))
+        node = self.getparent(getattr(__import__('pytest'), name))
         if node is not None:
             return node.obj
     doc = "python %s object this node was collected from (can be None)." % (
           name.lower(),)
     return property(get, None, None, doc)
 
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group.addoption('--fixtures', '--funcargs',
                action="store_true", dest="showfixtures", default=False,
                help="show available fixtures, sorted by plugin appearance")
+    group.addoption(
+        '--fixtures-per-test',
+        action="store_true",
+        dest="show_fixtures_per_test",
+        default=False,
+        help="show fixtures per test",
+    )
     parser.addini("usefixtures", type="args", default=[],
         help="list of default fixtures to be used with this project")
     parser.addini("python_files", type="args",
         default=['test_*.py', '*_test.py'],
         help="glob-style file patterns for Python test module discovery")
     parser.addini("python_classes", type="args", default=["Test",],
         help="prefixes or glob names for Python test class discovery")
     parser.addini("python_functions", type="args", default=["test",],
@@ -214,16 +87,19 @@ def pytest_addoption(parser):
         help="prepend/append to sys.path when importing test modules, "
              "default is to prepend.")
 
 
 def pytest_cmdline_main(config):
     if config.option.showfixtures:
         showfixtures(config)
         return 0
+    if config.option.show_fixtures_per_test:
+        show_fixtures_per_test(config)
+        return 0
 
 
 def pytest_generate_tests(metafunc):
     # those alternative spellings are common - raise a specific error to alert
     # the user
     alt_spellings = ['parameterize', 'parametrise', 'parameterise']
     for attr in alt_spellings:
         if hasattr(metafunc.function, attr):
@@ -247,72 +123,52 @@ def pytest_configure(config):
         "see http://pytest.org/latest/parametrize.html for more info and "
         "examples."
     )
     config.addinivalue_line("markers",
         "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
         "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
     )
 
-def pytest_sessionstart(session):
-    session._fixturemanager = FixtureManager(session)
 
-@pytest.hookimpl(trylast=True)
-def pytest_namespace():
-    raises.Exception = pytest.fail.Exception
-    return {
-        'fixture': fixture,
-        'yield_fixture': yield_fixture,
-        'raises' : raises,
-        'collect': {
-        'Module': Module, 'Class': Class, 'Instance': Instance,
-        'Function': Function, 'Generator': Generator,
-        '_fillfuncargs': fillfixtures}
-    }
-
-@fixture(scope="session")
-def pytestconfig(request):
-    """ the pytest config object with access to command line opts."""
-    return request.config
-
-
-@pytest.hookimpl(trylast=True)
+@hookimpl(trylast=True)
 def pytest_pyfunc_call(pyfuncitem):
     testfunction = pyfuncitem.obj
     if pyfuncitem._isyieldedfunction():
         testfunction(*pyfuncitem._args)
     else:
         funcargs = pyfuncitem.funcargs
         testargs = {}
         for arg in pyfuncitem._fixtureinfo.argnames:
             testargs[arg] = funcargs[arg]
         testfunction(**testargs)
     return True
 
+
 def pytest_collect_file(path, parent):
     ext = path.ext
     if ext == ".py":
         if not parent.session.isinitpath(path):
             for pat in parent.config.getini('python_files'):
                 if path.fnmatch(pat):
                     break
             else:
                return
         ihook = parent.session.gethookproxy(path)
         return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
 
 def pytest_pycollect_makemodule(path, parent):
     return Module(path, parent)
 
-@pytest.hookimpl(hookwrapper=True)
+@hookimpl(hookwrapper=True)
 def pytest_pycollect_makeitem(collector, name, obj):
     outcome = yield
     res = outcome.get_result()
     if res is not None:
-        raise StopIteration
+        return
     # nothing was collected elsewhere, let's do it here
     if isclass(obj):
         if collector.istestclass(obj, name):
             Class = collector._getcustomclass("Class")
             outcome.force_result(Class(name, parent=collector))
     elif collector.istestfunction(obj, name):
         # mock seems to store unbound methods (issue473), normalize it
         obj = getattr(obj, "__func__", obj)
@@ -325,56 +181,55 @@ def pytest_pycollect_makeitem(collector,
                 % name, )
         elif getattr(obj, "__test__", True):
             if is_generator(obj):
                 res = Generator(name, parent=collector)
             else:
                 res = list(collector._genfunctions(name, obj))
             outcome.force_result(res)
 
-def is_generator(func):
-    try:
-        return _pytest._code.getrawcode(func).co_flags & 32 # generator function
-    except AttributeError: # builtin functions have no bytecode
-        # assume them to not be generators
-        return False
+def pytest_make_parametrize_id(config, val, argname=None):
+    return None
+
+
 
 class PyobjContext(object):
     module = pyobj_property("Module")
     cls = pyobj_property("Class")
     instance = pyobj_property("Instance")
 
 class PyobjMixin(PyobjContext):
     def obj():
         def fget(self):
-            try:
-                return self._obj
-            except AttributeError:
+            obj = getattr(self, '_obj', None)
+            if obj is None:
                 self._obj = obj = self._getobj()
-                return obj
+            return obj
+
         def fset(self, value):
             self._obj = value
+
         return property(fget, fset, None, "underlying python object")
+
     obj = obj()
 
     def _getobj(self):
         return getattr(self.parent.obj, self.name)
 
     def getmodpath(self, stopatmodule=True, includemodule=False):
         """ return python path relative to the containing module. """
         chain = self.listchain()
         chain.reverse()
         parts = []
         for node in chain:
             if isinstance(node, Instance):
                 continue
             name = node.name
             if isinstance(node, Module):
-                assert name.endswith(".py")
-                name = name[:-3]
+                name = os.path.splitext(name)[0]
                 if stopatmodule:
                     if includemodule:
                         parts.append(name)
                     break
             parts.append(name)
         parts.reverse()
         s = ".".join(parts)
         return s.replace(".[", "[")
@@ -393,17 +248,17 @@ class PyobjMixin(PyobjContext):
                 fspath = fspath[:-1]
             lineno = compat_co_firstlineno
         else:
             fspath, lineno = getfslineno(obj)
         modpath = self.getmodpath()
         assert isinstance(lineno, int)
         return fspath, lineno, modpath
 
-class PyCollector(PyobjMixin, pytest.Collector):
+class PyCollector(PyobjMixin, main.Collector):
 
     def funcnamefilter(self, name):
         return self._matches_prefix_or_glob_option('python_functions', name)
 
     def isnosetest(self, obj):
         """ Look for the __test__ attribute, which is applied by the
         @nose.tools.istest decorator
         """
@@ -413,17 +268,17 @@ class PyCollector(PyobjMixin, pytest.Col
         return safe_getattr(obj, '__test__', False) is True
 
     def classnamefilter(self, name):
         return self._matches_prefix_or_glob_option('python_classes', name)
 
     def istestfunction(self, obj, name):
         return (
             (self.funcnamefilter(name) or self.isnosetest(obj)) and
-            safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
+            safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None
         )
 
     def istestclass(self, obj, name):
         return self.classnamefilter(name) or self.isnosetest(obj)
 
     def _matches_prefix_or_glob_option(self, option_name, name):
         """
         checks if the given name matches the prefix or glob-pattern defined
@@ -490,86 +345,26 @@ class PyCollector(PyobjMixin, pytest.Col
         else:
             self.ihook.pytest_generate_tests(metafunc=metafunc)
 
         Function = self._getcustomclass("Function")
         if not metafunc._calls:
             yield Function(name, parent=self, fixtureinfo=fixtureinfo)
         else:
             # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
-            add_funcarg_pseudo_fixture_def(self, metafunc, fm)
+            fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
 
             for callspec in metafunc._calls:
-                subname = "%s[%s]" %(name, callspec.id)
+                subname = "%s[%s]" % (name, callspec.id)
                 yield Function(name=subname, parent=self,
                                callspec=callspec, callobj=funcobj,
                                fixtureinfo=fixtureinfo,
-                               keywords={callspec.id:True})
-
-def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
-    # this function will transform all collected calls to a functions
-    # if they use direct funcargs (i.e. direct parametrization)
-    # because we want later test execution to be able to rely on
-    # an existing FixtureDef structure for all arguments.
-    # XXX we can probably avoid this algorithm  if we modify CallSpec2
-    # to directly care for creating the fixturedefs within its methods.
-    if not metafunc._calls[0].funcargs:
-        return # this function call does not have direct parametrization
-    # collect funcargs of all callspecs into a list of values
-    arg2params = {}
-    arg2scope = {}
-    for callspec in metafunc._calls:
-        for argname, argvalue in callspec.funcargs.items():
-            assert argname not in callspec.params
-            callspec.params[argname] = argvalue
-            arg2params_list = arg2params.setdefault(argname, [])
-            callspec.indices[argname] = len(arg2params_list)
-            arg2params_list.append(argvalue)
-            if argname not in arg2scope:
-                scopenum = callspec._arg2scopenum.get(argname,
-                                                      scopenum_function)
-                arg2scope[argname] = scopes[scopenum]
-        callspec.funcargs.clear()
-
-    # register artificial FixtureDef's so that later at test execution
-    # time we can rely on a proper FixtureDef to exist for fixture setup.
-    arg2fixturedefs = metafunc._arg2fixturedefs
-    for argname, valuelist in arg2params.items():
-        # if we have a scope that is higher than function we need
-        # to make sure we only ever create an according fixturedef on
-        # a per-scope basis. We thus store and cache the fixturedef on the
-        # node related to the scope.
-        scope = arg2scope[argname]
-        node = None
-        if scope != "function":
-            node = get_scope_node(collector, scope)
-            if node is None:
-                assert scope == "class" and isinstance(collector, Module)
-                # use module-level collector for class-scope (for now)
-                node = collector
-        if node and argname in node._name2pseudofixturedef:
-            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
-        else:
-            fixturedef =  FixtureDef(fixturemanager, '', argname,
-                           get_direct_param_fixture_func,
-                           arg2scope[argname],
-                           valuelist, False, False)
-            arg2fixturedefs[argname] = [fixturedef]
-            if node is not None:
-                node._name2pseudofixturedef[argname] = fixturedef
-
-
-def get_direct_param_fixture_func(request):
-    return request.param
-
-class FuncFixtureInfo:
-    def __init__(self, argnames, names_closure, name2fixturedefs):
-        self.argnames = argnames
-        self.names_closure = names_closure
-        self.name2fixturedefs = name2fixturedefs
+                               keywords={callspec.id:True},
+                               originalname=name,
+                               )
 
 
 def _marked(func, mark):
     """ Returns True if :func: is already marked with :mark:, False otherwise.
     This can happen if marker is applied to class and the test file is
     invoked more than once.
     """
     try:
@@ -590,20 +385,22 @@ def transfer_markers(funcobj, cls, mod):
         if isinstance(pytestmark, list):
             for mark in pytestmark:
                 if not _marked(funcobj, mark):
                     mark(funcobj)
         else:
             if not _marked(funcobj, pytestmark):
                 pytestmark(funcobj)
 
-class Module(pytest.File, PyCollector):
+
+class Module(main.File, PyCollector):
     """ Collector for test classes and functions. """
+
     def _getobj(self):
-        return self._memoizedcall('_obj', self._importtestmodule)
+        return self._importtestmodule()
 
     def collect(self):
         self.session._fixturemanager.parsefactories(self)
         return super(Module, self).collect()
 
     def _importtestmodule(self):
         # we assume we are only called once per module
         importmode = self.config.getoption("--import-mode")
@@ -619,72 +416,115 @@ class Module(pytest.File, PyCollector):
                 "imported module %r has this __file__ attribute:\n"
                 "  %s\n"
                 "which is not the same as the test file we want to collect:\n"
                 "  %s\n"
                 "HINT: remove __pycache__ / .pyc files and/or use a "
                 "unique basename for your test file modules"
                  % e.args
             )
-        #print "imported test module", mod
+        except ImportError:
+            from _pytest._code.code import ExceptionInfo
+            exc_info = ExceptionInfo()
+            if self.config.getoption('verbose') < 2:
+                exc_info.traceback = exc_info.traceback.filter(filter_traceback)
+            exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly()
+            formatted_tb = safe_str(exc_repr)
+            raise self.CollectError(
+                "ImportError while importing test module '{fspath}'.\n"
+                "Hint: make sure your test modules/packages have valid Python names.\n"
+                "Traceback:\n"
+                "{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
+            )
+        except _pytest.runner.Skipped as e:
+            if e.allow_module_level:
+                raise
+            raise self.CollectError(
+                "Using pytest.skip outside of a test is not allowed. If you are "
+                "trying to decorate a test function, use the @pytest.mark.skip "
+                "or @pytest.mark.skipif decorators instead."
+            )
         self.config.pluginmanager.consider_module(mod)
         return mod
 
     def setup(self):
-        setup_module = xunitsetup(self.obj, "setUpModule")
+        setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule")
         if setup_module is None:
-            setup_module = xunitsetup(self.obj, "setup_module")
+            setup_module = _get_xunit_setup_teardown(self.obj, "setup_module")
         if setup_module is not None:
-            #XXX: nose compat hack, move to nose plugin
-            # if it takes a positional arg, its probably a pytest style one
-            # so we pass the current module object
-            if _has_positional_arg(setup_module):
-                setup_module(self.obj)
-            else:
-                setup_module()
-        fin = getattr(self.obj, 'tearDownModule', None)
-        if fin is None:
-            fin = getattr(self.obj, 'teardown_module', None)
-        if fin is not None:
-            #XXX: nose compat hack, move to nose plugin
-            # if it takes a positional arg, it's probably a pytest style one
-            # so we pass the current module object
-            if _has_positional_arg(fin):
-                finalizer = lambda: fin(self.obj)
-            else:
-                finalizer = fin
-            self.addfinalizer(finalizer)
+            setup_module()
+
+        teardown_module = _get_xunit_setup_teardown(self.obj, 'tearDownModule')
+        if teardown_module is None:
+            teardown_module = _get_xunit_setup_teardown(self.obj, 'teardown_module')
+        if teardown_module is not None:
+            self.addfinalizer(teardown_module)
+
+
+def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
+    """
+    Return a callable to perform xunit-style setup or teardown if
+    the function exists in the ``holder`` object.
+    The ``param_obj`` parameter is the parameter which will be passed to the function
+    when the callable is called without arguments, defaults to the ``holder`` object.
+    Return ``None`` if a suitable callable is not found.
+    """
+    param_obj = param_obj if param_obj is not None else holder
+    result = _get_xunit_func(holder, attr_name)
+    if result is not None:
+        arg_count = result.__code__.co_argcount
+        if inspect.ismethod(result):
+            arg_count -= 1
+        if arg_count:
+            return lambda: result(param_obj)
+        else:
+            return result
+
+
+def _get_xunit_func(obj, name):
+    """Return the attribute from the given object to be used as a setup/teardown
+    xunit-style function, but only if not marked as a fixture to
+    avoid calling it twice.
+    """
+    meth = getattr(obj, name, None)
+    if fixtures.getfixturemarker(meth) is None:
+        return meth
 
 
 class Class(PyCollector):
     """ Collector for test methods. """
     def collect(self):
+        if not safe_getattr(self.obj, "__test__", True):
+            return []
         if hasinit(self.obj):
             self.warn("C1", "cannot collect test class %r because it has a "
                 "__init__ constructor" % self.obj.__name__)
             return []
+        elif hasnew(self.obj):
+            self.warn("C1", "cannot collect test class %r because it has a "
+                            "__new__ constructor" % self.obj.__name__)
+            return []
         return [self._getcustomclass("Instance")(name="()", parent=self)]
 
     def setup(self):
-        setup_class = xunitsetup(self.obj, 'setup_class')
+        setup_class = _get_xunit_func(self.obj, 'setup_class')
         if setup_class is not None:
             setup_class = getattr(setup_class, 'im_func', setup_class)
             setup_class = getattr(setup_class, '__func__', setup_class)
             setup_class(self.obj)
 
         fin_class = getattr(self.obj, 'teardown_class', None)
         if fin_class is not None:
             fin_class = getattr(fin_class, 'im_func', fin_class)
             fin_class = getattr(fin_class, '__func__', fin_class)
             self.addfinalizer(lambda: fin_class(self.obj))
 
 class Instance(PyCollector):
     def _getobj(self):
-        obj = self.parent.obj()
-        return obj
+        return self.parent.obj()
 
     def collect(self):
         self.session._fixturemanager.parsefactories(self)
         return super(Instance, self).collect()
 
     def newinstance(self):
         self.obj = self._getobj()
         return self.obj
@@ -703,22 +543,22 @@ class FunctionMixin(PyobjMixin):
         else:
             obj = self.parent.obj
         if inspect.ismethod(self.obj):
             setup_name = 'setup_method'
             teardown_name = 'teardown_method'
         else:
             setup_name = 'setup_function'
             teardown_name = 'teardown_function'
-        setup_func_or_method = xunitsetup(obj, setup_name)
+        setup_func_or_method = _get_xunit_setup_teardown(obj, setup_name, param_obj=self.obj)
         if setup_func_or_method is not None:
-            setup_func_or_method(self.obj)
-        fin = getattr(obj, teardown_name, None)
-        if fin is not None:
-            self.addfinalizer(lambda: fin(self.obj))
+            setup_func_or_method()
+        teardown_func_or_method = _get_xunit_setup_teardown(obj, teardown_name, param_obj=self.obj)
+        if teardown_func_or_method is not None:
+            self.addfinalizer(teardown_func_or_method)
 
     def _prunetraceback(self, excinfo):
         if hasattr(self, '_obj') and not self.config.option.fulltrace:
             code = _pytest._code.Code(get_real_func(self.obj))
             path, firstlineno = code.path, code.firstlineno
             traceback = excinfo.traceback
             ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
             if ntraceback == traceback:
@@ -733,17 +573,17 @@ class FunctionMixin(PyobjMixin):
             # issue364: mark all but first and last frames to
             # only show a single-line message for each frame
             if self.config.option.tbstyle == "auto":
                 if len(excinfo.traceback) > 2:
                     for entry in excinfo.traceback[1:-1]:
                         entry.set_repr_style('short')
 
     def _repr_failure_py(self, excinfo, style="long"):
-        if excinfo.errisinstance(pytest.fail.Exception):
+        if excinfo.errisinstance(fail.Exception):
             if not excinfo.value.pytrace:
                 return py._builtin._totext(excinfo.value)
         return super(FunctionMixin, self)._repr_failure_py(excinfo,
             style=style)
 
     def repr_failure(self, excinfo, outerr=None):
         assert outerr is None, "XXX outerr usage is deprecated"
         style = self.config.option.tbstyle
@@ -752,16 +592,17 @@ class FunctionMixin(PyobjMixin):
         return self._repr_failure_py(excinfo, style=style)
 
 
 class Generator(FunctionMixin, PyCollector):
     def collect(self):
         # test generators are seen as collectors but they also
         # invoke setup/teardown on popular request
         # (induced by the common "test_*" naming shared with normal tests)
+        from _pytest import deprecated
         self.session._setupstate.prepare(self)
         # see FunctionMixin.setup and test_setupstate_is_preserved_134
         self._preservedparent = self.parent.obj
         l = []
         seen = {}
         for i, x in enumerate(self.obj()):
             name, call, args = self.getcallargs(x)
             if not callable(call):
@@ -769,72 +610,53 @@ class Generator(FunctionMixin, PyCollect
             if name is None:
                 name = "[%d]" % i
             else:
                 name = "['%s']" % name
             if name in seen:
                 raise ValueError("%r generated tests with non-unique name %r" %(self, name))
             seen[name] = True
             l.append(self.Function(name, self, args=args, callobj=call))
+            self.config.warn('C1', deprecated.YIELD_TESTS, fslocation=self.fspath)
         return l
 
     def getcallargs(self, obj):
         if not isinstance(obj, (tuple, list)):
             obj = (obj,)
-        # explict naming
+        # explicit naming
         if isinstance(obj[0], py.builtin._basestring):
             name = obj[0]
             obj = obj[1:]
         else:
             name = None
         call, args = obj[0], obj[1:]
         return name, call, args
 
 
 def hasinit(obj):
     init = getattr(obj, '__init__', None)
     if init:
-        if init != object.__init__:
-            return True
-
+        return init != object.__init__
 
 
-def fillfixtures(function):
-    """ fill missing funcargs for a test function. """
-    try:
-        request = function._request
-    except AttributeError:
-        # XXX this special code path is only expected to execute
-        # with the oejskit plugin.  It uses classes with funcargs
-        # and we thus have to work a bit to allow this.
-        fm = function.session._fixturemanager
-        fi = fm.getfixtureinfo(function.parent, function.obj, None)
-        function._fixtureinfo = fi
-        request = function._request = FixtureRequest(function)
-        request._fillfixtures()
-        # prune out funcargs for jstests
-        newfuncargs = {}
-        for name in fi.argnames:
-            newfuncargs[name] = function.funcargs[name]
-        function.funcargs = newfuncargs
-    else:
-        request._fillfixtures()
+def hasnew(obj):
+    new = getattr(obj, '__new__', None)
+    if new:
+        return new != object.__new__
 
 
-_notexists = object()
-
 class CallSpec2(object):
     def __init__(self, metafunc):
         self.metafunc = metafunc
         self.funcargs = {}
         self._idlist = []
         self.params = {}
-        self._globalid = _notexists
+        self._globalid = NOTSET
         self._globalid_args = set()
-        self._globalparam = _notexists
+        self._globalparam = NOTSET
         self._arg2scopenum = {}  # used for sorting parametrized resources
         self.keywords = {}
         self.indices = {}
 
     def copy(self, metafunc):
         cs = CallSpec2(self.metafunc)
         cs.funcargs.update(self.funcargs)
         cs.params.update(self.params)
@@ -850,17 +672,17 @@ class CallSpec2(object):
     def _checkargnotcontained(self, arg):
         if arg in self.params or arg in self.funcargs:
             raise ValueError("duplicate %r" %(arg,))
 
     def getparam(self, name):
         try:
             return self.params[name]
         except KeyError:
-            if self._globalparam is _notexists:
+            if self._globalparam is NOTSET:
                 raise ValueError(name)
             return self._globalparam
 
     @property
     def id(self):
         return "-".join(map(str, filter(None, self._idlist)))
 
     def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
@@ -873,65 +695,51 @@ class CallSpec2(object):
             self._arg2scopenum[arg] = scopenum
         self._idlist.append(id)
         self.keywords.update(keywords)
 
     def setall(self, funcargs, id, param):
         for x in funcargs:
             self._checkargnotcontained(x)
         self.funcargs.update(funcargs)
-        if id is not _notexists:
+        if id is not NOTSET:
             self._idlist.append(id)
-        if param is not _notexists:
-            assert self._globalparam is _notexists
+        if param is not NOTSET:
+            assert self._globalparam is NOTSET
             self._globalparam = param
         for arg in funcargs:
-            self._arg2scopenum[arg] = scopenum_function
+            self._arg2scopenum[arg] = fixtures.scopenum_function
 
 
-class FuncargnamesCompatAttr:
-    """ helper class so that Metafunc, Function and FixtureRequest
-    don't need to each define the "funcargnames" compatibility attribute.
-    """
-    @property
-    def funcargnames(self):
-        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
-        return self.fixturenames
-
-class Metafunc(FuncargnamesCompatAttr):
+class Metafunc(fixtures.FuncargnamesCompatAttr):
     """
     Metafunc objects are passed to the ``pytest_generate_tests`` hook.
     They help to inspect a test function and to generate tests according to
     test configuration or values specified in the class or module where a
     test function is defined.
-
-    :ivar fixturenames: set of fixture names required by the test function
-
-    :ivar function: underlying python test function
-
-    :ivar cls: class object where the test function is defined in or ``None``.
-
-    :ivar module: the module object where the test function is defined in.
-
-    :ivar config: access to the :class:`_pytest.config.Config` object for the
-        test session.
-
-    :ivar funcargnames:
-        .. deprecated:: 2.3
-            Use ``fixturenames`` instead.
     """
     def __init__(self, function, fixtureinfo, config, cls=None, module=None):
+        #: access to the :class:`_pytest.config.Config` object for the test session
         self.config = config
+
+        #: the module object where the test function is defined in.
         self.module = module
+
+        #: underlying python test function
         self.function = function
+
+        #: set of fixture names required by the test function
         self.fixturenames = fixtureinfo.names_closure
-        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
+
+        #: class object where the test function is defined in or ``None``.
         self.cls = cls
+
         self._calls = []
         self._ids = py.builtin.set()
+        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
 
     def parametrize(self, argnames, argvalues, indirect=False, ids=None,
         scope=None):
         """ Add new invocations to the underlying test function using the list
         of argvalues for the given argnames.  Parametrization is performed
         during the collection phase.  If you need to setup expensive resources
         see about setting indirect to do it rather at test setup time.
 
@@ -949,100 +757,109 @@ class Metafunc(FuncargnamesCompatAttr):
             names (subset of argnames). If True the list contains all names from
             the argnames. Each argvalue corresponding to an argname in this list will
             be passed as request.param to its respective argname fixture
             function so that it can perform more expensive setups during the
             setup phase of a test rather than at collection time.
 
         :arg ids: list of string ids, or a callable.
             If strings, each is corresponding to the argvalues so that they are
-            part of the test id.
+            part of the test id. If None is given as the id of a specific test, the
+            automatically generated id for that argument will be used.
             If callable, it should take one argument (a single argvalue) and return
             a string or return None. If None, the automatically generated id for that
             argument will be used.
             If no ids are provided they will be generated automatically from
             the argvalues.
 
         :arg scope: if specified it denotes the scope of the parameters.
             The scope is used for grouping tests by parameter instances.
             It will also override any fixture-function defined scope, allowing
             to set a dynamic scope using test context or configuration.
         """
-
-        # individual parametrized argument sets can be wrapped in a series
-        # of markers in which case we unwrap the values and apply the mark
-        # at Function init
-        newkeywords = {}
-        unwrapped_argvalues = []
-        for i, argval in enumerate(argvalues):
-            while isinstance(argval, MarkDecorator):
-                newmark = MarkDecorator(argval.markname,
-                                        argval.args[:-1], argval.kwargs)
-                newmarks = newkeywords.setdefault(i, {})
-                newmarks[newmark.markname] = newmark
-                argval = argval.args[-1]
-            unwrapped_argvalues.append(argval)
-        argvalues = unwrapped_argvalues
+        from _pytest.fixtures import scope2index
+        from _pytest.mark import MARK_GEN, ParameterSet
+        from py.io import saferepr
 
         if not isinstance(argnames, (tuple, list)):
             argnames = [x.strip() for x in argnames.split(",") if x.strip()]
-            if len(argnames) == 1:
-                argvalues = [(val,) for val in argvalues]
-        if not argvalues:
-            argvalues = [(_notexists,) * len(argnames)]
-            # we passed a empty list to parameterize, skip that test
-            #
+            force_tuple = len(argnames) == 1
+        else:
+            force_tuple = False
+        parameters = [
+            ParameterSet.extract_from(x, legacy_force_tuple=force_tuple)
+            for x in argvalues]
+        del argvalues
+
+        if not parameters:
             fs, lineno = getfslineno(self.function)
-            newmark = pytest.mark.skip(
-                reason="got empty parameter set %r, function %s at %s:%d" % (
-                    argnames, self.function.__name__, fs, lineno))
-            newmarks = newkeywords.setdefault(0, {})
-            newmarks[newmark.markname] = newmark
-
+            reason = "got empty parameter set %r, function %s at %s:%d" % (
+                    argnames, self.function.__name__, fs, lineno)
+            mark = MARK_GEN.skip(reason=reason)
+            parameters.append(ParameterSet(
+                values=(NOTSET,) * len(argnames),
+                marks=[mark],
+                id=None,
+            ))
 
         if scope is None:
-            scope = "function"
-        scopenum = scopes.index(scope)
+            scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
+
+        scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize))
         valtypes = {}
         for arg in argnames:
             if arg not in self.fixturenames:
-                raise ValueError("%r uses no fixture %r" %(self.function, arg))
+                if isinstance(indirect, (tuple, list)):
+                    name = 'fixture' if arg in indirect else 'argument'
+                else:
+                    name = 'fixture' if indirect else 'argument'
+                raise ValueError(
+                    "%r uses no %s %r" % (
+                            self.function, name, arg))
 
         if indirect is True:
             valtypes = dict.fromkeys(argnames, "params")
         elif indirect is False:
             valtypes = dict.fromkeys(argnames, "funcargs")
         elif isinstance(indirect, (tuple, list)):
             valtypes = dict.fromkeys(argnames, "funcargs")
             for arg in indirect:
                 if arg not in argnames:
-                    raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
+                    raise ValueError("indirect given to %r: fixture %r doesn't exist" % (
                                      self.function, arg))
                 valtypes[arg] = "params"
         idfn = None
         if callable(ids):
             idfn = ids
             ids = None
-        if ids and len(ids) != len(argvalues):
-            raise ValueError('%d tests specified with %d ids' %(
-                             len(argvalues), len(ids)))
-        if not ids:
-            ids = idmaker(argnames, argvalues, idfn)
+        if ids:
+            if len(ids) != len(parameters):
+                raise ValueError('%d tests specified with %d ids' % (
+                                 len(parameters), len(ids)))
+            for id_value in ids:
+                if id_value is not None and not isinstance(id_value, py.builtin._basestring):
+                    msg = 'ids must be list of strings, found: %s (type: %s)'
+                    raise ValueError(msg % (saferepr(id_value), type(id_value).__name__))
+        ids = idmaker(argnames, parameters, idfn, ids, self.config)
         newcalls = []
         for callspec in self._calls or [CallSpec2(self)]:
-            for param_index, valset in enumerate(argvalues):
-                assert len(valset) == len(argnames)
+            elements = zip(ids, parameters, count())
+            for a_id, param, param_index in elements:
+                if len(param.values) != len(argnames):
+                    raise ValueError(
+                        'In "parametrize" the number of values ({0}) must be '
+                        'equal to the number of names ({1})'.format(
+                            param.values, argnames))
                 newcallspec = callspec.copy(self)
-                newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
-                                     newkeywords.get(param_index, {}), scopenum,
-                                     param_index)
+                newcallspec.setmulti(valtypes, argnames, param.values, a_id,
+                                     param.deprecated_arg_dict, scopenum, param_index)
                 newcalls.append(newcallspec)
         self._calls = newcalls
 
-    def addcall(self, funcargs=None, id=_notexists, param=_notexists):
+    def addcall(self, funcargs=None, id=NOTSET, param=NOTSET):
         """ (deprecated, use parametrize) Add a new call to the underlying
         test function during the collection phase of a test run.  Note that
         request.addcall() is called during the test collection phase prior and
         independently to actual test execution.  You should only use addcall()
         if you need to specify multiple arguments of a test function.
 
         :arg funcargs: argument keyword dictionary used when invoking
             the test function.
@@ -1052,129 +869,203 @@ class Metafunc(FuncargnamesCompatAttr):
 
         :arg param: a parameter which will be exposed to a later fixture function
             invocation through the ``request.param`` attribute.
         """
         assert funcargs is None or isinstance(funcargs, dict)
         if funcargs is not None:
             for name in funcargs:
                 if name not in self.fixturenames:
-                    pytest.fail("funcarg %r not used in this function." % name)
+                    fail("funcarg %r not used in this function." % name)
         else:
             funcargs = {}
         if id is None:
             raise ValueError("id=None not allowed")
-        if id is _notexists:
+        if id is NOTSET:
             id = len(self._calls)
         id = str(id)
         if id in self._ids:
             raise ValueError("duplicate id %r" % id)
         self._ids.add(id)
 
         cs = CallSpec2(self)
         cs.setall(funcargs, id, param)
         self._calls.append(cs)
 
 
-if _PY3:
-    import codecs
+def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
+    """Find the most appropriate scope for a parametrized call based on its arguments.
 
-    def _escape_bytes(val):
-        """
-        If val is pure ascii, returns it as a str(), otherwise escapes
-        into a sequence of escaped bytes:
-        b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
+    When there's at least one direct argument, always use "function" scope.
+
+    When a test function is parametrized and all its arguments are indirect
+    (e.g. fixtures), return the most narrow scope based on the fixtures used.
 
-        note:
-           the obvious "v.decode('unicode-escape')" will return
-           valid utf-8 unicode if it finds them in the string, but we
-           want to return escaped bytes for any byte, even if they match
-           a utf-8 string.
-        """
-        if val:
-            # source: http://goo.gl/bGsnwC
-            encoded_bytes, _ = codecs.escape_encode(val)
-            return encoded_bytes.decode('ascii')
-        else:
-            # empty bytes crashes codecs.escape_encode (#1087)
-            return ''
-else:
-    def _escape_bytes(val):
-        """
-        In py2 bytes and str are the same type, so return it unchanged if it
-        is a full ascii string, otherwise escape it into its binary form.
-        """
-        try:
-            return val.decode('ascii')
-        except UnicodeDecodeError:
-            return val.encode('string-escape')
+    Related to issue #1832, based on code posted by @Kingdread.
+    """
+    from _pytest.fixtures import scopes
+    indirect_as_list = isinstance(indirect, (list, tuple))
+    all_arguments_are_fixtures = indirect is True or \
+                                 indirect_as_list and len(indirect) == len(argnames)
+    if all_arguments_are_fixtures:
+        fixturedefs = arg2fixturedefs or {}
+        used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()]
+        if used_scopes:
+            # Takes the most narrow scope from used fixtures
+            for scope in reversed(scopes):
+                if scope in used_scopes:
+                    return scope
+
+    return 'function'
 
 
-def _idval(val, argname, idx, idfn):
+def _idval(val, argname, idx, idfn, config=None):
     if idfn:
+        s = None
         try:
             s = idfn(val)
-            if s:
-                return s
         except Exception:
-            pass
+            # See issue https://github.com/pytest-dev/pytest/issues/2169
+            import warnings
+            msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx)
+            msg += '\nUpdate your code as this will raise an error in pytest-4.0.'
+            warnings.warn(msg, DeprecationWarning)
+        if s:
+            return _escape_strings(s)
 
-    if isinstance(val, bytes):
-        return _escape_bytes(val)
-    elif isinstance(val, (float, int, str, bool, NoneType)):
+    if config:
+        hook_id = config.hook.pytest_make_parametrize_id(
+            config=config, val=val, argname=argname)
+        if hook_id:
+            return hook_id
+
+    if isinstance(val, STRING_TYPES):
+        return _escape_strings(val)
+    elif isinstance(val, (float, int, bool, NoneType)):
         return str(val)
     elif isinstance(val, REGEX_TYPE):
-        return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
+        return _escape_strings(val.pattern)
     elif enum is not None and isinstance(val, enum.Enum):
         return str(val)
     elif isclass(val) and hasattr(val, '__name__'):
         return val.__name__
-    elif _PY2 and isinstance(val, unicode):
-        # special case for python 2: if a unicode string is
-        # convertible to ascii, return it as an str() object instead
-        try:
-            return str(val)
-        except UnicodeError:
-            # fallthrough
-            pass
     return str(argname)+str(idx)
 
-def _idvalset(idx, valset, argnames, idfn):
-    this_id = [_idval(val, argname, idx, idfn)
-               for val, argname in zip(valset, argnames)]
-    return "-".join(this_id)
+
+def _idvalset(idx, parameterset, argnames, idfn, ids, config=None):
+    if parameterset.id is not None:
+        return parameterset.id
+    if ids is None or (idx >= len(ids) or ids[idx] is None):
+        this_id = [_idval(val, argname, idx, idfn, config)
+                   for val, argname in zip(parameterset.values, argnames)]
+        return "-".join(this_id)
+    else:
+        return _escape_strings(ids[idx])
+
+
+def idmaker(argnames, parametersets, idfn=None, ids=None, config=None):
+    ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config)
+           for valindex, parameterset in enumerate(parametersets)]
+    if len(set(ids)) != len(ids):
+        # The ids are not unique
+        duplicates = [testid for testid in ids if ids.count(testid) > 1]
+        counters = collections.defaultdict(lambda: 0)
+        for index, testid in enumerate(ids):
+            if testid in duplicates:
+                ids[index] = testid + str(counters[testid])
+                counters[testid] += 1
+    return ids
+
+
+def show_fixtures_per_test(config):
+    from _pytest.main import wrap_session
+    return wrap_session(config, _show_fixtures_per_test)
+
+
+def _show_fixtures_per_test(config, session):
+    import _pytest.config
+    session.perform_collect()
+    curdir = py.path.local()
+    tw = _pytest.config.create_terminal_writer(config)
+    verbose = config.getvalue("verbose")
+
+    def get_best_rel(func):
+        loc = getlocation(func, curdir)
+        return curdir.bestrelpath(loc)
 
-def idmaker(argnames, argvalues, idfn=None):
-    ids = [_idvalset(valindex, valset, argnames, idfn)
-           for valindex, valset in enumerate(argvalues)]
-    if len(set(ids)) < len(ids):
-        # user may have provided a bad idfn which means the ids are not unique
-        ids = [str(i) + testid for i, testid in enumerate(ids)]
-    return ids
+    def write_fixture(fixture_def):
+        argname = fixture_def.argname
+
+        if verbose <= 0 and argname.startswith("_"):
+            return
+        if verbose > 0:
+            bestrel = get_best_rel(fixture_def.func)
+            funcargspec = "{0} -- {1}".format(argname, bestrel)
+        else:
+            funcargspec = argname
+        tw.line(funcargspec, green=True)
+
+        INDENT = '    {0}'
+        fixture_doc = fixture_def.func.__doc__
+
+        if fixture_doc:
+            for line in fixture_doc.strip().split('\n'):
+                tw.line(INDENT.format(line.strip()))
+        else:
+            tw.line(INDENT.format('no docstring available'), red=True)
+
+    def write_item(item):
+        name2fixturedefs = item._fixtureinfo.name2fixturedefs
+
+        if not name2fixturedefs:
+            # The given test item does not use any fixtures
+            return
+        bestrel = get_best_rel(item.function)
+
+        tw.line()
+        tw.sep('-', 'fixtures used by {0}'.format(item.name))
+        tw.sep('-', '({0})'.format(bestrel))
+        for argname, fixture_defs in sorted(name2fixturedefs.items()):
+            assert fixture_defs is not None
+            if not fixture_defs:
+                continue
+            # The last fixture def item in the list is expected
+            # to be the one used by the test item
+            write_fixture(fixture_defs[-1])
+
+    for item in session.items:
+        write_item(item)
+
 
 def showfixtures(config):
     from _pytest.main import wrap_session
     return wrap_session(config, _showfixtures_main)
 
+
 def _showfixtures_main(config, session):
     import _pytest.config
     session.perform_collect()
     curdir = py.path.local()
     tw = _pytest.config.create_terminal_writer(config)
     verbose = config.getvalue("verbose")
 
     fm = session._fixturemanager
 
     available = []
+    seen = set()
+
     for argname, fixturedefs in fm._arg2fixturedefs.items():
         assert fixturedefs is not None
         if not fixturedefs:
             continue
         for fixturedef in fixturedefs:
             loc = getlocation(fixturedef.func, curdir)
+            if (fixturedef.argname, loc) in seen:
+                continue
+            seen.add((fixturedef.argname, loc))
             available.append((len(fixturedef.baseid),
                               fixturedef.func.__module__,
                               curdir.bestrelpath(loc),
                               fixturedef.argname, fixturedef))
 
     available.sort()
     currentmodule = None
     for baseid, module, bestrel, argname, fixturedef in available:
@@ -1194,59 +1085,76 @@ def _showfixtures_main(config, session):
         doc = fixturedef.func.__doc__ or ""
         if doc:
             for line in doc.strip().split("\n"):
                 tw.line("    " + line.strip())
         else:
             tw.line("    %s: no docstring available" %(loc,),
                 red=True)
 
-def getlocation(function, curdir):
-    import inspect
-    fn = py.path.local(inspect.getfile(function))
-    lineno = py.builtin._getcode(function).co_firstlineno
-    if fn.relto(curdir):
-        fn = fn.relto(curdir)
-    return "%s:%d" %(fn, lineno+1)
 
 # builtin pytest.raises helper
 
 def raises(expected_exception, *args, **kwargs):
-    """ assert that a code block/function call raises ``expected_exception``
+    """
+    Assert that a code block/function call raises ``expected_exception``
     and raise a failure exception otherwise.
 
     This helper produces a ``ExceptionInfo()`` object (see below).
 
     If using Python 2.5 or above, you may use this function as a
     context manager::
 
         >>> with raises(ZeroDivisionError):
         ...    1/0
 
+    .. versionchanged:: 2.10
+
+    In the context manager form you may use the keyword argument
+    ``message`` to specify a custom failure message::
+
+        >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
+        ...    pass
+        Traceback (most recent call last):
+          ...
+        Failed: Expecting ZeroDivisionError
+
+
     .. note::
 
        When using ``pytest.raises`` as a context manager, it's worthwhile to
        note that normal context manager rules apply and that the exception
        raised *must* be the final line in the scope of the context manager.
        Lines of code after that, within the scope of the context manager will
        not be executed. For example::
 
-           >>> with raises(OSError) as exc_info:
-                   assert 1 == 1  # this will execute as expected
-                   raise OSError(errno.EEXISTS, 'directory exists')
-                   assert exc_info.value.errno == errno.EEXISTS  # this will not execute
+           >>> value = 15
+           >>> with raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...     assert exc_info.type == ValueError  # this will not execute
 
        Instead, the following approach must be taken (note the difference in
        scope)::
 
-           >>> with raises(OSError) as exc_info:
-                   assert 1 == 1  # this will execute as expected
-                   raise OSError(errno.EEXISTS, 'directory exists')
+           >>> with raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...
+           >>> assert exc_info.type == ValueError
 
-               assert exc_info.value.errno == errno.EEXISTS  # this will now execute
+    Or you can use the keyword argument ``match`` to assert that the
+    exception matches a text or regex::
+
+        >>> with raises(ValueError, match='must be 0 or None'):
+        ...     raise ValueError("value must be 0 or None")
+
+        >>> with raises(ValueError, match=r'must be \d+$'):
+        ...     raise ValueError("value must be 42")
+
 
     Or you can specify a callable by passing a to-be-called lambda::
 
         >>> raises(ZeroDivisionError, lambda: 1/0)
         <ExceptionInfo ...>
 
     or you can specify an arbitrary callable with arguments::
 
@@ -1276,33 +1184,35 @@ def raises(expected_exception, *args, **
         ``ExceptionInfo``) which makes Python keep all objects referenced
         from that cycle (including all local variables in the current
         frame) alive until the next cyclic garbage collection run. See the
         official Python ``try`` statement documentation for more detailed
         information.
 
     """
     __tracebackhide__ = True
-    if expected_exception is AssertionError:
-        # we want to catch a AssertionError
-        # replace our subclass with the builtin one
-        # see https://github.com/pytest-dev/pytest/issues/176
-        from _pytest.assertion.util import BuiltinAssertionError \
-            as expected_exception
     msg = ("exceptions must be old-style classes or"
            " derived from BaseException, not %s")
     if isinstance(expected_exception, tuple):
         for exc in expected_exception:
             if not isclass(exc):
                 raise TypeError(msg % type(exc))
     elif not isclass(expected_exception):
         raise TypeError(msg % type(expected_exception))
 
+    message = "DID NOT RAISE {0}".format(expected_exception)
+    match_expr = None
+
     if not args:
-        return RaisesContext(expected_exception)
+        if "message" in kwargs:
+            message = kwargs.pop("message")
+        if "match" in kwargs:
+            match_expr = kwargs.pop("match")
+            message += " matching '{0}'".format(match_expr)
+        return RaisesContext(expected_exception, message, match_expr)
     elif isinstance(args[0], str):
         code, = args
         assert isinstance(code, str)
         frame = sys._getframe(1)
         loc = frame.f_locals.copy()
         loc.update(kwargs)
         #print "raises frame scope: %r" % frame.f_locals
         try:
@@ -1313,53 +1223,315 @@ def raises(expected_exception, *args, **
         except expected_exception:
             return _pytest._code.ExceptionInfo()
     else:
         func = args[0]
         try:
             func(*args[1:], **kwargs)
         except expected_exception:
             return _pytest._code.ExceptionInfo()
-    pytest.fail("DID NOT RAISE {0}".format(expected_exception))
+    fail(message)
+
+
+raises.Exception = fail.Exception
+
 
 class RaisesContext(object):
-    def __init__(self, expected_exception):
+    def __init__(self, expected_exception, message, match_expr):
         self.expected_exception = expected_exception
+        self.message = message
+        self.match_expr = match_expr
         self.excinfo = None
 
     def __enter__(self):
         self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
         return self.excinfo
 
     def __exit__(self, *tp):
         __tracebackhide__ = True
         if tp[0] is None:
-            pytest.fail("DID NOT RAISE")
+            fail(self.message)
         if sys.version_info < (2, 7):
             # py26: on __exit__() exc_value often does not contain the
             # exception value.
             # http://bugs.python.org/issue7853
             if not isinstance(tp[1], BaseException):
                 exc_type, value, traceback = tp
                 tp = exc_type, exc_type(value), traceback
         self.excinfo.__init__(tp)
-        return issubclass(self.excinfo.type, self.expected_exception)
+        suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
+        if sys.version_info[0] == 2 and suppress_exception:
+            sys.exc_clear()
+        if self.match_expr:
+            self.excinfo.match(self.match_expr)
+        return suppress_exception
+
+
+# builtin pytest.approx helper
+
+class approx(object):
+    """
+    Assert that two numbers (or two sets of numbers) are equal to each other
+    within some tolerance.
+
+    Due to the `intricacies of floating-point arithmetic`__, numbers that we
+    would intuitively expect to be equal are not always so::
+
+        >>> 0.1 + 0.2 == 0.3
+        False
+
+    __ https://docs.python.org/3/tutorial/floatingpoint.html
+
+    This problem is commonly encountered when writing tests, e.g. when making
+    sure that floating-point values are what you expect them to be.  One way to
+    deal with this problem is to assert that two floating-point numbers are
+    equal to within some appropriate tolerance::
+
+        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
+        True
+
+    However, comparisons like this are tedious to write and difficult to
+    understand.  Furthermore, absolute comparisons like the one above are
+    usually discouraged because there's no tolerance that works well for all
+    situations.  ``1e-6`` is good for numbers around ``1``, but too small for
+    very big numbers and too big for very small ones.  It's better to express
+    the tolerance as a fraction of the expected value, but relative comparisons
+    like that are even more difficult to write correctly and concisely.
+
+    The ``approx`` class performs floating-point comparisons using a syntax
+    that's as intuitive as possible::
+
+        >>> from pytest import approx
+        >>> 0.1 + 0.2 == approx(0.3)
+        True
+
+    The same syntax also works on sequences of numbers::
+
+        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
+        True
+
+    By default, ``approx`` considers numbers within a relative tolerance of
+    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
+    This treatment would lead to surprising results if the expected value was
+    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
+    To handle this case less surprisingly, ``approx`` also considers numbers
+    within an absolute tolerance of ``1e-12`` of its expected value to be
+    equal.  Infinite numbers are another special case.  They are only
+    considered equal to themselves, regardless of the relative tolerance.  Both
+    the relative and absolute tolerances can be changed by passing arguments to
+    the ``approx`` constructor::
+
+        >>> 1.0001 == approx(1)
+        False
+        >>> 1.0001 == approx(1, rel=1e-3)
+        True
+        >>> 1.0001 == approx(1, abs=1e-3)
+        True
+
+    If you specify ``abs`` but not ``rel``, the comparison will not consider
+    the relative tolerance at all.  In other words, two numbers that are within
+    the default relative tolerance of ``1e-6`` will still be considered unequal
+    if they exceed the specified absolute tolerance.  If you specify both
+    ``abs`` and ``rel``, the numbers will be considered equal if either
+    tolerance is met::
+
+        >>> 1 + 1e-8 == approx(1)
+        True
+        >>> 1 + 1e-8 == approx(1, abs=1e-12)
+        False
+        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
+        True
+
+    If you're thinking about using ``approx``, then you might want to know how
+    it compares to other good ways of comparing floating-point numbers.  All of
+    these algorithms are based on relative and absolute tolerances and should
+    agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``:  True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met.  Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e.  neither ``a`` nor
+      ``b`` is a "reference value").  You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default.  Only available in python>=3.5.  `More information...`__
+
+      __ https://docs.python.org/3/library/math.html#math.isclose
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less that the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance.  Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value.  Support for comparing sequences
+      is provided by ``numpy.allclose``.  `More information...`__
+
+      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``.  No relative tolerance is
+      considered and the absolute tolerance cannot be changed, so this function
+      is not appropriate for very large or very small numbers.  Also, it's only
+      available in subclasses of ``unittest.TestCase`` and it's ugly because it
+      doesn't follow PEP8.  `More information...`__
+
+      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value.  In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+    """
+
+    def __init__(self, expected, rel=None, abs=None):
+        self.expected = expected
+        self.abs = abs
+        self.rel = rel
+
+    def __repr__(self):
+        return ', '.join(repr(x) for x in self.expected)
+
+    def __eq__(self, actual):
+        from collections import Iterable
+        if not isinstance(actual, Iterable):
+            actual = [actual]
+        if len(actual) != len(self.expected):
+            return False
+        return all(a == x for a, x in zip(actual, self.expected))
+
+    __hash__ = None
+
+    def __ne__(self, actual):
+        return not (actual == self)
+
+    @property
+    def expected(self):
+        # Regardless of whether the user-specified expected value is a number
+        # or a sequence of numbers, return a list of ApproxNotIterable objects
+        # that can be compared against.
+        from collections import Iterable
+        approx_non_iter = lambda x: ApproxNonIterable(x, self.rel, self.abs)
+        if isinstance(self._expected, Iterable):
+            return [approx_non_iter(x) for x in self._expected]
+        else:
+            return [approx_non_iter(self._expected)]
+
+    @expected.setter
+    def expected(self, expected):
+        self._expected = expected
+
+
+class ApproxNonIterable(object):
+    """
+    Perform approximate comparisons for single numbers only.
+
+    In other words, the ``expected`` attribute for objects of this class must
+    be some sort of number.  This is in contrast to the ``approx`` class, where
+    the ``expected`` attribute can either be a number of a sequence of numbers.
+    This class is responsible for making comparisons, while ``approx`` is
+    responsible for abstracting the difference between numbers and sequences of
+    numbers.  Although this class can stand on its own, it's only meant to be
+    used within ``approx``.
+    """
+
+    def __init__(self, expected, rel=None, abs=None):
+        self.expected = expected
+        self.abs = abs
+        self.rel = rel
+
+    def __repr__(self):
+        if isinstance(self.expected, complex):
+            return str(self.expected)
+
+        # Infinities aren't compared using tolerances, so don't show a
+        # tolerance.
+        if math.isinf(self.expected):
+            return str(self.expected)
+
+        # If a sensible tolerance can't be calculated, self.tolerance will
+        # raise a ValueError.  In this case, display '???'.
+        try:
+            vetted_tolerance = '{:.1e}'.format(self.tolerance)
+        except ValueError:
+            vetted_tolerance = '???'
+
+        if sys.version_info[0] == 2:
+            return '{0} +- {1}'.format(self.expected, vetted_tolerance)
+        else:
+            return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)
+
+    def __eq__(self, actual):
+        # Short-circuit exact equality.
+        if actual == self.expected:
+            return True
+
+        # Infinity shouldn't be approximately equal to anything but itself, but
+        # if there's a relative tolerance, it will be infinite and infinity
+        # will seem approximately equal to everything.  The equal-to-itself
+        # case would have been short circuited above, so here we can just
+        # return false if the expected value is infinite.  The abs() call is
+        # for compatibility with complex numbers.
+        if math.isinf(abs(self.expected)):
+            return False
+
+        # Return true if the two numbers are within the tolerance.
+        return abs(self.expected - actual) <= self.tolerance
+
+    __hash__ = None
+
+    def __ne__(self, actual):
+        return not (actual == self)
+
+    @property
+    def tolerance(self):
+        set_default = lambda x, default: x if x is not None else default
+
+        # Figure out what the absolute tolerance should be.  ``self.abs`` is
+        # either None or a value specified by the user.
+        absolute_tolerance = set_default(self.abs, 1e-12)
+
+        if absolute_tolerance < 0:
+            raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance))
+        if math.isnan(absolute_tolerance):
+            raise ValueError("absolute tolerance can't be NaN.")
+
+        # If the user specified an absolute tolerance but not a relative one,
+        # just return the absolute tolerance.
+        if self.rel is None:
+            if self.abs is not None:
+                return absolute_tolerance
+
+        # Figure out what the relative tolerance should be.  ``self.rel`` is
+        # either None or a value specified by the user.  This is done after
+        # we've made sure the user didn't ask for an absolute tolerance only,
+        # because we don't want to raise errors about the relative tolerance if
+        # we aren't even going to use it.
+        relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)
+
+        if relative_tolerance < 0:
+            raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance))
+        if math.isnan(relative_tolerance):
+            raise ValueError("relative tolerance can't be NaN.")
+
+        # Return the larger of the relative and absolute tolerances.
+        return max(relative_tolerance, absolute_tolerance)
+
 
 #
 #  the basic pytest Function item
 #
 
-class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
+class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr):
     """ a Function Item is responsible for setting up and executing a
     Python test function.
     """
     _genid = None
     def __init__(self, name, parent, args=None, config=None,
                  callspec=None, callobj=NOTSET, keywords=None, session=None,
-                 fixtureinfo=None):
+                 fixtureinfo=None, originalname=None):
         super(Function, self).__init__(name, parent, config=config,
                                        session=session)
         self._args = args
         if callobj is not NOTSET:
             self.obj = callobj
 
         self.keywords.update(self.obj.__dict__)
         if callspec:
@@ -1371,29 +1543,35 @@ class Function(FunctionMixin, pytest.Ite
         if fixtureinfo is None:
             fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                 self.parent, self.obj, self.cls,
                 funcargs=not self._isyieldedfunction())
         self._fixtureinfo = fixtureinfo
         self.fixturenames = fixtureinfo.names_closure
         self._initrequest()
 
+        #: original function name, without any decorations (for example
+        #: parametrization adds a ``"[...]"`` suffix to function names).
+        #:
+        #: .. versionadded:: 3.0
+        self.originalname = originalname
+
     def _initrequest(self):
         self.funcargs = {}
         if self._isyieldedfunction():
             assert not hasattr(self, "callspec"), (
                 "yielded functions (deprecated) cannot have funcargs")
         else:
             if hasattr(self, "callspec"):
                 callspec = self.callspec
                 assert not callspec.funcargs
                 self._genid = callspec.id
                 if hasattr(callspec, "param"):
                     self.param = callspec.param
-        self._request = FixtureRequest(self)
+        self._request = fixtures.FixtureRequest(self)
 
     @property
     def function(self):
         "underlying python 'function' object"
         return getattr(self.obj, 'im_func', self.obj)
 
     def _getobj(self):
         name = self.name
@@ -1411,890 +1589,9 @@ class Function(FunctionMixin, pytest.Ite
         return getattr(self, "_args", None) is not None
 
     def runtest(self):
         """ execute the underlying test function. """
         self.ihook.pytest_pyfunc_call(pyfuncitem=self)
 
     def setup(self):
         super(Function, self).setup()
-        fillfixtures(self)
-
-
-scope2props = dict(session=())
-scope2props["module"] = ("fspath", "module")
-scope2props["class"] = scope2props["module"] + ("cls",)
-scope2props["instance"] = scope2props["class"] + ("instance", )
-scope2props["function"] = scope2props["instance"] + ("function", "keywords")
-
-def scopeproperty(name=None, doc=None):
-    def decoratescope(func):
-        scopename = name or func.__name__
-        def provide(self):
-            if func.__name__ in scope2props[self.scope]:
-                return func(self)
-            raise AttributeError("%s not available in %s-scoped context" % (
-                scopename, self.scope))
-        return property(provide, None, None, func.__doc__)
-    return decoratescope
-
-
-class FixtureRequest(FuncargnamesCompatAttr):
-    """ A request for a fixture from a test or fixture function.
-
-    A request object gives access to the requesting test context
-    and has an optional ``param`` attribute in case
-    the fixture is parametrized indirectly.
-    """
-
-    def __init__(self, pyfuncitem):
-        self._pyfuncitem = pyfuncitem
-        #: fixture for which this request is being performed
-        self.fixturename = None
-        #: Scope string, one of "function", "class", "module", "session"
-        self.scope = "function"
-        self._funcargs  = {}
-        self._fixturedefs = {}
-        fixtureinfo = pyfuncitem._fixtureinfo
-        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
-        self._arg2index = {}
-        self.fixturenames = fixtureinfo.names_closure
-        self._fixturemanager = pyfuncitem.session._fixturemanager
-
-    @property
-    def node(self):
-        """ underlying collection node (depends on current request scope)"""
-        return self._getscopeitem(self.scope)
-
-
-    def _getnextfixturedef(self, argname):
-        fixturedefs = self._arg2fixturedefs.get(argname, None)
-        if fixturedefs is None:
-            # we arrive here because of a  a dynamic call to
-            # getfuncargvalue(argname) usage which was naturally
-            # not known at parsing/collection time
-            fixturedefs = self._fixturemanager.getfixturedefs(
-                            argname, self._pyfuncitem.parent.nodeid)
-            self._arg2fixturedefs[argname] = fixturedefs
-        # fixturedefs list is immutable so we maintain a decreasing index
-        index = self._arg2index.get(argname, 0) - 1
-        if fixturedefs is None or (-index > len(fixturedefs)):
-            raise FixtureLookupError(argname, self)
-        self._arg2index[argname] = index
-        return fixturedefs[index]
-
-    @property
-    def config(self):
-        """ the pytest config object associated with this request. """
-        return self._pyfuncitem.config
-
-
-    @scopeproperty()
-    def function(self):
-        """ test function object if the request has a per-function scope. """
-        return self._pyfuncitem.obj
-
-    @scopeproperty("class")
-    def cls(self):
-        """ class (can be None) where the test function was collected. """
-        clscol = self._pyfuncitem.getparent(pytest.Class)
-        if clscol:
-            return clscol.obj
-
-    @property
-    def instance(self):
-        """ instance (can be None) on which test function was collected. """
-        # unittest support hack, see _pytest.unittest.TestCaseFunction
-        try:
-            return self._pyfuncitem._testcase
-        except AttributeError:
-            function = getattr(self, "function", None)
-            if function is not None:
-                return py.builtin._getimself(function)
-
-    @scopeproperty()
-    def module(self):
-        """ python module object where the test function was collected. """
-        return self._pyfuncitem.getparent(pytest.Module).obj
-
-    @scopeproperty()
-    def fspath(self):
-        """ the file system path of the test module which collected this test. """
-        return self._pyfuncitem.fspath
-
-    @property
-    def keywords(self):
-        """ keywords/markers dictionary for the underlying node. """
-        return self.node.keywords
-
-    @property
-    def session(self):
-        """ pytest session object. """
-        return self._pyfuncitem.session
-
-    def addfinalizer(self, finalizer):
-        """ add finalizer/teardown function to be called after the
-        last test within the requesting test context finished
-        execution. """
-        # XXX usually this method is shadowed by fixturedef specific ones
-        self._addfinalizer(finalizer, scope=self.scope)
-
-    def _addfinalizer(self, finalizer, scope):
-        colitem = self._getscopeitem(scope)
-        self._pyfuncitem.session._setupstate.addfinalizer(
-            finalizer=finalizer, colitem=colitem)
-
-    def applymarker(self, marker):
-        """ Apply a marker to a single test function invocation.
-        This method is useful if you don't want to have a keyword/marker
-        on all function invocations.
-
-        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
-            created by a call to ``pytest.mark.NAME(...)``.
-        """
-        try:
-            self.node.keywords[marker.markname] = marker
-        except AttributeError:
-            raise ValueError(marker)
-
-    def raiseerror(self, msg):
-        """ raise a FixtureLookupError with the given message. """
-        raise self._fixturemanager.FixtureLookupError(None, self, msg)
-
-    def _fillfixtures(self):
-        item = self._pyfuncitem
-        fixturenames = getattr(item, "fixturenames", self.fixturenames)
-        for argname in fixturenames:
-            if argname not in item.funcargs:
-                item.funcargs[argname] = self.getfuncargvalue(argname)
-
-    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
-        """ (deprecated) Return a testing resource managed by ``setup`` &
-        ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
-        ``teardown`` function will be called so that subsequent calls to
-        ``setup`` would recreate the resource.  With pytest-2.3 you often
-        do not need ``cached_setup()`` as you can directly declare a scope
-        on a fixture function and register a finalizer through
-        ``request.addfinalizer()``.
-
-        :arg teardown: function receiving a previously setup resource.
-        :arg setup: a no-argument function creating a resource.
-        :arg scope: a string value out of ``function``, ``class``, ``module``
-            or ``session`` indicating the caching lifecycle of the resource.
-        :arg extrakey: added to internal caching key of (funcargname, scope).
-        """
-        if not hasattr(self.config, '_setupcache'):
-            self.config._setupcache = {} # XXX weakref?
-        cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
-        cache = self.config._setupcache
-        try:
-            val = cache[cachekey]
-        except KeyError:
-            self._check_scope(self.fixturename, self.scope, scope)
-            val = setup()
-            cache[cachekey] = val
-            if teardown is not None:
-                def finalizer():
-                    del cache[cachekey]
-                    teardown(val)
-                self._addfinalizer(finalizer, scope=scope)
-        return val
-
-    def getfuncargvalue(self, argname):
-        """ Dynamically retrieve a named fixture function argument.
-
-        As of pytest-2.3, it is easier and usually better to access other
-        fixture values by stating it as an input argument in the fixture
-        function.  If you only can decide about using another fixture at test
-        setup time, you may use this function to retrieve it inside a fixture
-        function body.
-        """
-        return self._get_active_fixturedef(argname).cached_result[0]
-
-    def _get_active_fixturedef(self, argname):
-        try:
-            return self._fixturedefs[argname]
-        except KeyError:
-            try:
-                fixturedef = self._getnextfixturedef(argname)
-            except FixtureLookupError:
-                if argname == "request":
-                    class PseudoFixtureDef:
-                        cached_result = (self, [0], None)
-                        scope = "function"
-                    return PseudoFixtureDef
-                raise
-        # remove indent to prevent the python3 exception
-        # from leaking into the call
-        result = self._getfuncargvalue(fixturedef)
-        self._funcargs[argname] = result
-        self._fixturedefs[argname] = fixturedef
-        return fixturedef
-
-    def _get_fixturestack(self):
-        current = self
-        l = []
-        while 1:
-            fixturedef = getattr(current, "_fixturedef", None)
-            if fixturedef is None:
-                l.reverse()
-                return l
-            l.append(fixturedef)
-            current = current._parent_request
-
-    def _getfuncargvalue(self, fixturedef):
-        # prepare a subrequest object before calling fixture function
-        # (latter managed by fixturedef)
-        argname = fixturedef.argname
-        funcitem = self._pyfuncitem
-        scope = fixturedef.scope
-        try:
-            param = funcitem.callspec.getparam(argname)
-        except (AttributeError, ValueError):
-            param = NOTSET
-            param_index = 0
-        else:
-            # indices might not be set if old-style metafunc.addcall() was used
-            param_index = funcitem.callspec.indices.get(argname, 0)
-            # if a parametrize invocation set a scope it will override
-            # the static scope defined with the fixture function
-            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
-            if paramscopenum is not None:
-                scope = scopes[paramscopenum]
-
-        subrequest = SubRequest(self, scope, param, param_index, fixturedef)
-
-        # check if a higher-level scoped fixture accesses a lower level one
-        subrequest._check_scope(argname, self.scope, scope)
-
-        # clear sys.exc_info before invoking the fixture (python bug?)
-        # if its not explicitly cleared it will leak into the call
-        exc_clear()
-        try:
-            # call the fixture function
-            val = fixturedef.execute(request=subrequest)
-        finally:
-            # if fixture function failed it might have registered finalizers
-            self.session._setupstate.addfinalizer(fixturedef.finish,
-                                                  subrequest.node)
-        return val
-
-    def _check_scope(self, argname, invoking_scope, requested_scope):
-        if argname == "request":
-            return
-        if scopemismatch(invoking_scope, requested_scope):
-            # try to report something helpful
-            lines = self._factorytraceback()
-            pytest.fail("ScopeMismatch: You tried to access the %r scoped "
-                "fixture %r with a %r scoped request object, "
-                "involved factories\n%s" %(
-                (requested_scope, argname, invoking_scope, "\n".join(lines))),
-                pytrace=False)
-
-    def _factorytraceback(self):
-        lines = []
-        for fixturedef in self._get_fixturestack():
-            factory = fixturedef.func
-            fs, lineno = getfslineno(factory)
-            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
-            args = _format_args(factory)
-            lines.append("%s:%d:  def %s%s" %(
-                p, lineno, factory.__name__, args))
-        return lines
-
-    def _getscopeitem(self, scope):
-        if scope == "function":
-            # this might also be a non-function Item despite its attribute name
-            return self._pyfuncitem
-        node = get_scope_node(self._pyfuncitem, scope)
-        if node is None and scope == "class":
-            # fallback to function item itself
-            node = self._pyfuncitem
-        assert node
-        return node
-
-    def __repr__(self):
-        return "<FixtureRequest for %r>" %(self.node)
-
-
-class SubRequest(FixtureRequest):
-    """ a sub request for handling getting a fixture from a
-    test function/fixture. """
-    def __init__(self, request, scope, param, param_index, fixturedef):
-        self._parent_request = request
-        self.fixturename = fixturedef.argname
-        if param is not NOTSET:
-            self.param = param
-        self.param_index = param_index
-        self.scope = scope
-        self._fixturedef = fixturedef
-        self.addfinalizer = fixturedef.addfinalizer
-        self._pyfuncitem = request._pyfuncitem
-        self._funcargs  = request._funcargs
-        self._fixturedefs = request._fixturedefs
-        self._arg2fixturedefs = request._arg2fixturedefs
-        self._arg2index = request._arg2index
-        self.fixturenames = request.fixturenames
-        self._fixturemanager = request._fixturemanager
-
-    def __repr__(self):
-        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
-
-
-class ScopeMismatchError(Exception):
-    """ A fixture function tries to use a different fixture function which
-    which has a lower scope (e.g. a Session one calls a function one)
-    """
-
-scopes = "session module class function".split()
-scopenum_function = scopes.index("function")
-def scopemismatch(currentscope, newscope):
-    return scopes.index(newscope) > scopes.index(currentscope)
-
-
-class FixtureLookupError(LookupError):
-    """ could not return a requested Fixture (missing or invalid). """
-    def __init__(self, argname, request, msg=None):
-        self.argname = argname
-        self.request = request
-        self.fixturestack = request._get_fixturestack()
-        self.msg = msg
-
-    def formatrepr(self):
-        tblines = []
-        addline = tblines.append
-        stack = [self.request._pyfuncitem.obj]
-        stack.extend(map(lambda x: x.func, self.fixturestack))
-        msg = self.msg
-        if msg is not None:
-            # the last fixture raise an error, let's present
-            # it at the requesting side
-            stack = stack[:-1]
-        for function in stack:
-            fspath, lineno = getfslineno(function)
-            try:
-                lines, _ = inspect.getsourcelines(get_real_func(function))
-            except (IOError, IndexError):
-                error_msg = "file %s, line %s: source code not available"
-                addline(error_msg % (fspath, lineno+1))
-            else:
-                addline("file %s, line %s" % (fspath, lineno+1))
-                for i, line in enumerate(lines):
-                    line = line.rstrip()
-                    addline("  " + line)
-                    if line.lstrip().startswith('def'):
-                        break
-
-        if msg is None:
-            fm = self.request._fixturemanager
-            available = []
-            for name, fixturedef in fm._arg2fixturedefs.items():
-                parentid = self.request._pyfuncitem.parent.nodeid
-                faclist = list(fm._matchfactories(fixturedef, parentid))
-                if faclist:
-                    available.append(name)
-            msg = "fixture %r not found" % (self.argname,)
-            msg += "\n available fixtures: %s" %(", ".join(available),)
-            msg += "\n use 'py.test --fixtures [testpath]' for help on them."
-
-        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
-
-class FixtureLookupErrorRepr(TerminalRepr):
-    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
-        self.tblines = tblines
-        self.errorstring = errorstring
-        self.filename = filename
-        self.firstlineno = firstlineno
-        self.argname = argname
-
-    def toterminal(self, tw):
-        #tw.line("FixtureLookupError: %s" %(self.argname), red=True)
-        for tbline in self.tblines:
-            tw.line(tbline.rstrip())
-        for line in self.errorstring.split("\n"):
-            tw.line("        " + line.strip(), red=True)
-        tw.line()
-        tw.line("%s:%d" % (self.filename, self.firstlineno+1))
-
-class FixtureManager:
-    """
-    pytest fixtures definitions and information is stored and managed
-    from this class.
-
-    During collection fm.parsefactories() is called multiple times to parse
-    fixture function definitions into FixtureDef objects and internal
-    data structures.
-
-    During collection of test functions, metafunc-mechanics instantiate
-    a FuncFixtureInfo object which is cached per node/func-name.
-    This FuncFixtureInfo object is later retrieved by Function nodes
-    which themselves offer a fixturenames attribute.
-
-    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
-    relevant for a particular function.  An initial list of fixtures is
-    assembled like this:
-
-    - ini-defined usefixtures
-    - autouse-marked fixtures along the collection chain up from the function
-    - usefixtures markers at module/class/function level
-    - test function funcargs
-
-    Subsequently the funcfixtureinfo.fixturenames attribute is computed
-    as the closure of the fixtures needed to setup the initial fixtures,
-    i. e. fixtures needed by fixture functions themselves are appended
-    to the fixturenames list.
-
-    Upon the test-setup phases all fixturenames are instantiated, retrieved
-    by a lookup of their FuncFixtureInfo.
-    """
-
-    _argprefix = "pytest_funcarg__"
-    FixtureLookupError = FixtureLookupError
-    FixtureLookupErrorRepr = FixtureLookupErrorRepr
-
-    def __init__(self, session):
-        self.session = session
-        self.config = session.config
-        self._arg2fixturedefs = {}
-        self._holderobjseen = set()
-        self._arg2finish = {}
-        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
-        session.config.pluginmanager.register(self, "funcmanage")
-
-
-    def getfixtureinfo(self, node, func, cls, funcargs=True):
-        if funcargs and not hasattr(node, "nofuncargs"):
-            if cls is not None:
-                startindex = 1
-            else:
-                startindex = None
-            argnames = getfuncargnames(func, startindex)
-        else:
-            argnames = ()
-        usefixtures = getattr(func, "usefixtures", None)
-        initialnames = argnames
-        if usefixtures is not None:
-            initialnames = usefixtures.args + initialnames
-        fm = node.session._fixturemanager
-        names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
-                                                              node)
-        return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
-
-    def pytest_plugin_registered(self, plugin):
-        nodeid = None
-        try:
-            p = py.path.local(plugin.__file__)
-        except AttributeError:
-            pass
-        else:
-            # construct the base nodeid which is later used to check
-            # what fixtures are visible for particular tests (as denoted
-            # by their test id)
-            if p.basename.startswith("conftest.py"):
-                nodeid = p.dirpath().relto(self.config.rootdir)
-                if p.sep != "/":
-                    nodeid = nodeid.replace(p.sep, "/")
-        self.parsefactories(plugin, nodeid)
-
-    def _getautousenames(self, nodeid):
-        """ return a tuple of fixture names to be used. """
-        autousenames = []
-        for baseid, basenames in self._nodeid_and_autousenames:
-            if nodeid.startswith(baseid):
-                if baseid:
-                    i = len(baseid)
-                    nextchar = nodeid[i:i+1]
-                    if nextchar and nextchar not in ":/":
-                        continue
-                autousenames.extend(basenames)
-        # make sure autousenames are sorted by scope, scopenum 0 is session
-        autousenames.sort(
-            key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
-        return autousenames
-
-    def getfixtureclosure(self, fixturenames, parentnode):
-        # collect the closure of all fixtures , starting with the given
-        # fixturenames as the initial set.  As we have to visit all
-        # factory definitions anyway, we also return a arg2fixturedefs
-        # mapping so that the caller can reuse it and does not have
-        # to re-discover fixturedefs again for each fixturename
-        # (discovering matching fixtures for a given name/node is expensive)
-
-        parentid = parentnode.nodeid
-        fixturenames_closure = self._getautousenames(parentid)
-        def merge(otherlist):
-            for arg in otherlist:
-                if arg not in fixturenames_closure:
-                    fixturenames_closure.append(arg)
-        merge(fixturenames)
-        arg2fixturedefs = {}
-        lastlen = -1
-        while lastlen != len(fixturenames_closure):
-            lastlen = len(fixturenames_closure)
-            for argname in fixturenames_closure:
-                if argname in arg2fixturedefs:
-                    continue
-                fixturedefs = self.getfixturedefs(argname, parentid)
-                if fixturedefs:
-                    arg2fixturedefs[argname] = fixturedefs
-                    merge(fixturedefs[-1].argnames)
-        return fixturenames_closure, arg2fixturedefs
-
-    def pytest_generate_tests(self, metafunc):
-        for argname in metafunc.fixturenames:
-            faclist = metafunc._arg2fixturedefs.get(argname)
-            if faclist:
-                fixturedef = faclist[-1]
-                if fixturedef.params is not None:
-                    func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
-                    # skip directly parametrized arguments
-                    argnames = func_params[0]
-                    if not isinstance(argnames, (tuple, list)):
-                        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
-                    if argname not in func_params and argname not in argnames:
-                        metafunc.parametrize(argname, fixturedef.params,
-                                             indirect=True, scope=fixturedef.scope,
-                                             ids=fixturedef.ids)
-            else:
-                continue # will raise FixtureLookupError at setup time
-
-    def pytest_collection_modifyitems(self, items):
-        # separate parametrized setups
-        items[:] = reorder_items(items)
-
-    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
-        if nodeid is not NOTSET:
-            holderobj = node_or_obj
-        else:
-            holderobj = node_or_obj.obj
-            nodeid = node_or_obj.nodeid
-        if holderobj in self._holderobjseen:
-            return
-        self._holderobjseen.add(holderobj)
-        autousenames = []
-        for name in dir(holderobj):
-            obj = getattr(holderobj, name, None)
-            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
-            # or are "@pytest.fixture" marked
-            marker = getfixturemarker(obj)
-            if marker is None:
-                if not name.startswith(self._argprefix):
-                    continue
-                if not callable(obj):
-                    continue
-                marker = defaultfuncargprefixmarker
-                name = name[len(self._argprefix):]
-            elif not isinstance(marker, FixtureFunctionMarker):
-                # magic globals  with __getattr__ might have got us a wrong
-                # fixture attribute
-                continue
-            else:
-                assert not name.startswith(self._argprefix)
-            fixturedef = FixtureDef(self, nodeid, name, obj,
-                                    marker.scope, marker.params,
-                                    yieldctx=marker.yieldctx,
-                                    unittest=unittest, ids=marker.ids)
-            faclist = self._arg2fixturedefs.setdefault(name, [])
-            if fixturedef.has_location:
-                faclist.append(fixturedef)
-            else:
-                # fixturedefs with no location are at the front
-                # so this inserts the current fixturedef after the
-                # existing fixturedefs from external plugins but
-                # before the fixturedefs provided in conftests.
-                i = len([f for f in faclist if not f.has_location])
-                faclist.insert(i, fixturedef)
-            if marker.autouse:
-                autousenames.append(name)
-        if autousenames:
-            self._nodeid_and_autousenames.append((nodeid or '', autousenames))
-
-    def getfixturedefs(self, argname, nodeid):
-        try:
-            fixturedefs = self._arg2fixturedefs[argname]
-        except KeyError:
-            return None
-        else:
-            return tuple(self._matchfactories(fixturedefs, nodeid))
-
-    def _matchfactories(self, fixturedefs, nodeid):
-        for fixturedef in fixturedefs:
-            if nodeid.startswith(fixturedef.baseid):
-                yield fixturedef
-
-
-def fail_fixturefunc(fixturefunc, msg):
-    fs, lineno = getfslineno(fixturefunc)
-    location = "%s:%s" % (fs, lineno+1)
-    source = _pytest._code.Source(fixturefunc)
-    pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
-                pytrace=False)
-
-def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
-    if yieldctx:
-        if not is_generator(fixturefunc):
-            fail_fixturefunc(fixturefunc,
-                msg="yield_fixture requires yield statement in function")
-        iter = fixturefunc(**kwargs)
-        next = getattr(iter, "__next__", None)
-        if next is None:
-            next = getattr(iter, "next")
-        res = next()
-        def teardown():
-            try:
-                next()
-            except StopIteration:
-                pass
-            else:
-                fail_fixturefunc(fixturefunc,
-                    "yield_fixture function has more than one 'yield'")
-        request.addfinalizer(teardown)
-    else:
-        if is_generator(fixturefunc):
-            fail_fixturefunc(fixturefunc,
-                msg="pytest.fixture functions cannot use ``yield``. "
-                    "Instead write and return an inner function/generator "
-                    "and let the consumer call and iterate over it.")
-        res = fixturefunc(**kwargs)
-    return res
-
-class FixtureDef:
-    """ A container for a factory definition. """
-    def __init__(self, fixturemanager, baseid, argname, func, scope, params,
-                 yieldctx, unittest=False, ids=None):
-        self._fixturemanager = fixturemanager
-        self.baseid = baseid or ''
-        self.has_location = baseid is not None
-        self.func = func
-        self.argname = argname
-        self.scope = scope
-        self.scopenum = scopes.index(scope or "function")
-        self.params = params
-        startindex = unittest and 1 or None
-        self.argnames = getfuncargnames(func, startindex=startindex)
-        self.yieldctx = yieldctx
-        self.unittest = unittest
-        self.ids = ids
-        self._finalizer = []
-
-    def addfinalizer(self, finalizer):
-        self._finalizer.append(finalizer)
-
-    def finish(self):
-        try:
-            while self._finalizer:
-                func = self._finalizer.pop()
-                func()
-        finally:
-            # even if finalization fails, we invalidate
-            # the cached fixture value
-            if hasattr(self, "cached_result"):
-                del self.cached_result
-
-    def execute(self, request):
-        # get required arguments and register our own finish()
-        # with their finalization
-        kwargs = {}
-        for argname in self.argnames:
-            fixturedef = request._get_active_fixturedef(argname)
-            result, arg_cache_key, exc = fixturedef.cached_result
-            request._check_scope(argname, request.scope, fixturedef.scope)
-            kwargs[argname] = result
-            if argname != "request":
-                fixturedef.addfinalizer(self.finish)
-
-        my_cache_key = request.param_index
-        cached_result = getattr(self, "cached_result", None)
-        if cached_result is not None:
-            result, cache_key, err = cached_result
-            if my_cache_key == cache_key:
-                if err is not None:
-                    py.builtin._reraise(*err)
-                else:
-                    return result
-            # we have a previous but differently parametrized fixture instance
-            # so we need to tear it down before creating a new one
-            self.finish()
-            assert not hasattr(self, "cached_result")
-
-        fixturefunc = self.func
-
-        if self.unittest:
-            if request.instance is not None:
-                # bind the unbound method to the TestCase instance
-                fixturefunc = self.func.__get__(request.instance)
-        else:
-            # the fixture function needs to be bound to the actual
-            # request.instance so that code working with "self" behaves
-            # as expected.
-            if request.instance is not None:
-                fixturefunc = getimfunc(self.func)
-                if fixturefunc != self.func:
-                    fixturefunc = fixturefunc.__get__(request.instance)
-
-        try:
-            result = call_fixture_func(fixturefunc, request, kwargs,
-                                       self.yieldctx)
-        except Exception:
-            self.cached_result = (None, my_cache_key, sys.exc_info())
-            raise
-        self.cached_result = (result, my_cache_key, None)
-        return result
-
-    def __repr__(self):
-        return ("<FixtureDef name=%r scope=%r baseid=%r >" %
-                (self.argname, self.scope, self.baseid))
-
-def num_mock_patch_args(function):
-    """ return number of arguments used up by mock arguments (if any) """
-    patchings = getattr(function, "patchings", None)
-    if not patchings:
-        return 0
-    mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
-    if mock is not None:
-        return len([p for p in patchings
-                        if not p.attribute_name and p.new is mock.DEFAULT])
-    return len(patchings)
-
-
-def getfuncargnames(function, startindex=None):
-    # XXX merge with main.py's varnames
-    #assert not isclass(function)
-    realfunction = function
-    while hasattr(realfunction, "__wrapped__"):
-        realfunction = realfunction.__wrapped__
-    if startindex is None:
-        startindex = inspect.ismethod(function) and 1 or 0
-    if realfunction != function:
-        startindex += num_mock_patch_args(function)
-        function = realfunction
-    if isinstance(function, functools.partial):
-        argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
-        partial = function
-        argnames = argnames[len(partial.args):]
-        if partial.keywords:
-            for kw in partial.keywords:
-                argnames.remove(kw)
-    else:
-        argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
-    defaults = getattr(function, 'func_defaults',
-                       getattr(function, '__defaults__', None)) or ()
-    numdefaults = len(defaults)
-    if numdefaults:
-        return tuple(argnames[startindex:-numdefaults])
-    return tuple(argnames[startindex:])
-
-# algorithm for sorting on a per-parametrized resource setup basis
-# it is called for scopenum==0 (session) first and performs sorting
-# down to the lower scopes such as to minimize number of "high scope"
-# setups and teardowns
-
-def reorder_items(items):
-    argkeys_cache = {}
-    for scopenum in range(0, scopenum_function):
-        argkeys_cache[scopenum] = d = {}
-        for item in items:
-            keys = set(get_parametrized_fixture_keys(item, scopenum))
-            if keys:
-                d[item] = keys
-    return reorder_items_atscope(items, set(), argkeys_cache, 0)
-
-def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
-    if scopenum >= scopenum_function or len(items) < 3:
-        return items
-    items_done = []
-    while 1:
-        items_before, items_same, items_other, newignore = \
-                slice_items(items, ignore, argkeys_cache[scopenum])
-        items_before = reorder_items_atscope(
-                            items_before, ignore, argkeys_cache,scopenum+1)
-        if items_same is None:
-            # nothing to reorder in this scope
-            assert items_other is None
-            return items_done + items_before
-        items_done.extend(items_before)
-        items = items_same + items_other
-        ignore = newignore
-
-
-def slice_items(items, ignore, scoped_argkeys_cache):
-    # we pick the first item which uses a fixture instance in the
-    # requested scope and which we haven't seen yet.  We slice the input
-    # items list into a list of items_nomatch, items_same and
-    # items_other
-    if scoped_argkeys_cache:  # do we need to do work at all?
-        it = iter(items)
-        # first find a slicing key
-        for i, item in enumerate(it):
-            argkeys = scoped_argkeys_cache.get(item)
-            if argkeys is not None:
-                argkeys = argkeys.difference(ignore)
-                if argkeys:  # found a slicing key
-                    slicing_argkey = argkeys.pop()
-                    items_before = items[:i]
-                    items_same = [item]
-                    items_other = []
-                    # now slice the remainder of the list
-                    for item in it:
-                        argkeys = scoped_argkeys_cache.get(item)
-                        if argkeys and slicing_argkey in argkeys and \
-                            slicing_argkey not in ignore:
-                            items_same.append(item)
-                        else:
-                            items_other.append(item)
-                    newignore = ignore.copy()
-                    newignore.add(slicing_argkey)
-                    return (items_before, items_same, items_other, newignore)
-    return items, None, None, None
-
-def get_parametrized_fixture_keys(item, scopenum):
-    """ return list of keys for all parametrized arguments which match
-    the specified scope. """
-    assert scopenum < scopenum_function  # function
-    try:
-        cs = item.callspec
-    except AttributeError:
-        pass
-    else:
-        # cs.indictes.items() is random order of argnames but
-        # then again different functions (items) can change order of
-        # arguments so it doesn't matter much probably
-        for argname, param_index in cs.indices.items():
-            if cs._arg2scopenum[argname] != scopenum:
-                continue
-            if scopenum == 0:    # session
-                key = (argname, param_index)
-            elif scopenum == 1:  # module
-                key = (argname, param_index, item.fspath)
-            elif scopenum == 2:  # class
-                key = (argname, param_index, item.fspath, item.cls)
-            yield key
-
-
-def xunitsetup(obj, name):
-    meth = getattr(obj, name, None)
-    if getfixturemarker(meth) is None:
-        return meth
-
-def getfixturemarker(obj):
-    """ return fixturemarker or None if it doesn't exist or raised
-    exceptions."""
-    try:
-        return getattr(obj, "_pytestfixturefunction", None)
-    except KeyboardInterrupt:
-        raise
-    except Exception:
-        # some objects raise errors like request (from flask import request)
-        # we don't expect them to be fixture functions
-        return None
-
-scopename2class = {
-    'class': Class,
-    'module': Module,
-    'function': pytest.Item,
-}
-def get_scope_node(node, scope):
-    cls = scopename2class.get(scope)
-    if cls is None:
-        if scope == "session":
-            return node.session
-        raise ValueError("unknown scope")
-    return node.getparent(cls)
+        fixtures.fillfixtures(self)
--- a/third_party/python/pytest/_pytest/recwarn.py
+++ b/third_party/python/pytest/_pytest/recwarn.py
@@ -1,83 +1,89 @@
 """ recording warnings during test function execution. """
+from __future__ import absolute_import, division, print_function
 
 import inspect
 
 import _pytest._code
 import py
 import sys
 import warnings
-import pytest
+from _pytest.fixtures import yield_fixture
 
 
-@pytest.yield_fixture
-def recwarn(request):
+@yield_fixture
+def recwarn():
     """Return a WarningsRecorder instance that provides these methods:
 
     * ``pop(category=None)``: return last warning matching the category.
     * ``clear()``: clear list of warnings
 
     See http://docs.python.org/library/warnings.html for information
     on warning categories.
     """
     wrec = WarningsRecorder()
     with wrec:
         warnings.simplefilter('default')
         yield wrec
 
 
-def pytest_namespace():
-    return {'deprecated_call': deprecated_call,
-            'warns': warns}
-
+def deprecated_call(func=None, *args, **kwargs):
+    """context manager that can be used to ensure a block of code triggers a
+    ``DeprecationWarning`` or ``PendingDeprecationWarning``::
 
-def deprecated_call(func=None, *args, **kwargs):
-    """ assert that calling ``func(*args, **kwargs)`` triggers a
-    ``DeprecationWarning`` or ``PendingDeprecationWarning``.
-
-    This function can be used as a context manager::
+        >>> import warnings
+        >>> def api_call_v2():
+        ...     warnings.warn('use v3 of this api', DeprecationWarning)
+        ...     return 200
 
         >>> with deprecated_call():
-        ...    myobject.deprecated_method()
+        ...    assert api_call_v2() == 200
 
-    Note: we cannot use WarningsRecorder here because it is still subject
-    to the mechanism that prevents warnings of the same type from being
-    triggered twice for the same module. See #1190.
+    ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``,
+    in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings
+    types above.
     """
     if not func:
-        return WarningsChecker(expected_warning=DeprecationWarning)
+        return _DeprecatedCallContext()
+    else:
+        __tracebackhide__ = True
+        with _DeprecatedCallContext():
+            return func(*args, **kwargs)
 
-    categories = []
 
-    def warn_explicit(message, category, *args, **kwargs):
-        categories.append(category)
-        old_warn_explicit(message, category, *args, **kwargs)
+class _DeprecatedCallContext(object):
+    """Implements the logic to capture deprecation warnings as a context manager."""
 
-    def warn(message, category=None, *args, **kwargs):
-        if isinstance(message, Warning):
-            categories.append(message.__class__)
-        else:
-            categories.append(category)
-        old_warn(message, category, *args, **kwargs)
+    def __enter__(self):
+        self._captured_categories = []
+        self._old_warn = warnings.warn
+        self._old_warn_explicit = warnings.warn_explicit
+        warnings.warn_explicit = self._warn_explicit
+        warnings.warn = self._warn
 
-    old_warn = warnings.warn
-    old_warn_explicit = warnings.warn_explicit
-    warnings.warn_explicit = warn_explicit
-    warnings.warn = warn
-    try:
-        ret = func(*args, **kwargs)
-    finally:
-        warnings.warn_explicit = old_warn_explicit
-        warnings.warn = old_warn
-    deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
-    if not any(issubclass(c, deprecation_categories) for c in categories):
-        __tracebackhide__ = True
-        raise AssertionError("%r did not produce DeprecationWarning" % (func,))
-    return ret
+    def _warn_explicit(self, message, category, *args, **kwargs):
+        self._captured_categories.append(category)
+
+    def _warn(self, message, category=None, *args, **kwargs):
+        if isinstance(message, Warning):
+            self._captured_categories.append(message.__class__)
+        else:
+            self._captured_categories.append(category)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        warnings.warn_explicit = self._old_warn_explicit
+        warnings.warn = self._old_warn
+
+        if exc_type is None:
+            deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
+            if not any(issubclass(c, deprecation_categories) for c in self._captured_categories):
+                __tracebackhide__ = True
+                msg = "Did not produce DeprecationWarning or PendingDeprecationWarning"
+                raise AssertionError(msg)
 
 
 def warns(expected_warning, *args, **kwargs):
     """Assert that code raises a particular class of warning.
 
     Specifically, the input @expected_warning can be a warning class or
     tuple of warning classes, and the code must return that warning
     (if a single class) or one of those warnings (if a tuple).
@@ -105,34 +111,24 @@ def warns(expected_warning, *args, **kwa
             code = _pytest._code.Source(code).compile()
             py.builtin.exec_(code, frame.f_globals, loc)
     else:
         func = args[0]
         with wcheck:
             return func(*args[1:], **kwargs)
 
 
-class RecordedWarning(object):
-    def __init__(self, message, category, filename, lineno, file, line):
-        self.message = message
-        self.category = category
-        self.filename = filename
-        self.lineno = lineno
-        self.file = file
-        self.line = line
-
-
-class WarningsRecorder(object):
+class WarningsRecorder(warnings.catch_warnings):
     """A context manager to record raised warnings.
 
     Adapted from `warnings.catch_warnings`.
     """
 
-    def __init__(self, module=None):
-        self._module = sys.modules['warnings'] if module is None else module
+    def __init__(self):
+        super(WarningsRecorder, self).__init__(record=True)
         self._entered = False
         self._list = []
 
     @property
     def list(self):
         """The list of recorded warnings."""
         return self._list
 
@@ -159,48 +155,30 @@ class WarningsRecorder(object):
     def clear(self):
         """Clear the list of recorded warnings."""
         self._list[:] = []
 
     def __enter__(self):
         if self._entered:
             __tracebackhide__ = True
             raise RuntimeError("Cannot enter %r twice" % self)
-        self._entered = True
-        self._filters = self._module.filters
-        self._module.filters = self._filters[:]
-        self._showwarning = self._module.showwarning
-
-        def showwarning(message, category, filename, lineno,
-                        file=None, line=None):
-            self._list.append(RecordedWarning(
-                message, category, filename, lineno, file, line))
-
-            # still perform old showwarning functionality
-            self._showwarning(
-                message, category, filename, lineno, file=file, line=line)
-
-        self._module.showwarning = showwarning
-
-        # allow the same warning to be raised more than once
-
-        self._module.simplefilter('always')
+        self._list = super(WarningsRecorder, self).__enter__()
+        warnings.simplefilter('always')
         return self
 
     def __exit__(self, *exc_info):
         if not self._entered:
             __tracebackhide__ = True
             raise RuntimeError("Cannot exit %r without entering first" % self)
-        self._module.filters = self._filters
-        self._module.showwarning = self._showwarning
+        super(WarningsRecorder, self).__exit__(*exc_info)
 
 
 class WarningsChecker(WarningsRecorder):
-    def __init__(self, expected_warning=None, module=None):
-        super(WarningsChecker, self).__init__(module=module)
+    def __init__(self, expected_warning=None):
+        super(WarningsChecker, self).__init__()
 
         msg = ("exceptions must be old-style classes or "
                "derived from Warning, not %s")
         if isinstance(expected_warning, tuple):
             for exc in expected_warning:
                 if not inspect.isclass(exc):
                     raise TypeError(msg % type(exc))
         elif inspect.isclass(expected_warning):
@@ -211,11 +189,16 @@ class WarningsChecker(WarningsRecorder):
         self.expected_warning = expected_warning
 
     def __exit__(self, *exc_info):
         super(WarningsChecker, self).__exit__(*exc_info)
 
         # only check if we're not currently handling an exception
         if all(a is None for a in exc_info):
             if self.expected_warning is not None:
-                if not any(r.category in self.expected_warning for r in self):
+                if not any(issubclass(r.category, self.expected_warning)
+                           for r in self):
                     __tracebackhide__ = True
-                    pytest.fail("DID NOT WARN")
+                    from _pytest.runner import fail
+                    fail("DID NOT WARN. No warnings of type {0} was emitted. "
+                         "The list of emitted warnings is: {1}.".format(
+                            self.expected_warning,
+                            [each.message for each in self]))
--- a/third_party/python/pytest/_pytest/resultlog.py
+++ b/third_party/python/pytest/_pytest/resultlog.py
@@ -1,32 +1,36 @@
 """ log machine-parseable test session result information in a plain
 text file.
 """
+from __future__ import absolute_import, division, print_function
 
 import py
 import os
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "resultlog plugin options")
     group.addoption('--resultlog', '--result-log', action="store",
         metavar="path", default=None,
-        help="path for machine-readable result log.")
+        help="DEPRECATED path for machine-readable result log.")
 
 def pytest_configure(config):
     resultlog = config.option.resultlog
     # prevent opening resultlog on slave nodes (xdist)
     if resultlog and not hasattr(config, 'slaveinput'):
         dirname = os.path.dirname(os.path.abspath(resultlog))
         if not os.path.isdir(dirname):
             os.makedirs(dirname)
         logfile = open(resultlog, 'w', 1) # line buffered
         config._resultlog = ResultLog(config, logfile)
         config.pluginmanager.register(config._resultlog)
 
+        from _pytest.deprecated import RESULT_LOG
+        config.warn('C1', RESULT_LOG)
+
 def pytest_unconfigure(config):
     resultlog = getattr(config, '_resultlog', None)
     if resultlog:
         resultlog.logfile.close()
         del config._resultlog
         config.pluginmanager.unregister(resultlog)
 
 def generic_path(item):
@@ -53,19 +57,19 @@ def generic_path(item):
     return ''.join(gpath)
 
 class ResultLog(object):
     def __init__(self, config, logfile):
         self.config = config
         self.logfile = logfile # preferably line buffered
 
     def write_log_entry(self, testpath, lettercode, longrepr):
-        py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
+        print("%s %s" % (lettercode, testpath), file=self.logfile)
         for line in longrepr.splitlines():
-            py.builtin.print_(" %s" % line, file=self.logfile)
+            print(" %s" % line, file=self.logfile)
 
     def log_outcome(self, report, lettercode, longrepr):
         testpath = getattr(report, 'nodeid', None)
         if testpath is None:
             testpath = report.fspath
         self.write_log_entry(testpath, lettercode, longrepr)
 
     def pytest_runtest_logreport(self, report):
--- a/third_party/python/pytest/_pytest/runner.py
+++ b/third_party/python/pytest/_pytest/runner.py
@@ -1,25 +1,19 @@
 """ basic collect and runtest protocol implementations """
+from __future__ import absolute_import, division, print_function
+
 import bdb
 import sys
 from time import time
 
 import py
-import pytest
 from _pytest._code.code import TerminalRepr, ExceptionInfo
 
 
-def pytest_namespace():
-    return {
-        'fail'         : fail,
-        'skip'         : skip,
-        'importorskip' : importorskip,
-        'exit'         : exit,
-    }
 
 #
 # pytest plugin hooks
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "reporting", after="general")
     group.addoption('--durations',
          action="store", type=int, default=None, metavar="N",
@@ -68,26 +62,39 @@ def pytest_runtest_protocol(item, nextit
 
 def runtestprotocol(item, log=True, nextitem=None):
     hasrequest = hasattr(item, "_request")
     if hasrequest and not item._request:
         item._initrequest()
     rep = call_and_report(item, "setup", log)
     reports = [rep]
     if rep.passed:
-        reports.append(call_and_report(item, "call", log))
+        if item.config.option.setupshow:
+            show_test_item(item)
+        if not item.config.option.setuponly:
+            reports.append(call_and_report(item, "call", log))
     reports.append(call_and_report(item, "teardown", log,
         nextitem=nextitem))
     # after all teardown hooks have been called
     # want funcargs and request info to go away
     if hasrequest:
         item._request = False
         item.funcargs = None
     return reports
 
+def show_test_item(item):
+    """Show test function, parameters and the fixtures of the test item."""
+    tw = item.config.get_terminal_writer()
+    tw.line()
+    tw.write(' ' * 8)
+    tw.write(item._nodeid)
+    used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
+    if used_fixtures:
+        tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))
+
 def pytest_runtest_setup(item):
     item.session._setupstate.prepare(item)
 
 def pytest_runtest_call(item):
     try:
         item.runtest()
     except Exception:
         # Store trace info to allow postmortem debugging
@@ -193,16 +200,46 @@ class BaseReport(object):
             except UnicodeEncodeError:
                 out.line("<unprintable longrepr>")
 
     def get_sections(self, prefix):
         for name, content in self.sections:
             if name.startswith(prefix):
                 yield prefix, content
 
+    @property
+    def longreprtext(self):
+        """
+        Read-only property that returns the full string representation
+        of ``longrepr``.
+
+        .. versionadded:: 3.0
+        """
+        tw = py.io.TerminalWriter(stringio=True)
+        tw.hasmarkup = False
+        self.toterminal(tw)
+        exc = tw.stringio.getvalue()
+        return exc.strip()
+
+    @property
+    def capstdout(self):
+        """Return captured text from stdout, if capturing is enabled
+
+        .. versionadded:: 3.0
+        """
+        return ''.join(content for (prefix, content) in self.get_sections('Captured stdout'))
+
+    @property
+    def capstderr(self):
+        """Return captured text from stderr, if capturing is enabled
+
+        .. versionadded:: 3.0
+        """
+        return ''.join(content for (prefix, content) in self.get_sections('Captured stderr'))
+
     passed = property(lambda x: x.outcome == "passed")
     failed = property(lambda x: x.outcome == "failed")
     skipped = property(lambda x: x.outcome == "skipped")
 
     @property
     def fspath(self):
         return self.nodeid.split("::")[0]
 
@@ -214,17 +251,17 @@ def pytest_runtest_makereport(item, call
     sections = []
     if not call.excinfo:
         outcome = "passed"
         longrepr = None
     else:
         if not isinstance(excinfo, ExceptionInfo):
             outcome = "failed"
             longrepr = excinfo
-        elif excinfo.errisinstance(pytest.skip.Exception):
+        elif excinfo.errisinstance(skip.Exception):
             outcome = "skipped"
             r = excinfo._getreprcrash()
             longrepr = (str(r.path), r.lineno, r.message)
         else:
             outcome = "failed"
             if call.when == "call":
                 longrepr = item.repr_failure(excinfo)
             else: # exception in setup or teardown
@@ -258,18 +295,20 @@ class TestReport(BaseReport):
         self.outcome = outcome
 
         #: None or a failure representation.
         self.longrepr = longrepr
 
         #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
         self.when = when
 
-        #: list of (secname, data) extra information which needs to
-        #: marshallable
+        #: list of pairs ``(str, str)`` of extra information which needs to
+        #: be marshallable. Used by pytest to add captured text
+        #: from ``stdout`` and ``stderr``, but may be used by other plugins
+        #: to add arbitrary information to reports.
         self.sections = list(sections)
 
         #: time it took to run just the test
         self.duration = duration
 
         self.__dict__.update(extra)
 
     def __repr__(self):
@@ -280,17 +319,19 @@ class TeardownErrorReport(BaseReport):
     outcome = "failed"
     when = "teardown"
     def __init__(self, longrepr, **extra):
         self.longrepr = longrepr
         self.sections = []
         self.__dict__.update(extra)
 
 def pytest_make_collect_report(collector):
-    call = CallInfo(collector._memocollect, "memocollect")
+    call = CallInfo(
+        lambda: list(collector.collect()),
+        'collect')
     longrepr = None
     if not call.excinfo:
         outcome = "passed"
     else:
         from _pytest import nose
         skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
         if call.excinfo.errisinstance(skip_exceptions):
             outcome = "skipped"
@@ -442,74 +483,98 @@ class OutcomeException(Exception):
         return "<%s instance>" %(self.__class__.__name__,)
     __str__ = __repr__
 
 class Skipped(OutcomeException):
     # XXX hackish: on 3k we fake to live in the builtins
     # in order to have Skipped exception printing shorter/nicer
     __module__ = 'builtins'
 
+    def __init__(self, msg=None, pytrace=True, allow_module_level=False):
+        OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
+        self.allow_module_level = allow_module_level
+
+
 class Failed(OutcomeException):
     """ raised from an explicit call to pytest.fail() """
     __module__ = 'builtins'
 
+
 class Exit(KeyboardInterrupt):
     """ raised for immediate program exits (no tracebacks/summaries)"""
     def __init__(self, msg="unknown reason"):
         self.msg = msg
         KeyboardInterrupt.__init__(self, msg)
 
 # exposed helper methods
 
 def exit(msg):
     """ exit testing process as if KeyboardInterrupt was triggered. """
     __tracebackhide__ = True
     raise Exit(msg)
 
+
 exit.Exception = Exit
 
+
 def skip(msg=""):
     """ skip an executing test with the given message.  Note: it's usually
     better to use the pytest.mark.skipif marker to declare a test to be
     skipped under certain conditions like mismatching platforms or
     dependencies.  See the pytest_skipping plugin for details.
     """
     __tracebackhide__ = True
     raise Skipped(msg=msg)
+
+
 skip.Exception = Skipped
 
+
 def fail(msg="", pytrace=True):
     """ explicitly fail an currently-executing test with the given Message.
 
     :arg pytrace: if false the msg represents the full failure information
                   and no python traceback will be reported.
     """
     __tracebackhide__ = True
     raise Failed(msg=msg, pytrace=pytrace)
+
+
 fail.Exception = Failed
 
 
 def importorskip(modname, minversion=None):
     """ return imported module if it has at least "minversion" as its
     __version__ attribute.  If no minversion is specified the a skip
     is only triggered if the module can not be imported.
     """
+    import warnings
     __tracebackhide__ = True
     compile(modname, '', 'eval') # to catch syntaxerrors
-    try:
-        __import__(modname)
-    except ImportError:
-        skip("could not import %r" %(modname,))
+    should_skip = False
+
+    with warnings.catch_warnings():
+        # make sure to ignore ImportWarnings that might happen because
+        # of existing directories with the same name we're trying to
+        # import but without a __init__.py file
+        warnings.simplefilter('ignore')
+        try:
+            __import__(modname)
+        except ImportError:
+            # Do not raise chained exception here (#1485)
+            should_skip = True
+    if should_skip:
+        raise Skipped("could not import %r" %(modname,), allow_module_level=True)
     mod = sys.modules[modname]
     if minversion is None:
         return mod
     verattr = getattr(mod, '__version__', None)
     if minversion is not None:
         try:
             from pkg_resources import parse_version as pv
         except ImportError:
-            skip("we have a required version for %r but can not import "
-                 "no pkg_resources to parse version strings." %(modname,))
+            raise Skipped("we have a required version for %r but can not import "
+                          "pkg_resources to parse version strings." % (modname,),
+                          allow_module_level=True)
         if verattr is None or pv(verattr) < pv(minversion):
-            skip("module %r has __version__ %r, required is: %r" %(
-                 modname, verattr, minversion))
+            raise Skipped("module %r has __version__ %r, required is: %r" %(
+                          modname, verattr, minversion), allow_module_level=True)
     return mod
-
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/setuponly.py
@@ -0,0 +1,74 @@
+from __future__ import absolute_import, division, print_function
+
+import pytest
+import sys
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("debugconfig")
+    group.addoption('--setuponly', '--setup-only', action="store_true",
+                    help="only setup fixtures, do not execute tests.")
+    group.addoption('--setupshow', '--setup-show', action="store_true",
+                    help="show setup of fixtures while executing tests.")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_fixture_setup(fixturedef, request):
+    yield
+    config = request.config
+    if config.option.setupshow:
+        if hasattr(request, 'param'):
+            # Save the fixture parameter so ._show_fixture_action() can
+            # display it now and during the teardown (in .finish()).
+            if fixturedef.ids:
+                if callable(fixturedef.ids):
+                    fixturedef.cached_param = fixturedef.ids(request.param)
+                else:
+                    fixturedef.cached_param = fixturedef.ids[
+                        request.param_index]
+            else:
+                fixturedef.cached_param = request.param
+        _show_fixture_action(fixturedef, 'SETUP')
+
+
+def pytest_fixture_post_finalizer(fixturedef):
+    if hasattr(fixturedef, "cached_result"):
+        config = fixturedef._fixturemanager.config
+        if config.option.setupshow:
+            _show_fixture_action(fixturedef, 'TEARDOWN')
+            if hasattr(fixturedef, "cached_param"):
+                del fixturedef.cached_param
+
+
+def _show_fixture_action(fixturedef, msg):
+    config = fixturedef._fixturemanager.config
+    capman = config.pluginmanager.getplugin('capturemanager')
+    if capman:
+        out, err = capman.suspendcapture()
+
+    tw = config.get_terminal_writer()
+    tw.line()
+    tw.write(' ' * 2 * fixturedef.scopenum)
+    tw.write('{step} {scope} {fixture}'.format(
+        step=msg.ljust(8),  # align the output to TEARDOWN
+        scope=fixturedef.scope[0].upper(),
+        fixture=fixturedef.argname))
+
+    if msg == 'SETUP':
+        deps = sorted(arg for arg in fixturedef.argnames if arg != 'request')
+        if deps:
+            tw.write(' (fixtures used: {0})'.format(', '.join(deps)))
+
+    if hasattr(fixturedef, 'cached_param'):
+        tw.write('[{0}]'.format(fixturedef.cached_param))
+
+    if capman:
+        capman.resumecapture()
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_cmdline_main(config):
+    if config.option.setuponly:
+        config.option.setupshow = True
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/setupplan.py
@@ -0,0 +1,25 @@
+from __future__ import absolute_import, division, print_function
+
+import pytest
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("debugconfig")
+    group.addoption('--setupplan', '--setup-plan', action="store_true",
+                    help="show what fixtures and tests would be executed but "
+                    "don't execute anything.")
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_fixture_setup(fixturedef, request):
+    # Will return a dummy fixture if the setupplan option is provided.
+    if request.config.option.setupplan:
+        fixturedef.cached_result = (None, None, None)
+        return fixturedef.cached_result
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_cmdline_main(config):
+    if config.option.setupplan:
+        config.option.setuponly = True
+        config.option.setupshow = True
--- a/third_party/python/pytest/_pytest/skipping.py
+++ b/third_party/python/pytest/_pytest/skipping.py
@@ -1,37 +1,43 @@
 """ support for skip/xfail functions and markers. """
+from __future__ import absolute_import, division, print_function
+
 import os
 import sys
 import traceback
 
 import py
-import pytest
+from _pytest.config import hookimpl
 from _pytest.mark import MarkInfo, MarkDecorator
-
+from _pytest.runner import fail, skip
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group.addoption('--runxfail',
            action="store_true", dest="runxfail", default=False,
            help="run tests even if they are marked xfail")
 
     parser.addini("xfail_strict", "default for the strict parameter of xfail "
                                   "markers when not given explicitly (default: "
                                   "False)",
                                   default=False,
                                   type="bool")
 
 
 def pytest_configure(config):
     if config.option.runxfail:
+        # yay a hack
+        import pytest
         old = pytest.xfail
         config._cleanup.append(lambda: setattr(pytest, "xfail", old))
+
         def nop(*args, **kwargs):
             pass
+
         nop.Exception = XFailed
         setattr(pytest, "xfail", nop)
 
     config.addinivalue_line("markers",
         "skip(reason=None): skip the given test function with an optional reason. "
         "Example: skip(reason=\"no way of currently testing this\") skips the "
         "test."
     )
@@ -39,37 +45,35 @@ def pytest_configure(config):
         "skipif(condition): skip the given test function if eval(condition) "
         "results in a True value.  Evaluation happens within the "
         "module global context. Example: skipif('sys.platform == \"win32\"') "
         "skips the test if we are on the win32 platform. see "
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
         "xfail(condition, reason=None, run=True, raises=None, strict=False): "
-        "mark the the test function as an expected failure if eval(condition) "
+        "mark the test function as an expected failure if eval(condition) "
         "has a True value. Optionally specify a reason for better reporting "
         "and run=False if you don't even want to execute the test function. "
         "If only specific exception(s) are expected, you can list them in "
         "raises, and if the test fails in other ways, it will be reported as "
         "a true failure. See http://pytest.org/latest/skipping.html"
     )
 
 
-def pytest_namespace():
-    return dict(xfail=xfail)
-
-
-class XFailed(pytest.fail.Exception):
+class XFailed(fail.Exception):
     """ raised from an explicit call to pytest.xfail() """
 
 
 def xfail(reason=""):
     """ xfail an executing test or setup functions with the given reason."""
     __tracebackhide__ = True
     raise XFailed(reason)
+
+
 xfail.Exception = XFailed
 
 
 class MarkEvaluator:
     def __init__(self, item, name):
         self.item = item
         self.name = name
 
@@ -91,62 +95,57 @@ class MarkEvaluator:
         return not isinstance(exc, raises)
 
     def istrue(self):
         try:
             return self._istrue()
         except Exception:
             self.exc = sys.exc_info()
             if isinstance(self.exc[1], SyntaxError):
-                msg = [" " * (self.exc[1].offset + 4) + "^",]
+                msg = [" " * (self.exc[1].offset + 4) + "^", ]
                 msg.append("SyntaxError: invalid syntax")
             else:
                 msg = traceback.format_exception_only(*self.exc[:2])
-            pytest.fail("Error evaluating %r expression\n"
-                        "    %s\n"
-                        "%s"
-                        %(self.name, self.expr, "\n".join(msg)),
-                        pytrace=False)
+            fail("Error evaluating %r expression\n"
+                 "    %s\n"
+                 "%s"
+                 % (self.name, self.expr, "\n".join(msg)),
+                 pytrace=False)
 
     def _getglobals(self):
         d = {'os': os, 'sys': sys, 'config': self.item.config}
-        func = self.item.obj
-        try:
-            d.update(func.__globals__)
-        except AttributeError:
-            d.update(func.func_globals)
+        if hasattr(self.item, 'obj'):
+            d.update(self.item.obj.__globals__)
         return d
 
     def _istrue(self):
         if hasattr(self, 'result'):
             return self.result
         if self.holder:
-            d = self._getglobals()
             if self.holder.args or 'condition' in self.holder.kwargs:
                 self.result = False
                 # "holder" might be a MarkInfo or a MarkDecorator; only
                 # MarkInfo keeps track of all parameters it received in an
                 # _arglist attribute
-                if hasattr(self.holder, '_arglist'):
-                    arglist = self.holder._arglist
-                else:
-                    arglist = [(self.holder.args, self.holder.kwargs)]
-                for args, kwargs in arglist:
+                marks = getattr(self.holder, '_marks', None) \
+                    or [self.holder.mark]
+                for _, args, kwargs in marks:
                     if 'condition' in kwargs:
                         args = (kwargs['condition'],)
                     for expr in args:
                         self.expr = expr
                         if isinstance(expr, py.builtin._basestring):
+                            d = self._getglobals()
                             result = cached_eval(self.item.config, expr, d)
                         else:
                             if "reason" not in kwargs:
                                 # XXX better be checked at collection time
                                 msg = "you need to specify reason=STRING " \
                                       "when using booleans as conditions."
-                                pytest.fail(msg)
+                                fail(msg)
                             result = bool(expr)
                         if result:
                             self.result = True
                             self.reason = kwargs.get('reason', None)
                             self.expr = expr
                             return self.result
             else:
                 self.result = True
@@ -160,112 +159,126 @@ class MarkEvaluator:
         if not expl:
             if not hasattr(self, 'expr'):
                 return ""
             else:
                 return "condition: " + str(self.expr)
         return expl
 
 
-@pytest.hookimpl(tryfirst=True)
+@hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
     # Check if skip or skipif are specified as pytest marks
 
     skipif_info = item.keywords.get('skipif')
     if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
         eval_skipif = MarkEvaluator(item, 'skipif')
         if eval_skipif.istrue():
             item._evalskip = eval_skipif
-            pytest.skip(eval_skipif.getexplanation())
+            skip(eval_skipif.getexplanation())
 
     skip_info = item.keywords.get('skip')
     if isinstance(skip_info, (MarkInfo, MarkDecorator)):
         item._evalskip = True
         if 'reason' in skip_info.kwargs:
-            pytest.skip(skip_info.kwargs['reason'])
+            skip(skip_info.kwargs['reason'])
         elif skip_info.args:
-            pytest.skip(skip_info.args[0])
+            skip(skip_info.args[0])
         else:
-            pytest.skip("unconditional skip")
+            skip("unconditional skip")
 
     item._evalxfail = MarkEvaluator(item, 'xfail')
     check_xfail_no_run(item)
 
 
-@pytest.mark.hookwrapper
+@hookimpl(hookwrapper=True)
 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
     outcome = yield
     passed = outcome.excinfo is None
     if passed:
         check_strict_xfail(pyfuncitem)
 
 
 def check_xfail_no_run(item):
     """check xfail(run=False)"""
     if not item.config.option.runxfail:
         evalxfail = item._evalxfail
         if evalxfail.istrue():
             if not evalxfail.get('run', True):
-                pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
+                xfail("[NOTRUN] " + evalxfail.getexplanation())
 
 
 def check_strict_xfail(pyfuncitem):
     """check xfail(strict=True) for the given PASSING test"""
     evalxfail = pyfuncitem._evalxfail
     if evalxfail.istrue():
         strict_default = pyfuncitem.config.getini('xfail_strict')
         is_strict_xfail = evalxfail.get('strict', strict_default)
         if is_strict_xfail:
             del pyfuncitem._evalxfail
             explanation = evalxfail.getexplanation()
-            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+            fail('[XPASS(strict)] ' + explanation, pytrace=False)
 
 
-@pytest.hookimpl(hookwrapper=True)
+@hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
     evalxfail = getattr(item, '_evalxfail', None)
     evalskip = getattr(item, '_evalskip', None)
     # unitttest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
-        # we need to translate into how pytest encodes xpass
-        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
-        rep.outcome = "failed"
+        from _pytest.compat import _is_unittest_unexpected_success_a_failure
+        if item._unexpectedsuccess:
+            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
+        else:
+            rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+            rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass   # don't interefere
-    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
+    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
         rep.wasxfail = "reason: " + call.excinfo.value.msg
         rep.outcome = "skipped"
     elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
         evalxfail.istrue():
         if call.excinfo:
             if evalxfail.invalidraise(call.excinfo.value):
                 rep.outcome = "failed"
             else:
                 rep.outcome = "skipped"
                 rep.wasxfail = evalxfail.getexplanation()
         elif call.when == "call":
-            rep.outcome = "failed"  # xpass outcome
-            rep.wasxfail = evalxfail.getexplanation()
+            strict_default = item.config.getini('xfail_strict')
+            is_strict_xfail = evalxfail.get('strict', strict_default)
+            explanation = evalxfail.getexplanation()
+            if is_strict_xfail:
+                rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
+            else:
+                rep.outcome = "passed"
+                rep.wasxfail = explanation
     elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
         # to point to the item definition, otherwise it will display
         # the location of where the skip exception was raised within pytest
         filename, line, reason = rep.longrepr
         filename, line = item.location[:2]
         rep.longrepr = filename, line, reason
 
 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
     if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
-        elif report.failed:
+        elif report.passed:
             return "xpassed", "X", ("XPASS", {'yellow': True})
 
 # called by the terminalreporter instance/plugin
 def pytest_terminal_summary(terminalreporter):
     tr = terminalreporter
     if not tr.reportchars:
         #for name in "xfailed skipped failed xpassed":
         #    if not tr.stats.get(name, 0):
@@ -289,40 +302,44 @@ def pytest_terminal_summary(terminalrepo
         elif char == 'p':
             show_simple(terminalreporter, lines, 'passed', "PASSED %s")
 
     if lines:
         tr._tw.sep("=", "short test summary info")
         for line in lines:
             tr._tw.line(line)
 
+
 def show_simple(terminalreporter, lines, stat, format):
     failed = terminalreporter.stats.get(stat)
     if failed:
         for rep in failed:
             pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
-            lines.append(format %(pos,))
+            lines.append(format % (pos,))
+
 
 def show_xfailed(terminalreporter, lines):
     xfailed = terminalreporter.stats.get("xfailed")
     if xfailed:
         for rep in xfailed:
             pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
             reason = rep.wasxfail
             lines.append("XFAIL %s" % (pos,))
             if reason:
                 lines.append("  " + str(reason))
 
+
 def show_xpassed(terminalreporter, lines):
     xpassed = terminalreporter.stats.get("xpassed")
     if xpassed:
         for rep in xpassed:
             pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
             reason = rep.wasxfail
-            lines.append("XPASS %s %s" %(pos, reason))
+            lines.append("XPASS %s %s" % (pos, reason))
+
 
 def cached_eval(config, expr, d):
     if not hasattr(config, '_evalcache'):
         config._evalcache = {}
     try:
         return config._evalcache[expr]
     except KeyError:
         import _pytest._code
@@ -337,25 +354,27 @@ def folded_skips(skipped):
         key = event.longrepr
         assert len(key) == 3, (event, key)
         d.setdefault(key, []).append(event)
     l = []
     for key, events in d.items():
         l.append((len(events),) + key)
     return l
 
+
 def show_skipped(terminalreporter, lines):
     tr = terminalreporter
     skipped = tr.stats.get('skipped', [])
     if skipped:
         #if not tr.hasopt('skipped'):
         #    tr.write_line(
         #        "%d skipped tests, specify -rs for more info" %
         #        len(skipped))
         #    return
         fskips = folded_skips(skipped)
         if fskips:
             #tr.write_sep("_", "skipped test summary")
             for num, fspath, lineno, reason in fskips:
                 if reason.startswith("Skipped: "):
                     reason = reason[9:]
-                lines.append("SKIP [%d] %s:%d: %s" %
+                lines.append(
+                    "SKIP [%d] %s:%d: %s" %
                     (num, fspath, lineno, reason))
deleted file mode 100755
--- a/third_party/python/pytest/_pytest/standalonetemplate.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#! /usr/bin/env python
-
-# Hi There!
-# You may be wondering what this giant blob of binary data here is, you might
-# even be worried that we're up to something nefarious (good for you for being
-# paranoid!). This is a base64 encoding of a zip file, this zip file contains
-# a fully functional basic pytest script.
-#
-# Pytest is a thing that tests packages, pytest itself is a package that some-
-# one might want to install, especially if they're looking to run tests inside
-# some package they want to install. Pytest has a lot of code to collect and
-# execute tests, and other such sort of "tribal knowledge" that has been en-
-# coded in its code base. Because of this we basically include a basic copy
-# of pytest inside this blob. We do this  because it let's you as a maintainer
-# or application developer who wants people who don't deal with python much to
-# easily run tests without installing the complete pytest package.
-#
-# If you're wondering how this is created: you can create it yourself if you
-# have a complete pytest installation by using this command on the command-
-# line: ``py.test --genscript=runtests.py``.
-
-sources = """
-@SOURCES@"""
-
-import sys
-import base64
-import zlib
-
-class DictImporter(object):
-    def __init__(self, sources):
-        self.sources = sources
-
-    def find_module(self, fullname, path=None):
-        if fullname == "argparse" and sys.version_info >= (2,7):
-            # we were generated with <python2.7 (which pulls in argparse)
-            # but we are running now on a stdlib which has it, so use that.
-            return None
-        if fullname in self.sources:
-            return self
-        if fullname + '.__init__' in self.sources:
-            return self
-        return None
-
-    def load_module(self, fullname):
-        # print "load_module:",  fullname
-        from types import ModuleType
-        try:
-            s = self.sources[fullname]
-            is_pkg = False
-        except KeyError:
-            s = self.sources[fullname + '.__init__']
-            is_pkg = True
-
-        co = compile(s, fullname, 'exec')
-        module = sys.modules.setdefault(fullname, ModuleType(fullname))
-        module.__file__ = "%s/%s" % (__file__, fullname)
-        module.__loader__ = self
-        if is_pkg:
-            module.__path__ = [fullname]
-
-        do_exec(co, module.__dict__) # noqa
-        return sys.modules[fullname]
-
-    def get_source(self, name):
-        res = self.sources.get(name)
-        if res is None:
-            res = self.sources.get(name + '.__init__')
-        return res
-
-if __name__ == "__main__":
-    try:
-        import pkg_resources  # noqa
-    except ImportError:
-        sys.stderr.write("ERROR: setuptools not installed\n")
-        sys.exit(2)
-    if sys.version_info >= (3, 0):
-        exec("def do_exec(co, loc): exec(co, loc)\n")
-        import pickle
-        sources = sources.encode("ascii") # ensure bytes
-        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
-    else:
-        import cPickle as pickle
-        exec("def do_exec(co, loc): exec co in loc\n")
-        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
-
-    importer = DictImporter(sources)
-    sys.meta_path.insert(0, importer)
-    entry = "@ENTRY@"
-    do_exec(entry, locals()) # noqa
--- a/third_party/python/pytest/_pytest/terminal.py
+++ b/third_party/python/pytest/_pytest/terminal.py
@@ -1,12 +1,15 @@
 """ terminal reporting of the full testing process.
 
 This is a good source for looking at the various reporting hooks.
 """
+from __future__ import absolute_import, division, print_function
+
+import itertools
 from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
     EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
 import pytest
 import py
 import sys
 import time
 import platform
 
@@ -15,26 +18,28 @@ import _pytest._pluggy as pluggy
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "reporting", after="general")
     group._addoption('-v', '--verbose', action="count",
                dest="verbose", default=0, help="increase verbosity."),
     group._addoption('-q', '--quiet', action="count",
                dest="quiet", default=0, help="decrease verbosity."),
     group._addoption('-r',
-         action="store", dest="reportchars", default=None, metavar="chars",
+         action="store", dest="reportchars", default='', metavar="chars",
          help="show extra test summary info as specified by chars (f)ailed, "
-              "(E)error, (s)skipped, (x)failed, (X)passed (w)pytest-warnings "
-              "(p)passed, (P)passed with output, (a)all except pP.")
+              "(E)error, (s)skipped, (x)failed, (X)passed, "
+              "(p)passed, (P)passed with output, (a)all except pP. "
+              "Warnings are displayed at all times except when "
+              "--disable-warnings is set")
+    group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False,
+                     dest='disable_warnings', action='store_true',
+                     help='disable warnings summary')
     group._addoption('-l', '--showlocals',
          action="store_true", dest="showlocals", default=False,
          help="show locals in tracebacks (disabled by default).")
-    group._addoption('--report',
-         action="store", dest="report", default=None, metavar="opts",
-         help="(deprecated, use -r)")
     group._addoption('--tb', metavar="style",
                action="store", dest="tbstyle", default='auto',
                choices=['auto', 'long', 'short', 'no', 'line', 'native'],
                help="traceback print mode (auto/long/short/line/native/no).")
     group._addoption('--fulltrace', '--full-trace',
                action="store_true", default=False,
                help="don't cut any tracebacks (default is to cut).")
     group._addoption('--color', metavar="color",
@@ -49,28 +54,21 @@ def pytest_configure(config):
     if config.option.debug or config.option.traceconfig:
         def mywriter(tags, args):
             msg = " ".join(map(str, args))
             reporter.write_line("[traceconfig] " + msg)
         config.trace.root.setprocessor("pytest:config", mywriter)
 
 def getreportopt(config):
     reportopts = ""
-    optvalue = config.option.report
-    if optvalue:
-        py.builtin.print_("DEPRECATED: use -r instead of --report option.",
-            file=sys.stderr)
-        if optvalue:
-            for setting in optvalue.split(","):
-                setting = setting.strip()
-                if setting == "skipped":
-                    reportopts += "s"
-                elif setting == "xfailed":
-                    reportopts += "x"
     reportchars = config.option.reportchars
+    if not config.option.disable_warnings and 'w' not in reportchars:
+        reportchars += 'w'
+    elif config.option.disable_warnings and 'w' in reportchars:
+        reportchars = reportchars.replace('w', '')
     if reportchars:
         for char in reportchars:
             if char not in reportopts and char != 'a':
                 reportopts += char
             elif char == 'a':
                 reportopts = 'fEsxXw'
     return reportopts
 
@@ -80,23 +78,50 @@ def pytest_report_teststatus(report):
     elif report.skipped:
         letter = "s"
     elif report.failed:
         letter = "F"
         if report.when != "call":
             letter = "f"
     return report.outcome, letter, report.outcome.upper()
 
+
 class WarningReport:
+    """
+    Simple structure to hold warnings information captured by ``pytest_logwarning``.
+    """
     def __init__(self, code, message, nodeid=None, fslocation=None):
+        """
+        :param code: unused
+        :param str message: user friendly message about the warning
+        :param str|None nodeid: node id that generated the warning (see ``get_location``).
+        :param tuple|py.path.local fslocation:
+            file system location of the source of the warning (see ``get_location``).
+        """
         self.code = code
         self.message = message
         self.nodeid = nodeid
         self.fslocation = fslocation
 
+    def get_location(self, config):
+        """
+        Returns the more user-friendly information about the location
+        of a warning, or None.
+        """
+        if self.nodeid:
+            return self.nodeid
+        if self.fslocation:
+            if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
+                filename, linenum = self.fslocation[:2]
+                relpath = py.path.local(filename).relto(config.invocation_dir)
+                return '%s:%s' % (relpath, linenum)
+            else:
+                return str(self.fslocation)
+        return None
+
 
 class TerminalReporter:
     def __init__(self, config, file=None):
         import _pytest.config
         self.config = config
         self.verbosity = self.config.option.verbose
         self.showheader = self.verbosity >= 0
         self.showfspath = self.verbosity >= 0
@@ -166,18 +191,16 @@ class TerminalReporter:
 
     def pytest_internalerror(self, excrepr):
         for line in py.builtin.text(excrepr).split("\n"):
             self.write_line("INTERNALERROR> " + line)
         return 1
 
     def pytest_logwarning(self, code, fslocation, message, nodeid):
         warnings = self.stats.setdefault("warnings", [])
-        if isinstance(fslocation, tuple):
-            fslocation = "%s:%d" % fslocation
         warning = WarningReport(code=code, fslocation=fslocation,
                                 message=message, nodeid=nodeid)
         warnings.append(warning)
 
     def pytest_plugin_registered(self, plugin):
         if self.config.option.traceconfig:
             msg = "PLUGIN registered: %s" % (plugin,)
             # XXX this event may happen during setup/teardown time
@@ -254,17 +277,17 @@ class TerminalReporter:
             return
 
         errors = len(self.stats.get('error', []))
         skipped = len(self.stats.get('skipped', []))
         if final:
             line = "collected "
         else:
             line = "collecting "
-        line += str(self._numcollected) + " items"
+        line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's')
         if errors:
             line += " / %d errors" % errors
         if skipped:
             line += " / %d skipped" % skipped
         if self.isatty:
             if final:
                 line += " \n"
             self.rewrite(line, bold=True)
@@ -295,18 +318,18 @@ class TerminalReporter:
             config=self.config, startdir=self.startdir)
         lines.reverse()
         for line in flatten(lines):
             self.write_line(line)
 
     def pytest_report_header(self, config):
         inifile = ""
         if config.inifile:
-            inifile = config.rootdir.bestrelpath(config.inifile)
-        lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)]
+            inifile = " " + config.rootdir.bestrelpath(config.inifile)
+        lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
 
         plugininfo = config.pluginmanager.list_plugin_distinfo()
         if plugininfo:
 
             lines.append(
                 "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
         return lines
 
@@ -361,17 +384,18 @@ class TerminalReporter:
     def pytest_sessionfinish(self, exitstatus):
         outcome = yield
         outcome.get_result()
         self._tw.line("")
         summary_exit_codes = (
             EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
             EXIT_NOTESTSCOLLECTED)
         if exitstatus in summary_exit_codes:
-            self.config.hook.pytest_terminal_summary(terminalreporter=self)
+            self.config.hook.pytest_terminal_summary(terminalreporter=self,
+                                                     exitstatus=exitstatus)
             self.summary_errors()
             self.summary_failures()
             self.summary_warnings()
             self.summary_passes()
         if exitstatus == EXIT_INTERRUPTED:
             self._report_keyboardinterrupt()
             del self._keyboardinterrupt_memo
         self.summary_deselected()
@@ -437,51 +461,71 @@ class TerminalReporter:
         l = []
         for x in self.stats.get(name, []):
             if not hasattr(x, '_pdbshown'):
                 l.append(x)
         return l
 
     def summary_warnings(self):
         if self.hasopt("w"):
-            warnings = self.stats.get("warnings")
-            if not warnings:
+            all_warnings = self.stats.get("warnings")
+            if not all_warnings:
                 return
-            self.write_sep("=", "pytest-warning summary")
-            for w in warnings:
-                self._tw.line("W%s %s %s" % (w.code,
-                              w.fslocation, w.message))
+
+            grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))
+
+            self.write_sep("=", "warnings summary", yellow=True, bold=False)
+            for location, warnings in grouped:
+                self._tw.line(str(location) or '<undetermined location>')
+                for w in warnings:
+                    lines = w.message.splitlines()
+                    indented = '\n'.join('  ' + x for x in lines)
+                    self._tw.line(indented)
+                self._tw.line()
+            self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html')
 
     def summary_passes(self):
         if self.config.option.tbstyle != "no":
             if self.hasopt("P"):
                 reports = self.getreports('passed')
                 if not reports:
                     return
                 self.write_sep("=", "PASSES")
                 for rep in reports:
                     msg = self._getfailureheadline(rep)
                     self.write_sep("_", msg)
                     self._outrep_summary(rep)
 
+    def print_teardown_sections(self, rep):
+        for secname, content in rep.sections:
+            if 'teardown' in secname:
+                self._tw.sep('-', secname)
+                if content[-1:] == "\n":
+                    content = content[:-1]
+                self._tw.line(content)
+
+
     def summary_failures(self):
         if self.config.option.tbstyle != "no":
             reports = self.getreports('failed')
             if not reports:
                 return
             self.write_sep("=", "FAILURES")
             for rep in reports:
                 if self.config.option.tbstyle == "line":
                     line = self._getcrashline(rep)
                     self.write_line(line)
                 else:
                     msg = self._getfailureheadline(rep)
                     markup = {'red': True, 'bold': True}
                     self.write_sep("_", msg, **markup)
                     self._outrep_summary(rep)
+                    for report in self.getreports(''):
+                        if report.nodeid == rep.nodeid and report.when == 'teardown':
+                            self.print_teardown_sections(report)
 
     def summary_errors(self):
         if self.config.option.tbstyle != "no":
             reports = self.getreports('error')
             if not reports:
                 return
             self.write_sep("=", "ERRORS")
             for rep in self.stats['error']:
@@ -512,26 +556,18 @@ class TerminalReporter:
 
         if self.verbosity >= 0:
             self.write_sep("=", msg, **markup)
         if self.verbosity == -1:
             self.write_line(msg, **markup)
 
     def summary_deselected(self):
         if 'deselected' in self.stats:
-            l = []
-            k = self.config.option.keyword
-            if k:
-                l.append("-k%s" % k)
-            m = self.config.option.markexpr
-            if m:
-                l.append("-m %r" % m)
-            if l:
-                self.write_sep("=", "%d tests deselected by %r" % (
-                    len(self.stats['deselected']), " ".join(l)), bold=True)
+            self.write_sep("=", "%d tests deselected" % (
+                len(self.stats['deselected'])), bold=True)
 
 def repr_pythonversion(v=None):
     if v is None:
         v = sys.version_info
     try:
         return "%s.%s.%s-%s-%s" % v
     except (TypeError, ValueError):
         return str(v)
@@ -541,30 +577,28 @@ def flatten(l):
         if isinstance(x, (list, tuple)):
             for y in flatten(x):
                 yield y
         else:
             yield x
 
 def build_summary_stats_line(stats):
     keys = ("failed passed skipped deselected "
-           "xfailed xpassed warnings error").split()
-    key_translation = {'warnings': 'pytest-warnings'}
+            "xfailed xpassed warnings error").split()
     unknown_key_seen = False
     for key in stats.keys():
         if key not in keys:
             if key: # setup/teardown reports have an empty key, ignore them
                 keys.append(key)
                 unknown_key_seen = True
     parts = []
     for key in keys:
         val = stats.get(key, None)
         if val:
-            key_name = key_translation.get(key, key)
-            parts.append("%d %s" % (len(val), key_name))
+            parts.append("%d %s" % (len(val), key))
 
     if parts:
         line = ", ".join(parts)
     else:
         line = "no tests ran"
 
     if 'failed' in stats or 'error' in stats:
         color = 'red'
--- a/third_party/python/pytest/_pytest/tmpdir.py
+++ b/third_party/python/pytest/_pytest/tmpdir.py
@@ -1,14 +1,16 @@
 """ support for providing temporary directories to test functions.  """
+from __future__ import absolute_import, division, print_function
+
 import re
 
 import pytest
 import py
-from _pytest.monkeypatch import monkeypatch
+from _pytest.monkeypatch import MonkeyPatch
 
 
 class TempdirFactory:
     """Factory for temporary directories under the common base temp directory.
 
     The base directory can be configured using the ``--basetemp`` option.
     """
 
@@ -76,48 +78,49 @@ def get_user():
     in the current environment (see #1010).
     """
     import getpass
     try:
         return getpass.getuser()
     except (ImportError, KeyError):
         return None
 
+
 # backward compatibility
 TempdirHandler = TempdirFactory
 
 
 def pytest_configure(config):
     """Create a TempdirFactory and attach it to the config object.
 
     This is to comply with existing plugins which expect the handler to be
     available at pytest_configure time, but ideally should be moved entirely
     to the tmpdir_factory session fixture.
     """
-    mp = monkeypatch()
+    mp = MonkeyPatch()
     t = TempdirFactory(config)
     config._cleanup.extend([mp.undo, t.finish])
     mp.setattr(config, '_tmpdirhandler', t, raising=False)
     mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
 
 
 @pytest.fixture(scope='session')
 def tmpdir_factory(request):
     """Return a TempdirFactory instance for the test session.
     """
     return request.config._tmpdirhandler
 
 
 @pytest.fixture
 def tmpdir(request, tmpdir_factory):
-    """return a temporary directory path object
+    """Return a temporary directory path object
     which is unique to each test function invocation,
     created as a sub directory of the base temporary
     directory.  The returned object is a `py.path.local`_
     path object.
     """
     name = request.node.name
-    name = re.sub("[\W]", "_", name)
+    name = re.sub(r"[\W]", "_", name)
     MAXVAL = 30
     if len(name) > MAXVAL:
         name = name[:MAXVAL]
     x = tmpdir_factory.mktemp(name, numbered=True)
     return x
--- a/third_party/python/pytest/_pytest/unittest.py
+++ b/third_party/python/pytest/_pytest/unittest.py
@@ -1,37 +1,38 @@
 """ discovery and running of std-library "unittest" style tests. """
-from __future__ import absolute_import
+from __future__ import absolute_import, division, print_function
 
 import sys
 import traceback
 
-import pytest
-# for transfering markers
+# for transferring markers
 import _pytest._code
-from _pytest.python import transfer_markers
-from _pytest.skipping import MarkEvaluator
+from _pytest.config import hookimpl
+from _pytest.runner import fail, skip
+from _pytest.python import transfer_markers, Class, Module, Function
+from _pytest.skipping import MarkEvaluator, xfail
 
 
 def pytest_pycollect_makeitem(collector, name, obj):
     # has unittest been imported and is obj a subclass of its TestCase?
     try:
         if not issubclass(obj, sys.modules["unittest"].TestCase):
             return
     except Exception:
         return
     # yes, so let's collect it
     return UnitTestCase(name, parent=collector)
 
 
-class UnitTestCase(pytest.Class):
+class UnitTestCase(Class):
     # marker for fixturemanger.getfixtureinfo()
     # to declare that our children do not support funcargs
     nofuncargs = True
-                                              
+
     def setup(self):
         cls = self.obj
         if getattr(cls, '__unittest_skip__', False):
             return  # skipped
         setup = getattr(cls, 'setUpClass', None)
         if setup is not None:
             setup()
         teardown = getattr(cls, 'tearDownClass', None)
@@ -41,35 +42,36 @@ class UnitTestCase(pytest.Class):
 
     def collect(self):
         from unittest import TestLoader
         cls = self.obj
         if not getattr(cls, "__test__", True):
             return
         self.session._fixturemanager.parsefactories(self, unittest=True)
         loader = TestLoader()
-        module = self.getparent(pytest.Module).obj
+        module = self.getparent(Module).obj
         foundsomething = False
         for name in loader.getTestCaseNames(self.obj):
             x = getattr(self.obj, name)
+            if not getattr(x, '__test__', True):
+                continue
             funcobj = getattr(x, 'im_func', x)
             transfer_markers(funcobj, cls, module)
             yield TestCaseFunction(name, parent=self)
             foundsomething = True
 
         if not foundsomething:
             runtest = getattr(self.obj, 'runTest', None)
             if runtest is not None:
                 ut = sys.modules.get("twisted.trial.unittest", None)
                 if ut is None or runtest != ut.TestCase.runTest:
                     yield TestCaseFunction('runTest', parent=self)
 
 
-
-class TestCaseFunction(pytest.Function):
+class TestCaseFunction(Function):
     _excinfo = None
 
     def setup(self):
         self._testcase = self.parent.obj(self.name)
         self._fix_unittest_skip_decorator()
         self._obj = getattr(self._testcase, self.name)
         if hasattr(self._testcase, 'setup_method'):
             self._testcase.setup_method(self._obj)
@@ -87,113 +89,145 @@ class TestCaseFunction(pytest.Function):
         See issue #1169
         """
         if sys.version_info[0] == 2:
             setattr(self._testcase, "__name__", self.name)
 
     def teardown(self):
         if hasattr(self._testcase, 'teardown_method'):
             self._testcase.teardown_method(self._obj)
+        # Allow garbage collection on TestCase instance attributes.
+        self._testcase = None
+        self._obj = None
 
     def startTest(self, testcase):
         pass
 
     def _addexcinfo(self, rawexcinfo):
         # unwrap potential exception info (see twisted trial support below)
         rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
         try:
             excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
         except TypeError:
             try:
                 try:
                     l = traceback.format_exception(*rawexcinfo)
                     l.insert(0, "NOTE: Incompatible Exception Representation, "
-                        "displaying natively:\n\n")
-                    pytest.fail("".join(l), pytrace=False)
-                except (pytest.fail.Exception, KeyboardInterrupt):
+                                "displaying natively:\n\n")
+                    fail("".join(l), pytrace=False)
+                except (fail.Exception, KeyboardInterrupt):
                     raise
                 except:
-                    pytest.fail("ERROR: Unknown Incompatible Exception "
-                        "representation:\n%r" %(rawexcinfo,), pytrace=False)
+                    fail("ERROR: Unknown Incompatible Exception "
+                         "representation:\n%r" % (rawexcinfo,), pytrace=False)
             except KeyboardInterrupt:
                 raise
-            except pytest.fail.Exception:
+            except fail.Exception:
                 excinfo = _pytest._code.ExceptionInfo()
         self.__dict__.setdefault('_excinfo', []).append(excinfo)
 
     def addError(self, testcase, rawexcinfo):
         self._addexcinfo(rawexcinfo)
+
     def addFailure(self, testcase, rawexcinfo):
         self._addexcinfo(rawexcinfo)
 
     def addSkip(self, testcase, reason):
         try:
-            pytest.skip(reason)
-        except pytest.skip.Exception:
+            skip(reason)
+        except skip.Exception:
             self._evalskip = MarkEvaluator(self, 'SkipTest')
             self._evalskip.result = True
             self._addexcinfo(sys.exc_info())
 
     def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
         try:
-            pytest.xfail(str(reason))
-        except pytest.xfail.Exception:
+            xfail(str(reason))
+        except xfail.Exception:
             self._addexcinfo(sys.exc_info())
 
     def addUnexpectedSuccess(self, testcase, reason=""):
         self._unexpectedsuccess = reason
 
     def addSuccess(self, testcase):
         pass
 
     def stopTest(self, testcase):
         pass
 
+    def _handle_skip(self):
+        # implements the skipping machinery (see #2137)
+        # analog to pythons Lib/unittest/case.py:run
+        testMethod = getattr(self._testcase, self._testcase._testMethodName)
+        if (getattr(self._testcase.__class__, "__unittest_skip__", False) or
+            getattr(testMethod, "__unittest_skip__", False)):
+            # If the class or method was skipped.
+            skip_why = (getattr(self._testcase.__class__, '__unittest_skip_why__', '') or
+                        getattr(testMethod, '__unittest_skip_why__', ''))
+            try:  # PY3, unittest2 on PY2
+                self._testcase._addSkip(self, self._testcase, skip_why)
+            except TypeError:  # PY2
+                if sys.version_info[0] != 2:
+                    raise
+                self._testcase._addSkip(self, skip_why)
+            return True
+        return False
+
     def runtest(self):
-        self._testcase(result=self)
+        if self.config.pluginmanager.get_plugin("pdbinvoke") is None:
+            self._testcase(result=self)
+        else:
+            # disables tearDown and cleanups for post mortem debugging (see #1890)
+            if self._handle_skip():
+                return
+            self._testcase.debug()
 
     def _prunetraceback(self, excinfo):
-        pytest.Function._prunetraceback(self, excinfo)
+        Function._prunetraceback(self, excinfo)
         traceback = excinfo.traceback.filter(
-            lambda x:not x.frame.f_globals.get('__unittest'))
+            lambda x: not x.frame.f_globals.get('__unittest'))
         if traceback:
             excinfo.traceback = traceback
 
-@pytest.hookimpl(tryfirst=True)
+
+@hookimpl(tryfirst=True)
 def pytest_runtest_makereport(item, call):
     if isinstance(item, TestCaseFunction):
         if item._excinfo:
             call.excinfo = item._excinfo.pop(0)
             try:
                 del call.result
             except AttributeError:
                 pass
 
 # twisted trial support
 
-@pytest.hookimpl(hookwrapper=True)
+
+@hookimpl(hookwrapper=True)
 def pytest_runtest_protocol(item):
     if isinstance(item, TestCaseFunction) and \
        'twisted.trial.unittest' in sys.modules:
         ut = sys.modules['twisted.python.failure']
         Failure__init__ = ut.Failure.__init__
         check_testcase_implements_trial_reporter()
+
         def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
             captureVars=None):
             if exc_value is None:
                 self._rawexcinfo = sys.exc_info()
             else:
                 if exc_type is None:
                     exc_type = type(exc_value)
                 self._rawexcinfo = (exc_type, exc_value, exc_tb)
             try:
                 Failure__init__(self, exc_value, exc_type, exc_tb,
                     captureVars=captureVars)
             except TypeError:
                 Failure__init__(self, exc_value, exc_type, exc_tb)
+
         ut.Failure.__init__ = excstore
         yield
         ut.Failure.__init__ = Failure__init__
     else:
         yield
 
 
 def check_testcase_implements_trial_reporter(done=[]):
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-This directory vendors the `pluggy` module.
-
-For a more detailed discussion for the reasons to vendoring this 
-package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944).
-
-To update the current version, execute:
-
-```
-$ pip install -U pluggy==<version> --no-compile --target=_pytest/vendored_packages
-```
-
-And commit the modified files. The `pluggy-<version>.dist-info` directory 
-created by `pip` should be ignored.
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Plugin registration and hook calling for Python
-===============================================
-
-This is the plugin manager as used by pytest but stripped
-of pytest specific details.
-
-During the 0.x series this plugin does not have much documentation
-except extensive docstrings in the pluggy.py module.
-
-
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
+++ /dev/null
@@ -1,39 +0,0 @@
-Metadata-Version: 2.0
-Name: pluggy
-Version: 0.3.1
-Summary: plugin and hook calling mechanisms for python
-Home-page: UNKNOWN
-Author: Holger Krekel
-Author-email: holger at merlinux.eu
-License: MIT license
-Platform: unix
-Platform: linux
-Platform: osx
-Platform: win32
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: MacOS :: MacOS X
-Classifier: Topic :: Software Development :: Testing
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-
-Plugin registration and hook calling for Python
-===============================================
-
-This is the plugin manager as used by pytest but stripped
-of pytest specific details.
-
-During the 0.x series this plugin does not have much documentation
-except extensive docstrings in the pluggy.py module.
-
-
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
+++ /dev/null
@@ -1,8 +0,0 @@
-pluggy.py,sha256=v_RfWzyW6DPU1cJu_EFoL_OHq3t13qloVdR6UaMCXQA,29862
-pluggy-0.3.1.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
-pluggy-0.3.1.dist-info/pbr.json,sha256=xX3s6__wOcAyF-AZJX1sdZyW6PUXT-FkfBlM69EEUCg,47
-pluggy-0.3.1.dist-info/RECORD,,
-pluggy-0.3.1.dist-info/metadata.json,sha256=nLKltOT78dMV-00uXD6Aeemp4xNsz2q59j6ORSDeLjw,1027
-pluggy-0.3.1.dist-info/METADATA,sha256=1b85Ho2u4iK30M099k7axMzcDDhLcIMb-A82JUJZnSo,1334
-pluggy-0.3.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
-pluggy-0.3.1.dist-info/DESCRIPTION.rst,sha256=P5Akh1EdIBR6CeqtV2P8ZwpGSpZiTKPw0NyS7jEiD-g,306
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.24.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
+++ /dev/null
@@ -1,1 +0,0 @@
-{"license": "MIT license", "name": "pluggy", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "plugin and hook calling mechanisms for python", "platform": "unix", "version": "0.3.1", "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "holger at merlinux.eu", "name": "Holger Krekel"}]}}, "classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"]}
\ No newline at end of file
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
+++ /dev/null
@@ -1,1 +0,0 @@
-{"is_release": false, "git_version": "7d4c9cd"}
\ No newline at end of file
deleted file mode 100644
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-pluggy
--- a/third_party/python/pytest/_pytest/vendored_packages/pluggy.py
+++ b/third_party/python/pytest/_pytest/vendored_packages/pluggy.py
@@ -62,18 +62,19 @@ Pluggy currently consists of functionali
 
 - a simple tracing mechanism, including tracing of plugin calls and
   their arguments.
 
 """
 import sys
 import inspect
 
-__version__ = '0.3.1'
-__all__ = ["PluginManager", "PluginValidationError",
+__version__ = '0.4.0'
+
+__all__ = ["PluginManager", "PluginValidationError", "HookCallError",
            "HookspecMarker", "HookimplMarker"]
 
 _py3 = sys.version_info > (3, 0)
 
 
 class HookspecMarker:
     """ Decorator helper class for marking functions as hook specifications.
 
@@ -303,17 +304,17 @@ class _TracedHookExecution:
     def undo(self):
         self.pluginmanager._inner_hookexec = self.oldcall
 
 
 class PluginManager(object):
     """ Core Pluginmanager class which manages registration
     of plugin objects and 1:N hook calling.
 
-    You can register new hooks by calling ``addhooks(module_or_class)``.
+    You can register new hooks by calling ``add_hookspec(module_or_class)``.
     You can register plugin objects (which contain hooks) by calling
     ``register(plugin)``.  The Pluginmanager is initialized with a
     prefix that is searched for in the names of the dict of registered
     plugin objects.  An optional excludefunc allows to blacklist names which
     are not considered as hooks despite a matching prefix.
 
     For debugging purposes you can call ``enable_tracing()``
     which will subsequently send debug information to the trace helper.
@@ -369,17 +370,20 @@ class PluginManager(object):
                     self._verify_hook(hook, hookimpl)
                     hook._maybe_apply_history(hookimpl)
                 hook._add_hookimpl(hookimpl)
                 hookcallers.append(hook)
         return plugin_name
 
     def parse_hookimpl_opts(self, plugin, name):
         method = getattr(plugin, name)
-        res = getattr(method, self.project_name + "_impl", None)
+        try:
+            res = getattr(method, self.project_name + "_impl", None)
+        except Exception:
+            res = {}
         if res is not None and not isinstance(res, dict):
             # false positive
             res = None
         elif res is None and self._implprefix and name.startswith(self._implprefix):
             res = {}
         return res
 
     def unregister(self, plugin=None, name=None):
@@ -450,16 +454,20 @@ class PluginManager(object):
         by the caller of register(plugin, name). To obtain the name
         of an registered plugin use ``get_name(plugin)`` instead."""
         return getattr(plugin, "__name__", None) or str(id(plugin))
 
     def get_plugin(self, name):
         """ Return a plugin or None for the given name. """
         return self._name2plugin.get(name)
 
+    def has_plugin(self, name):
+        """ Return True if a plugin with the given name is registered. """
+        return self.get_plugin(name) is not None
+
     def get_name(self, plugin):
         """ Return name for registered plugin or None if not registered. """
         for name, val in self._name2plugin.items():
             if plugin == val:
                 return name
 
     def _verify_hook(self, hook, hookimpl):
         if hook.is_historic() and hookimpl.hookwrapper:
@@ -487,25 +495,29 @@ class PluginManager(object):
                         if not hookimpl.optionalhook:
                             raise PluginValidationError(
                                 "unknown hook %r in plugin %r" %
                                 (name, hookimpl.plugin))
 
     def load_setuptools_entrypoints(self, entrypoint_name):
         """ Load modules from querying the specified setuptools entrypoint name.
         Return the number of loaded plugins. """
-        from pkg_resources import iter_entry_points, DistributionNotFound
+        from pkg_resources import (iter_entry_points, DistributionNotFound,
+                                   VersionConflict)
         for ep in iter_entry_points(entrypoint_name):
             # is the plugin registered or blocked?
             if self.get_plugin(ep.name) or self.is_blocked(ep.name):
                 continue
             try:
                 plugin = ep.load()
             except DistributionNotFound:
                 continue
+            except VersionConflict as e:
+                raise PluginValidationError(
+                    "Plugin %r could not be loaded: %s!" % (ep.name, e))
             self.register(plugin, name=ep.name)
             self._plugin_distinfo.append((plugin, ep.dist))
         return len(self._plugin_distinfo)
 
     def list_plugin_distinfo(self):
         """ return list of distinfo/plugin tuples for all setuptools registered
         plugins. """
         return list(self._plugin_distinfo)
@@ -523,17 +535,17 @@ class PluginManager(object):
         and return an undo function which, when called,
         will remove the added tracers.
 
         ``before(hook_name, hook_impls, kwargs)`` will be called ahead
         of all hook calls and receive a hookcaller instance, a list
         of HookImpl instances and the keyword arguments for the hook call.
 
         ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
-        same arguments as ``before`` but also a :py:class:`_CallOutcome`` object
+        same arguments as ``before`` but also a :py:class:`_CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` object
         which represents the result of the overall hook call.
         """
         return _TracedHookExecution(self, before, after).undo
 
     def enable_tracing(self):
         """ enable tracing of hook calls and return an undo function. """
         hooktrace = self.hook._trace
 
@@ -568,34 +580,40 @@ class PluginManager(object):
         return orig
 
 
 class _MultiCall:
     """ execute a call into multiple python functions/methods. """
 
     # XXX note that the __multicall__ argument is supported only
     # for pytest compatibility reasons.  It was never officially
-    # supported there and is explicitly deprecated since 2.8
+    # supported there and is explicitely deprecated since 2.8
     # so we can remove it soon, allowing to avoid the below recursion
     # in execute() and simplify/speed up the execute loop.
 
     def __init__(self, hook_impls, kwargs, specopts={}):
         self.hook_impls = hook_impls
         self.kwargs = kwargs
         self.kwargs["__multicall__"] = self
         self.specopts = specopts
 
     def execute(self):
         all_kwargs = self.kwargs
         self.results = results = []
         firstresult = self.specopts.get("firstresult")
 
         while self.hook_impls:
             hook_impl = self.hook_impls.pop()
-            args = [all_kwargs[argname] for argname in hook_impl.argnames]
+            try:
+                args = [all_kwargs[argname] for argname in hook_impl.argnames]
+            except KeyError:
+                for argname in hook_impl.argnames:
+                    if argname not in all_kwargs:
+                        raise HookCallError(
+                            "hook call must provide argument %r" % (argname,))
             if hook_impl.hookwrapper:
                 return _wrapped_call(hook_impl.function(*args), self.execute)
             res = hook_impl.function(*args)
             if res is not None:
                 if firstresult:
                     return res
                 results.append(res)
 
@@ -624,17 +642,20 @@ def varnames(func, startindex=None):
     if inspect.isclass(func):
         try:
             func = func.__init__
         except AttributeError:
             return ()
         startindex = 1
     else:
         if not inspect.isfunction(func) and not inspect.ismethod(func):
-            func = getattr(func, '__call__', func)
+            try:
+                func = getattr(func, '__call__', func)
+            except Exception:
+                return ()
         if startindex is None:
             startindex = int(inspect.ismethod(func))
 
     try:
         rawcode = func.__code__
     except AttributeError:
         return ()
     try:
@@ -758,16 +779,20 @@ class HookImpl:
         self.plugin_name = plugin_name
         self.__dict__.update(hook_impl_opts)
 
 
 class PluginValidationError(Exception):
     """ plugin failed validation. """
 
 
+class HookCallError(Exception):
+    """ Hook was called wrongly. """
+
+
 if hasattr(inspect, 'signature'):
     def _formatdef(func):
         return "%s%s" % (
             func.__name__,
             str(inspect.signature(func))
         )
 else:
     def _formatdef(func):
new file mode 100644
--- /dev/null
+++ b/third_party/python/pytest/_pytest/warnings.py
@@ -0,0 +1,88 @@
+from __future__ import absolute_import, division, print_function
+
+import warnings
+from contextlib import contextmanager
+
+import pytest
+
+from _pytest import compat
+
+
+def _setoption(wmod, arg):
+    """
+    Copy of the warning._setoption function but does not escape arguments.
+    """
+    parts = arg.split(':')
+    if len(parts) > 5:
+        raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
+    while len(parts) < 5:
+        parts.append('')
+    action, message, category, module, lineno = [s.strip()
+                                                 for s in parts]
+    action = wmod._getaction(action)
+    category = wmod._getcategory(category)
+    if lineno:
+        try:
+            lineno = int(lineno)
+            if lineno < 0:
+                raise ValueError
+        except (ValueError, OverflowError):
+            raise wmod._OptionError("invalid lineno %r" % (lineno,))
+    else:
+        lineno = 0
+    wmod.filterwarnings(action, message, category, module, lineno)
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("pytest-warnings")
+    group.addoption(
+        '-W', '--pythonwarnings', action='append',
+        help="set which warnings to report, see -W option of python itself.")
+    parser.addini("filterwarnings", type="linelist",
+                  help="Each line specifies warning filter pattern which would be passed"
+                  "to warnings.filterwarnings. Process after -W and --pythonwarnings.")
+
+
+@contextmanager
+def catch_warnings_for_item(item):
+    """
+    catches the warnings generated during setup/call/teardown execution
+    of the given item and after it is done posts them as warnings to this
+    item.
+    """
+    args = item.config.getoption('pythonwarnings') or []
+    inifilters = item.config.getini("filterwarnings")
+    with warnings.catch_warnings(record=True) as log:
+        for arg in args:
+            warnings._setoption(arg)
+
+        for arg in inifilters:
+            _setoption(warnings, arg)
+
+        yield
+
+        for warning in log:
+            warn_msg = warning.message
+            unicode_warning = False
+
+            if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
+                new_args = [compat.safe_str(m) for m in warn_msg.args]
+                unicode_warning = warn_msg.args != new_args
+                warn_msg.args = new_args
+
+            msg = warnings.formatwarning(
+                warn_msg, warning.category,
+                warning.filename, warning.lineno, warning.line)
+            item.warn("unused", msg)
+
+            if unicode_warning:
+                warnings.warn(
+                    "Warning is using unicode non convertible to ascii, "
+                    "converting to a safe representation:\n  %s"  % msg,
+                    UnicodeWarning)
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_protocol(item):
+    with catch_warnings_for_item(item):
+        yield
--- a/third_party/python/pytest/pytest.py
+++ b/third_party/python/pytest/pytest.py
@@ -1,28 +1,78 @@
 # PYTHON_ARGCOMPLETE_OK
 """
 pytest: unit and functional testing with Python.
 """
+
+
+# else we are imported
+
+from _pytest.config import (
+    main, UsageError, _preloadplugins, cmdline,
+    hookspec, hookimpl
+)
+from _pytest.fixtures import fixture, yield_fixture
+from _pytest.assertion import register_assert_rewrite
+from _pytest.freeze_support import freeze_includes
+from _pytest import __version__
+from _pytest.debugging import pytestPDB as __pytestPDB
+from _pytest.recwarn import warns, deprecated_call
+from _pytest.runner import fail, skip, importorskip, exit
+from _pytest.mark import MARK_GEN as mark, param
+from _pytest.skipping import xfail
+from _pytest.main import Item, Collector, File, Session
+from _pytest.fixtures import fillfixtures as _fillfuncargs
+from _pytest.python import (
+    raises, approx,
+    Module, Class, Instance, Function, Generator,
+)
+
+set_trace = __pytestPDB.set_trace
+
 __all__ = [
     'main',
     'UsageError',
     'cmdline',
     'hookspec',
     'hookimpl',
     '__version__',
+    'register_assert_rewrite',
+    'freeze_includes',
+    'set_trace',
+    'warns',
+    'deprecated_call',
+    'fixture',
+    'yield_fixture',
+    'fail',
+    'skip',
+    'xfail',
+    'importorskip',
+    'exit',
+    'mark',
+    'param',
+    'approx',
+    '_fillfuncargs',
+
+    'Item',
+    'File',
+    'Collector',
+    'Session',
+    'Module',
+    'Class',
+    'Instance',
+    'Function',
+    'Generator',
+    'raises',
+
+
 ]
 
-if __name__ == '__main__': # if run as a script or by 'python -m pytest'
+if __name__ == '__main__':
+    # if run as a script or by 'python -m pytest'
     # we trigger the below "else" condition by the following import
     import pytest
     raise SystemExit(pytest.main())
-
-# else we are imported
+else:
 
-from _pytest.config import (
-    main, UsageError, _preloadplugins, cmdline,
-    hookspec, hookimpl
-)
-from _pytest import __version__
-
-_preloadplugins() # to populate pytest.* namespace so help(pytest) works
-
+    from _pytest.compat import _setup_collect_fakemodule
+    _preloadplugins()  # to populate pytest.* namespace so help(pytest) works
+    _setup_collect_fakemodule()
deleted file mode 100644
--- a/third_party/python/pytest/setup.cfg
+++ /dev/null
@@ -1,19 +0,0 @@
-[build_sphinx]
-source-dir = doc/en/
-build-dir = doc/build
-all_files = 1
-
-[upload_sphinx]
-upload-dir = doc/en/build/html
-
-[bdist_wheel]
-universal = 1
-
-[devpi:upload]
-formats = sdist.tgz,bdist_wheel
-
-[egg_info]
-tag_build = 
-tag_date = 0
-tag_svn_revision = 0
-
deleted file mode 100644
--- a/third_party/python/pytest/setup.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import os, sys
-import setuptools
-import pkg_resources
-from setuptools import setup, Command
-
-classifiers = ['Development Status :: 6 - Mature',
-               'Intended Audience :: Developers',
-               'License :: OSI Approved :: MIT License',
-               'Operating System :: POSIX',
-               'Operating System :: Microsoft :: Windows',
-               'Operating System :: MacOS :: MacOS X',
-               'Topic :: Software Development :: Testing',
-               'Topic :: Software Development :: Libraries',
-               'Topic :: Utilities'] + [
-              ('Programming Language :: Python :: %s' % x) for x in
-                  '2 2.6 2.7 3 3.2 3.3 3.4 3.5'.split()]
-
-with open('README.rst') as fd:
-    long_description = fd.read()
-
-def get_version():
-    p = os.path.join(os.path.dirname(
-                     os.path.abspath(__file__)), "_pytest", "__init__.py")
-    with open(p) as f:
-        for line in f.readlines():
-            if "__version__" in line:
-                return line.strip().split("=")[-1].strip(" '")
-    raise ValueError("could not read version")
-
-
-def has_environment_marker_support():
-    """
-    Tests that setuptools has support for PEP-426 environment marker support.
-
-    The first known release to support it is 0.7 (and the earliest on PyPI seems to be 0.7.2
-    so we're using that), see: http://pythonhosted.org/setuptools/history.html#id142
-
-    References:
-
-    * https://wheel.readthedocs.io/en/latest/index.html#defining-conditional-dependencies
-    * https://www.python.org/dev/peps/pep-0426/#environment-markers
-    """
-    try:
-        return pkg_resources.parse_version(setuptools.__version__) >= pkg_resources.parse_version('0.7.2')
-    except Exception as exc:
-        sys.stderr.write("Could not test setuptool's version: %s\n" % exc)
-        return False
-
-
-def main():
-    install_requires = ['py>=1.4.29']  # pluggy is vendored in _pytest.vendored_packages
-    extras_require = {}
-    if has_environment_marker_support():
-        extras_require[':python_version=="2.6" or python_version=="3.0" or python_version=="3.1"'] = ['argparse']
-        extras_require[':sys_platform=="win32"'] = ['colorama']
-    else:
-        if sys.version_info < (2, 7) or (3,) <= sys.version_info < (3, 2):
-            install_requires.append('argparse')
-        if sys.platform == 'win32':
-            install_requires.append('colorama')
-
-    setup(
-        name='pytest',
-        description='pytest: simple powerful testing with Python',
-        long_description=long_description,
-        version=get_version(),
-        url='http://pytest.org',
-        license='MIT license',
-        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
-        author='Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others',
-        author_email='holger at merlinux.eu',
-        entry_points=make_entry_points(),
-        classifiers=classifiers,
-        cmdclass={'test': PyTest},
-        # the following should be enabled for release
-        install_requires=install_requires,
-        extras_require=extras_require,
-        packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'],
-        py_modules=['pytest'],
-        zip_safe=False,
-    )
-
-
-def cmdline_entrypoints(versioninfo, platform, basename):
-    target = 'pytest:main'
-    if platform.startswith('java'):
-        points = {'py.test-jython': target}
-    else:
-        if basename.startswith('pypy'):
-            points = {'py.test-%s' % basename: target}
-        else: # cpython
-            points = {'py.test-%s.%s' % versioninfo[:2] : target}
-        points['py.test'] = target
-    return points
-
-
-def make_entry_points():
-    basename = os.path.basename(sys.executable)
-    points = cmdline_entrypoints(sys.version_info, sys.platform, basename)
-    keys = list(points.keys())
-    keys.sort()
-    l = ['%s = %s' % (x, points[x]) for x in keys]
-    return {'console_scripts': l}
-
-
-class PyTest(Command):
-    user_options = []
-    def initialize_options(self):
-        pass
-    def finalize_options(self):
-        pass
-    def run(self):
-        import subprocess
-        PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
-        PPATH.insert(0, os.getcwd())
-        os.environ['PYTHONPATH'] = ':'.join(PPATH)
-        errno = subprocess.call([sys.executable, 'pytest.py', '--ignore=doc'])
-        raise SystemExit(errno)
-
-
-if __name__ == '__main__':
-    main()