Ticket #12415: 12415_script_review.patch

File 12415_script_review.patch, 110.9 KB (added by jdemeyer, 7 years ago)
  • deleted file ncadoctest.py

    # HG changeset patch
    # User Jeroen Demeyer <jdemeyer@cage.ugent.be>
    # Date 1361712212 -3600
    # Node ID 323f734509b08a2d4f33f039c62ede8bd2e73e37
    # Parent  9f873edbf50f6599134cfb52317a0f6485b95767
    New doctest framework: scripts reviewer patch
    
    diff --git a/ncadoctest.py b/ncadoctest.py
    deleted file mode 100644
    1 # Module doctest.
    2 # Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
    3 # Major enhancements and refactoring by:
    4 #     Jim Fulton
    5 #     Edward Loper
    6 
    7 # Provided as-is; use at your own risk; no warranty; no promises; enjoy!
    8 
    9 r"""Module doctest -- a framework for running examples in docstrings.
    10 
    11 In simplest use, end each module M to be tested with:
    12 
    13 def _test():
    14     import doctest
    15     doctest.testmod()
    16 
    17 if __name__ == "__main__":
    18     _test()
    19 
    20 Then running the module as a script will cause the examples in the
    21 docstrings to get executed and verified:
    22 
    23 python M.py
    24 
    25 This won't display anything unless an example fails, in which case the
    26 failing example(s) and the cause(s) of the failure(s) are printed to stdout
    27 (why not stderr? because stderr is a lame hack <0.2 wink>), and the final
    28 line of output is "Test failed.".
    29 
    30 Run it with the -v switch instead:
    31 
    32 python M.py -v
    33 
    34 and a detailed report of all examples tried is printed to stdout, along
    35 with assorted summaries at the end.
    36 
    37 You can force verbose mode by passing "verbose=True" to testmod, or prohibit
    38 it by passing "verbose=False".  In either of those cases, sys.argv is not
    39 examined by testmod.
    40 
    41 There are a variety of other ways to run doctests, including integration
    42 with the unittest framework, and support for running non-Python text
    43 files containing doctests.  There are also many ways to override parts
    44 of doctest's default behaviors.  See the Library Reference Manual for
    45 details.
    46 """
    47 
    48 __docformat__ = 'reStructuredText en'
    49 
    50 __all__ = [
    51     # 0. Option Flags
    52     'register_optionflag',
    53     'DONT_ACCEPT_TRUE_FOR_1',
    54     'DONT_ACCEPT_BLANKLINE',
    55     'NORMALIZE_WHITESPACE',
    56     'ELLIPSIS',
    57     'SKIP',
    58     'IGNORE_EXCEPTION_DETAIL',
    59     'COMPARISON_FLAGS',
    60     'REPORT_UDIFF',
    61     'REPORT_CDIFF',
    62     'REPORT_NDIFF',
    63     'REPORT_ONLY_FIRST_FAILURE',
    64     'REPORTING_FLAGS',
    65     # 1. Utility Functions
    66     # 2. Example & DocTest
    67     'Example',
    68     'DocTest',
    69     # 3. Doctest Parser
    70     'DocTestParser',
    71     # 4. Doctest Finder
    72     'DocTestFinder',
    73     # 5. Doctest Runner
    74     'DocTestRunner',
    75     'OutputChecker',
    76     'DocTestFailure',
    77     'UnexpectedException',
    78     'DebugRunner',
    79     # 6. Test Functions
    80     'testmod',
    81     'testfile',
    82     'run_docstring_examples',
    83     # 7. Tester
    84     'Tester',
    85     # 8. Unittest Support
    86     'DocTestSuite',
    87     'DocFileSuite',
    88     'set_unittest_reportflags',
    89     # 9. Debugging Support
    90     'script_from_examples',
    91     'testsource',
    92     'debug_src',
    93     'debug',
    94 ]
    95 
    96 import __future__
    97 
    98 import sys, traceback, inspect, linecache, os, re
    99 import unittest, difflib, pdb, tempfile
    100 import warnings
    101 from StringIO import StringIO
    102 
    103 # There are 4 basic classes:
    104 #  - Example: a <source, want> pair, plus an intra-docstring line number.
    105 #  - DocTest: a collection of examples, parsed from a docstring, plus
    106 #    info about where the docstring came from (name, filename, lineno).
    107 #  - DocTestFinder: extracts DocTests from a given object's docstring and
    108 #    its contained objects' docstrings.
    109 #  - DocTestRunner: runs DocTest cases, and accumulates statistics.
    110 #
    111 # So the basic picture is:
    112 #
    113 #                             list of:
    114 # +------+                   +---------+                   +-------+
    115 # |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
    116 # +------+                   +---------+                   +-------+
    117 #                            | Example |
    118 #                            |   ...   |
    119 #                            | Example |
    120 #                            +---------+
    121 
    122 # Option constants.
    123 
    124 OPTIONFLAGS_BY_NAME = {}
    125 def register_optionflag(name):
    126     # Create a new flag unless `name` is already known.
    127     return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
    128 
    129 DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
    130 DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
    131 NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
    132 ELLIPSIS = register_optionflag('ELLIPSIS')
    133 SKIP = register_optionflag('SKIP')
    134 IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
    135 
    136 COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
    137                     DONT_ACCEPT_BLANKLINE |
    138                     NORMALIZE_WHITESPACE |
    139                     ELLIPSIS |
    140                     SKIP |
    141                     IGNORE_EXCEPTION_DETAIL)
    142 
    143 REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
    144 REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
    145 REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
    146 REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
    147 
    148 REPORTING_FLAGS = (REPORT_UDIFF |
    149                    REPORT_CDIFF |
    150                    REPORT_NDIFF |
    151                    REPORT_ONLY_FIRST_FAILURE)
    152 
    153 # Special string markers for use in `want` strings:
    154 BLANKLINE_MARKER = '<BLANKLINE>'
    155 ELLIPSIS_MARKER = '...'
    156 
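    # Illustrative check of the flag machinery above (the _demo_* name is an
    # addition for illustration): each flag is a distinct bit, so callers
    # combine options with | and test them with &.
    _demo_flags = ELLIPSIS | NORMALIZE_WHITESPACE
    assert _demo_flags & ELLIPSIS
    assert not (_demo_flags & SKIP)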
    157 ######################################################################
    158 ## Table of Contents
    159 ######################################################################
    160 #  1. Utility Functions
    161 #  2. Example & DocTest -- store test cases
    162 #  3. DocTest Parser -- extracts examples from strings
    163 #  4. DocTest Finder -- extracts test cases from objects
    164 #  5. DocTest Runner -- runs test cases
    165 #  6. Test Functions -- convenient wrappers for testing
    166 #  7. Tester Class -- for backwards compatibility
    167 #  8. Unittest Support
    168 #  9. Debugging Support
    169 # 10. Example Usage
    170 
    171 ######################################################################
    172 ## 1. Utility Functions
    173 ######################################################################
    174 
    175 def _extract_future_flags(globs):
    176     """
    177     Return the compiler-flags associated with the future features that
    178     have been imported into the given namespace (globs).
    179     """
    180     flags = 0
    181     for fname in __future__.all_feature_names:
    182         feature = globs.get(fname, None)
    183         if feature is getattr(__future__, fname):
    184             flags |= feature.compiler_flag
    185     return flags
    186 
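    # Illustrative check (the _demo_* name is an addition for illustration):
    # a namespace that has executed a __future__ import carries that
    # feature's compiler flag.
    _demo_globs = {}
    exec "from __future__ import division" in _demo_globs
    assert _extract_future_flags(_demo_globs) & __future__.division.compiler_flag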
    187 def _normalize_module(module, depth=2):
    188     """
    189     Return the module specified by `module`.  In particular:
    190       - If `module` is a module, then return module.
    191       - If `module` is a string, then import and return the
    192         module with that name.
    193       - If `module` is None, then return the calling module.
    194         The calling module is assumed to be the module of
    195         the stack frame at the given depth in the call stack.
    196     """
    197     if inspect.ismodule(module):
    198         return module
    199     elif isinstance(module, (str, unicode)):
    200         return __import__(module, globals(), locals(), ["*"])
    201     elif module is None:
    202         return sys.modules[sys._getframe(depth).f_globals['__name__']]
    203     else:
    204         raise TypeError("Expected a module, string, or None")
    205 
    206 def _load_testfile(filename, package, module_relative):
    207     if module_relative:
    208         package = _normalize_module(package, 3)
    209         filename = _module_relative_path(package, filename)
    210         if hasattr(package, '__loader__'):
    211             if hasattr(package.__loader__, 'get_data'):
    212                 file_contents = package.__loader__.get_data(filename)
    213                 # get_data() opens files as 'rb', so one must do the equivalent
    214                 # conversion as universal newlines would do.
    215                 return file_contents.replace(os.linesep, '\n'), filename
    216     return open(filename).read(), filename
    217 
    218 def _indent(s, indent=4):
    219     """
    220     Add the given number of space characters to the beginning of every
    221     non-blank line in `s`, and return the result.
    222     """
    223     # This regexp matches the start of non-blank lines:
    224     return re.sub('(?m)^(?!$)', indent*' ', s)
    225 
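    # Illustrative check: only non-blank lines receive the prefix.
    assert _indent("a\n\nb\n") == "    a\n\n    b\n"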
    226 def _exception_traceback(exc_info):
    227     """
    228     Return a string containing a traceback message for the given
    229     exc_info tuple (as returned by sys.exc_info()).
    230     """
    231     # Get a traceback message.
    232     excout = StringIO()
    233     exc_type, exc_val, exc_tb = exc_info
    234     traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
    235     return excout.getvalue()
    236 
    237 # Override some StringIO methods.
    238 class _SpoofOut(StringIO):
    239     def getvalue(self):
    240         result = StringIO.getvalue(self)
    241         # If anything at all was written, make sure there's a trailing
    242         # newline.  There's no way for the expected output to indicate
    243         # that a trailing newline is missing.
    244         if result and not result.endswith("\n"):
    245             result += "\n"
    246         # Prevent softspace from screwing up the next test case, in
    247         # case they used print with a trailing comma in an example.
    248         if hasattr(self, "softspace"):
    249             del self.softspace
    250         return result
    251 
    252     def truncate(self, size=None):
    253         StringIO.truncate(self, size)
    254         if hasattr(self, "softspace"):
    255             del self.softspace
    256 
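    # Illustrative check (the _demo_* name is an addition for illustration):
    # captured output always gains a trailing newline, so it can be compared
    # verbatim against an expected block.
    _demo_buf = _SpoofOut()
    _demo_buf.write("no newline")
    assert _demo_buf.getvalue() == "no newline\n"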
    257 # Worst-case linear-time ellipsis matching.
    258 def _ellipsis_match(want, got):
    259     """
    260     Essentially the only subtle case:
    261     >>> _ellipsis_match('aa...aa', 'aaa')
    262     False
    263     """
    264     if ELLIPSIS_MARKER not in want:
    265         return want == got
    266 
    267     # Find "the real" strings.
    268     ws = want.split(ELLIPSIS_MARKER)
    269     assert len(ws) >= 2
    270 
    271     # Deal with exact matches possibly needed at one or both ends.
    272     startpos, endpos = 0, len(got)
    273     w = ws[0]
    274     if w:   # starts with exact match
    275         if got.startswith(w):
    276             startpos = len(w)
    277             del ws[0]
    278         else:
    279             return False
    280     w = ws[-1]
    281     if w:   # ends with exact match
    282         if got.endswith(w):
    283             endpos -= len(w)
    284             del ws[-1]
    285         else:
    286             return False
    287 
    288     if startpos > endpos:
    289         # Exact end matches required more characters than we have, as in
    290         # _ellipsis_match('aa...aa', 'aaa')
    291         return False
    292 
    293     # For the rest, we only need to find the leftmost non-overlapping
    294     # match for each piece.  If there's no overall match that way alone,
    295     # there's no overall match period.
    296     for w in ws:
    297         # w may be '' at times, if there are consecutive ellipses, or
    298         # due to an ellipsis at the start or end of `want`.  That's OK.
    299         # Search for an empty string succeeds, and doesn't change startpos.
    300         startpos = got.find(w, startpos, endpos)
    301         if startpos < 0:
    302             return False
    303         startpos += len(w)
    304 
    305     return True
    306 
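    # Illustrative checks of the matching semantics implemented above.
    assert _ellipsis_match('a...z', 'abcxyz')
    assert _ellipsis_match('...', 'anything at all')
    assert not _ellipsis_match('aa...aa', 'aaa')   # needs 4 chars, only 3 given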
    307 def _comment_line(line):
    308     "Return a commented form of the given line"
    309     line = line.rstrip()
    310     if line:
    311         return '# '+line
    312     else:
    313         return '#'
    314 
    315 class _OutputRedirectingPdb(pdb.Pdb):
    316     """
    317     A specialized version of the python debugger that redirects stdout
    318     to a given stream when interacting with the user.  Stdout is *not*
    319     redirected when traced code is executed.
    320     """
    321     def __init__(self, out):
    322         self.__out = out
    323         self.__debugger_used = False
    324         pdb.Pdb.__init__(self, stdout=out)
    325 
    326     def set_trace(self, frame=None):
    327         self.__debugger_used = True
    328         if frame is None:
    329             frame = sys._getframe().f_back
    330         pdb.Pdb.set_trace(self, frame)
    331 
    332     def set_continue(self):
    333         # Calling set_continue unconditionally would break unit test
    334         # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
    335         if self.__debugger_used:
    336             pdb.Pdb.set_continue(self)
    337 
    338     def trace_dispatch(self, *args):
    339         # Redirect stdout to the given stream.
    340         save_stdout = sys.stdout
    341         sys.stdout = self.__out
    342         # Call Pdb's trace dispatch method.
    343         try:
    344             return pdb.Pdb.trace_dispatch(self, *args)
    345         finally:
    346             sys.stdout = save_stdout
    347 
    348 # [XX] Normalize with respect to os.path.pardir?
    349 def _module_relative_path(module, path):
    350     if not inspect.ismodule(module):
    351         raise TypeError, 'Expected a module: %r' % module
    352     if path.startswith('/'):
    353         raise ValueError, 'Module-relative files may not have absolute paths'
    354 
    355     # Find the base directory for the path.
    356     if hasattr(module, '__file__'):
    357         # A normal module/package
    358         basedir = os.path.split(module.__file__)[0]
    359     elif module.__name__ == '__main__':
    360         # An interactive session.
    361         if len(sys.argv)>0 and sys.argv[0] != '':
    362             basedir = os.path.split(sys.argv[0])[0]
    363         else:
    364             basedir = os.curdir
    365     else:
    366         # A module w/o __file__ (this includes builtins)
    367         raise ValueError("Can't resolve paths relative to the module " +
    368                          module.__name__ + " (it has no __file__)")
    369 
    370     # Combine the base directory and the path.
    371     return os.path.join(basedir, *(path.split('/')))
    372 
    373 ######################################################################
    374 ## 2. Example & DocTest
    375 ######################################################################
    376 ## - An "example" is a <source, want> pair, where "source" is a
    377 ##   fragment of source code, and "want" is the expected output for
    378 ##   "source."  The Example class also includes information about
    379 ##   where the example was extracted from.
    380 ##
    381 ## - A "doctest" is a collection of examples, typically extracted from
    382 ##   a string (such as an object's docstring).  The DocTest class also
    383 ##   includes information about where the string was extracted from.
    384 
    385 class Example:
    386     """
    387     A single doctest example, consisting of source code and expected
    388     output.  `Example` defines the following attributes:
    389 
    390       - source: A single Python statement, always ending with a newline.
    391         The constructor adds a newline if needed.
    392 
    393       - want: The expected output from running the source code (either
    394         from stdout, or a traceback in case of exception).  `want` ends
    395         with a newline unless it's empty, in which case it's an empty
    396         string.  The constructor adds a newline if needed.
    397 
    398       - exc_msg: The exception message generated by the example, if
    399         the example is expected to generate an exception; or `None` if
    400         it is not expected to generate an exception.  This exception
    401         message is compared against the return value of
    402         `traceback.format_exception_only()`.  `exc_msg` ends with a
    403         newline unless it's `None`.  The constructor adds a newline
    404         if needed.
    405 
    406       - lineno: The line number within the DocTest string containing
    407         this Example where the Example begins.  This line number is
    408         zero-based, with respect to the beginning of the DocTest.
    409 
    410       - indent: The example's indentation in the DocTest string.
    411         I.e., the number of space characters that precede the
    412         example's first prompt.
    413 
    414       - options: A dictionary mapping from option flags to True or
    415         False, which is used to override default options for this
    416         example.  Any option flags not contained in this dictionary
    417         are left at their default value (as specified by the
    418         DocTestRunner's optionflags).  By default, no options are set.
    419     """
    420     def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
    421                  options=None):
    422         # Normalize inputs.
    423         if not source.endswith('\n'):
    424             source += '\n'
    425         if want and not want.endswith('\n'):
    426             want += '\n'
    427         if exc_msg is not None and not exc_msg.endswith('\n'):
    428             exc_msg += '\n'
    429         # Store properties.
    430         self.source = source
    431         self.want = want
    432         self.lineno = lineno
    433         self.indent = indent
    434         if options is None: options = {}
    435         self.options = options
    436         self.exc_msg = exc_msg
    437 
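    # Illustrative check (the _demo_* name is an addition for illustration):
    # the constructor normalizes trailing newlines exactly as the attribute
    # descriptions above state.
    _demo_example = Example('print 6*7', '42')
    assert _demo_example.source == 'print 6*7\n'
    assert _demo_example.want == '42\n'
    assert _demo_example.exc_msg is None and _demo_example.options == {}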
    438 class DocTest:
    439     """
    440     A collection of doctest examples that should be run in a single
    441     namespace.  Each `DocTest` defines the following attributes:
    442 
    443       - examples: the list of examples.
    444 
    445       - globs: The namespace (aka globals) that the examples should
    446         be run in.
    447 
    448       - name: A name identifying the DocTest (typically, the name of
    449         the object whose docstring this DocTest was extracted from).
    450 
    451       - filename: The name of the file that this DocTest was extracted
    452         from, or `None` if the filename is unknown.
    453 
    454       - lineno: The line number within filename where this DocTest
    455         begins, or `None` if the line number is unavailable.  This
    456         line number is zero-based, with respect to the beginning of
    457         the file.
    458 
    459       - docstring: The string that the examples were extracted from,
    460         or `None` if the string is unavailable.
    461     """
    462     def __init__(self, examples, globs, name, filename, lineno, docstring):
    463         """
    464         Create a new DocTest containing the given examples.  The
    465         DocTest's globals are initialized with a copy of `globs`.
    466         """
    467         assert not isinstance(examples, basestring), \
    468                "DocTest no longer accepts str; use DocTestParser instead"
    469         self.examples = examples
    470         self.docstring = docstring
    471         self.globs = globs.copy()
    472         self.name = name
    473         self.filename = filename
    474         self.lineno = lineno
    475 
    476     def __repr__(self):
    477         if len(self.examples) == 0:
    478             examples = 'no examples'
    479         elif len(self.examples) == 1:
    480             examples = '1 example'
    481         else:
    482             examples = '%d examples' % len(self.examples)
    483         return ('<DocTest %s from %s:%s (%s)>' %
    484                 (self.name, self.filename, self.lineno, examples))
    485 
    486 
    487     # This lets us sort tests by name:
    488     def __cmp__(self, other):
    489         if not isinstance(other, DocTest):
    490             return -1
    491         return cmp((self.name, self.filename, self.lineno, id(self)),
    492                    (other.name, other.filename, other.lineno, id(other)))
    493 
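    # Illustrative check (the _demo_* name is an addition for illustration):
    # DocTest is a plain container; it is normally built by
    # DocTestParser.get_doctest rather than constructed by hand.
    _demo_doctest = DocTest([Example('print 6*7', '42')], {}, 'demo', None, 0, None)
    assert repr(_demo_doctest) == '<DocTest demo from None:0 (1 example)>'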
    494 ######################################################################
    495 ## 3. DocTestParser
    496 ######################################################################
    497 
    498 class DocTestParser:
    499     """
    500     A class used to parse strings containing doctest examples.
    501     """
    502     # This regular expression is used to find doctest examples in a
    503     # string.  It defines three groups: `source` is the source code
    504     # (including leading indentation and prompts); `indent` is the
    505     # indentation of the first (PS1) line of the source code; and
    506     # `want` is the expected output (including leading indentation).
    507     _EXAMPLE_RE = re.compile(r'''
    508         # Source consists of a PS1 line followed by zero or more PS2 lines.
    509         (?P<source>
    510             (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
    511             (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
    512         \n?
    513         # Want consists of any non-blank lines that do not start with PS1.
    514         (?P<want> (?:(?![ ]*$)    # Not a blank line
    515                      (?![ ]*>>>)  # Not a line starting with PS1
    516                      .*$\n?       # But any other line
    517                   )*)
    518         ''', re.MULTILINE | re.VERBOSE)
    519 
    520     # A regular expression for handling `want` strings that contain
    521     # expected exceptions.  It divides `want` into three pieces:
    522     #    - the traceback header line (`hdr`)
    523     #    - the traceback stack (`stack`)
    524     #    - the exception message (`msg`), as generated by
    525     #      traceback.format_exception_only()
    526     # `msg` may have multiple lines.  We assume/require that the
    527     # exception message is the first non-indented line starting with a word
    528     # character following the traceback header line.
    529     _EXCEPTION_RE = re.compile(r"""
    530         # Grab the traceback header.  Different versions of Python have
    531         # said different things on the first traceback line.
    532         ^(?P<hdr> Traceback\ \(
    533             (?: most\ recent\ call\ last
    534             |   innermost\ last
    535             ) \) :
    536         )
    537         \s* $                # toss trailing whitespace on the header.
    538         (?P<stack> .*?)      # don't blink: absorb stuff until...
    539         ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
    540         """, re.VERBOSE | re.MULTILINE | re.DOTALL)
    541 
    542     # A callable returning a true value iff its argument is a blank line
    543     # or contains a single comment.
    544     _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    545 
    546     def parse(self, string, name='<string>'):
    547         """
    548         Divide the given string into examples and intervening text,
    549         and return them as a list of alternating Examples and strings.
    550         Line numbers for the Examples are 0-based.  The optional
    551         argument `name` is a name identifying this string, and is only
    552         used for error messages.
    553         """
    554         string = string.expandtabs()
    555         # If all lines begin with the same indentation, then strip it.
    556         min_indent = self._min_indent(string)
    557         if min_indent > 0:
    558             string = '\n'.join([l[min_indent:] for l in string.split('\n')])
    559 
    560         output = []
    561         charno, lineno = 0, 0
    562         # Find all doctest examples in the string:
    563         for m in self._EXAMPLE_RE.finditer(string):
    564             # Add the pre-example text to `output`.
    565             output.append(string[charno:m.start()])
    566             # Update lineno (lines before this example)
    567             lineno += string.count('\n', charno, m.start())
    568             # Extract info from the regexp match.
    569             (source, options, want, exc_msg) = \
    570                      self._parse_example(m, name, lineno)
    571             # Create an Example, and add it to the list.
    572             if not self._IS_BLANK_OR_COMMENT(source):
    573                 output.append( Example(source, want, exc_msg,
    574                                     lineno=lineno,
    575                                     indent=min_indent+len(m.group('indent')),
    576                                     options=options) )
    577             # Update lineno (lines inside this example)
    578             lineno += string.count('\n', m.start(), m.end())
    579             # Update charno.
    580             charno = m.end()
    581         # Add any remaining post-example text to `output`.
    582         output.append(string[charno:])
    583         return output
    584 
    585     def get_doctest(self, string, globs, name, filename, lineno):
    586         """
    587         Extract all doctest examples from the given string, and
    588         collect them into a `DocTest` object.
    589 
    590         `globs`, `name`, `filename`, and `lineno` are attributes for
    591         the new `DocTest` object.  See the documentation for `DocTest`
    592         for more information.
    593         """
    594         return DocTest(self.get_examples(string, name), globs,
    595                        name, filename, lineno, string)
    596 
    597     def get_examples(self, string, name='<string>'):
    598         """
    599         Extract all doctest examples from the given string, and return
    600         them as a list of `Example` objects.  Line numbers are
    601         0-based, because it's most common in doctests that nothing
    602         interesting appears on the same line as the opening triple-quote,
    603         and so the first interesting line is called \"line 1\" then.
    604 
    605         The optional argument `name` is a name identifying this
    606         string, and is only used for error messages.
    607         """
    608         return [x for x in self.parse(string, name)
    609                 if isinstance(x, Example)]
    610 
    611     def _parse_example(self, m, name, lineno):
    612         """
    613         Given a regular expression match from `_EXAMPLE_RE` (`m`),
    614         return a pair `(source, want)`, where `source` is the matched
    615         example's source code (with prompts and indentation stripped);
    616         and `want` is the example's expected output (with indentation
    617         stripped).
    618 
    619         `name` is the string's name, and `lineno` is the line number
    620         where the example starts; both are used for error messages.
    621         """
    622         # Get the example's indentation level.
    623         indent = len(m.group('indent'))
    624 
    625         # Divide source into lines; check that they're properly
    626         # indented; and then strip their indentation & prompts.
    627         source_lines = m.group('source').split('\n')
    628         self._check_prompt_blank(source_lines, indent, name, lineno)
    629         self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
    630         source = '\n'.join([sl[indent+4:] for sl in source_lines])
    631 
    632         # Divide want into lines; check that it's properly indented; and
    633         # then strip the indentation.  Spaces before the last newline should
    634         # be preserved, so plain rstrip() isn't good enough.
    635         want = m.group('want')
    636         want_lines = want.split('\n')
    637         if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
    638             del want_lines[-1]  # forget final newline & spaces after it
    639         self._check_prefix(want_lines, ' '*indent, name,
    640                            lineno + len(source_lines))
    641         want = '\n'.join([wl[indent:] for wl in want_lines])
    642 
    643         # If `want` contains a traceback message, then extract it.
    644         m = self._EXCEPTION_RE.match(want)
    645         if m:
    646             exc_msg = m.group('msg')
    647         else:
    648             exc_msg = None
    649 
    650         # Extract options from the source.
    651         options = self._find_options(source, name, lineno)
    652 
    653         return source, options, want, exc_msg
    654 
    655     # This regular expression looks for option directives in the
    656     # source code of an example.  Option directives are comments
    657     # starting with "doctest:".  Warning: this may give false
    658     # positives for string-literals that contain the string
    659     # "#doctest:".  Eliminating these false positives would require
    660     # actually parsing the string; but we limit them by ignoring any
    661     # line containing "#doctest:" that is *followed* by a quote mark.
    662     _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
    663                                       re.MULTILINE)
    664 
    665     def _find_options(self, source, name, lineno):
    666         """
    667         Return a dictionary containing option overrides extracted from
    668         option directives in the given source string.
    669 
    670         `name` is the string's name, and `lineno` is the line number
    671         where the example starts; both are used for error messages.
    672         """
    673         options = {}
    674         # (note: with the current regexp, this will match at most once:)
    675         for m in self._OPTION_DIRECTIVE_RE.finditer(source):
    676             option_strings = m.group(1).replace(',', ' ').split()
    677             for option in option_strings:
    678                 if (option[0] not in '+-' or
    679                     option[1:] not in OPTIONFLAGS_BY_NAME):
    680                     raise ValueError('line %r of the doctest for %s '
    681                                      'has an invalid option: %r' %
    682                                      (lineno+1, name, option))
    683                 flag = OPTIONFLAGS_BY_NAME[option[1:]]
    684                 options[flag] = (option[0] == '+')
    685         if options and self._IS_BLANK_OR_COMMENT(source):
    686             raise ValueError('line %r of the doctest for %s has an option '
    687                              'directive on a line with no example: %r' %
    688                              (lineno, name, source))
    689         return options
    690 
    691     # This regular expression finds the indentation of every non-blank
    692     # line in a string.
    693     _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
    694 
    695     def _min_indent(self, s):
    696         "Return the minimum indentation of any non-blank line in `s`"
    697         indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
    698         if len(indents) > 0:
    699             return min(indents)
    700         else:
    701             return 0
    702 
    703     def _check_prompt_blank(self, lines, indent, name, lineno):
    704         """
    705         Given the lines of a source string (including prompts and
    706         leading indentation), check to make sure that every prompt is
    707         followed by a space character.  If any line is not followed by
    708         a space character, then raise ValueError.
    709         """
    710         for i, line in enumerate(lines):
    711             if len(line) >= indent+4 and line[indent+3] != ' ':
    712                 raise ValueError('line %r of the docstring for %s '
    713                                  'lacks blank after %s: %r' %
    714                                  (lineno+i+1, name,
    715                                   line[indent:indent+3], line))
    716 
    717     def _check_prefix(self, lines, prefix, name, lineno):
    718         """
    719         Check that every line in the given list starts with the given
    720         prefix; if any line does not, then raise a ValueError.
    721         """
    722         for i, line in enumerate(lines):
    723             if line and not line.startswith(prefix):
    724                 raise ValueError('line %r of the docstring for %s has '
    725                                  'inconsistent leading whitespace: %r' %
    726                                  (lineno+i+1, name, line))
    727 
    728 
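    # Illustrative check (the _demo_* name is an addition for illustration):
    # parsing a small string strips the prompts and captures the expected
    # output.
    _demo_parsed = DocTestParser().get_examples(
        "Some text.\n\n    >>> 2 + 2\n    4\n")
    assert len(_demo_parsed) == 1
    assert _demo_parsed[0].source == '2 + 2\n'
    assert _demo_parsed[0].want == '4\n'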
    729 ######################################################################
    730 ## 4. DocTest Finder
    731 ######################################################################
    732 
    733 class DocTestFinder:
    734     """
    735     A class used to extract the DocTests that are relevant to a given
    736     object, from its docstring and the docstrings of its contained
    737     objects.  Doctests can currently be extracted from the following
    738     object types: modules, functions, classes, methods, staticmethods,
    739     classmethods, and properties.
    740     """
    741 
    742     def __init__(self, verbose=False, parser=DocTestParser(),
    743                  recurse=True, exclude_empty=True):
    744         """
    745         Create a new doctest finder.
    746 
    747         The optional argument `parser` specifies the parser to use
    748         when extracting doctests: an object (typically a
    749         `DocTestParser` instance) whose `get_doctest` method is
    750         called to build `DocTest` objects (or objects implementing
    751         the same interface) from docstrings.
    752 
    753         If the optional argument `recurse` is false, then `find` will
    754         only examine the given object, and not any contained objects.
    755 
    756         If the optional argument `exclude_empty` is false, then `find`
    757         will include tests for objects with empty docstrings.
    758         """
    759         self._parser = parser
    760         self._verbose = verbose
    761         self._recurse = recurse
    762         self._exclude_empty = exclude_empty
    763 
    764     def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
    765         """
    766         Return a list of the DocTests that are defined by the given
    767         object's docstring, or by any of its contained objects'
    768         docstrings.
    769 
    770         The optional parameter `module` is the module that contains
    771         the given object.  If the module is not specified or is None, then
    772         the test finder will attempt to automatically determine the
    773         correct module.  The object's module is used:
    774 
    775             - As a default namespace, if `globs` is not specified.
    776             - To prevent the DocTestFinder from extracting DocTests
    777               from objects that are imported from other modules.
    778             - To find the name of the file containing the object.
    779             - To help find the line number of the object within its
    780               file.
    781 
    782         Contained objects whose module does not match `module` are ignored.
    783 
    784         If `module` is False, no attempt to find the module will be made.
    785         This is obscure, of use mostly in tests:  if `module` is False, or
    786         is None but cannot be found automatically, then all objects are
    787         considered to belong to the (non-existent) module, so all contained
    788         objects will (recursively) be searched for doctests.
    789 
    790         The globals for each DocTest are formed by combining `globs`
    791         and `extraglobs` (bindings in `extraglobs` override bindings
    792         in `globs`).  A new copy of the globals dictionary is created
    793         for each DocTest.  If `globs` is not specified, then it
    794         defaults to the module's `__dict__`, if specified, or {}
    795         otherwise.  If `extraglobs` is not specified, then it defaults
    796         to {}.
    797 
    798         """
    799         # If name was not specified, then extract it from the object.
    800         if name is None:
    801             name = getattr(obj, '__name__', None)
    802             if name is None:
    803                 raise ValueError("DocTestFinder.find: name must be given "
    804                         "when obj.__name__ doesn't exist: %r" %
    805                                  (type(obj),))
    806 
    807         # Find the module that contains the given object (if obj is
    808         # a module, then module=obj).  Note: this may fail, in which
    809         # case module will be None.
    810         if module is False:
    811             module = None
    812         elif module is None:
    813             module = inspect.getmodule(obj)
    814 
    815         # Read the module's source code.  This is used by
    816         # DocTestFinder._find_lineno to find the line number for a
    817         # given object's docstring.
    818         try:
    819             file = inspect.getsourcefile(obj) or inspect.getfile(obj)
    820             source_lines = linecache.getlines(file)
    821             if not source_lines:
    822                 source_lines = None
    823         except TypeError:
    824             source_lines = None
    825 
    826         # Initialize globals, and merge in extraglobs.
    827         if globs is None:
    828             if module is None:
    829                 globs = {}
    830             else:
    831                 globs = module.__dict__.copy()
    832         else:
    833             globs = globs.copy()
    834         if extraglobs is not None:
    835             globs.update(extraglobs)
    836 
    837         # Recursively explore `obj`, extracting DocTests.
    838         tests = []
    839         self._find(tests, obj, name, module, source_lines, globs, {})
    840         # Sort the tests by alpha order of names, for consistency in
    841         # verbose-mode output.  This was a feature of doctest in Pythons
    842         # <= 2.3 that got lost by accident in 2.4.  It was repaired in
    843         # 2.4.4 and 2.5.
    844         tests.sort()
    845         return tests
    846 
    847     def _from_module(self, module, object):
    848         """
    849         Return true if the given object is defined in the given
    850         module.
    851         """
    852         if module is None:
    853             return True
    854         elif inspect.isfunction(object):
    855             return module.__dict__ is object.func_globals
    856         elif inspect.isclass(object):
    857             return module.__name__ == object.__module__
    858         elif inspect.getmodule(object) is not None:
    859             return module is inspect.getmodule(object)
    860         elif hasattr(object, '__module__'):
    861             return module.__name__ == object.__module__
    862         elif isinstance(object, property):
    863             return True # [XX] no way to be sure.
    864         else:
    865             raise ValueError("object must be a class or function")
    866 
    867     def _find(self, tests, obj, name, module, source_lines, globs, seen):
    868         """
    869         Find tests for the given object and any contained objects, and
    870         add them to `tests`.
    871         """
    872         if self._verbose:
    873             print 'Finding tests in %s' % name
    874 
    875         # If we've already processed this object, then ignore it.
    876         if id(obj) in seen:
    877             return
    878         seen[id(obj)] = 1
    879 
    880         # Find a test for this object, and add it to the list of tests.
    881         test = self._get_test(obj, name, module, globs, source_lines)
    882         if test is not None:
    883             tests.append(test)
    884 
    885         # Look for tests in a module's contained objects.
    886         if inspect.ismodule(obj) and self._recurse:
    887             for valname, val in obj.__dict__.items():
    888                 valname = '%s.%s' % (name, valname)
    889                 # Recurse to functions & classes.
    890                 if ((inspect.isfunction(val) or inspect.isclass(val)) and
    891                     self._from_module(module, val)):
    892                     self._find(tests, val, valname, module, source_lines,
    893                                globs, seen)
    894 
    895         # Look for tests in a module's __test__ dictionary.
    896         if inspect.ismodule(obj) and self._recurse:
    897             for valname, val in getattr(obj, '__test__', {}).items():
    898                 if not isinstance(valname, basestring):
    899                     raise ValueError("DocTestFinder.find: __test__ keys "
    900                                      "must be strings: %r" %
    901                                      (type(valname),))
    902                 if not (inspect.isfunction(val) or inspect.isclass(val) or
    903                         inspect.ismethod(val) or inspect.ismodule(val) or
    904                         isinstance(val, basestring)):
    905                     raise ValueError("DocTestFinder.find: __test__ values "
    906                                      "must be strings, functions, methods, "
    907                                      "classes, or modules: %r" %
    908                                      (type(val),))
    909                 valname = '%s.__test__.%s' % (name, valname)
    910                 self._find(tests, val, valname, module, source_lines,
    911                            globs, seen)
    912 
    913         # Look for tests in a class's contained objects.
    914         if inspect.isclass(obj) and self._recurse:
    915             for valname, val in obj.__dict__.items():
    916                 # Special handling for staticmethod/classmethod.
    917                 if isinstance(val, staticmethod):
    918                     val = getattr(obj, valname)
    919                 if isinstance(val, classmethod):
    920                     val = getattr(obj, valname).im_func
    921 
    922                 # Recurse to methods, properties, and nested classes.
    923                 if ((inspect.isfunction(val) or inspect.isclass(val) or
    924                       isinstance(val, property)) and
    925                       self._from_module(module, val)):
    926                     valname = '%s.%s' % (name, valname)
    927                     self._find(tests, val, valname, module, source_lines,
    928                                globs, seen)
    929 
    930     def _get_test(self, obj, name, module, globs, source_lines):
    931         """
    932         Return a DocTest for the given object, if it defines a docstring;
    933         otherwise, return None.
    934         """
    935         # Extract the object's docstring.  If it doesn't have one,
    936         # then return None (no test for this object).
    937         if isinstance(obj, basestring):
    938             docstring = obj
    939         else:
    940             try:
    941                 if obj.__doc__ is None:
    942                     docstring = ''
    943                 else:
    944                     docstring = obj.__doc__
    945                     if not isinstance(docstring, basestring):
    946                         docstring = str(docstring)
    947             except (TypeError, AttributeError):
    948                 docstring = ''
    949 
    950         # Find the docstring's location in the file.
    951         lineno = self._find_lineno(obj, source_lines)
    952 
    953         # Don't bother if the docstring is empty.
    954         if self._exclude_empty and not docstring:
    955             return None
    956 
    957         # Return a DocTest for this object.
    958         if module is None:
    959             filename = None
    960         else:
    961             filename = getattr(module, '__file__', module.__name__)
    962             if filename[-4:] in (".pyc", ".pyo"):
    963                 filename = filename[:-1]
    964         return self._parser.get_doctest(docstring, globs, name,
    965                                         filename, lineno)
    966 
    967     def _find_lineno(self, obj, source_lines):
    968         """
    969         Return a line number of the given object's docstring.  Note:
    970         this method assumes that the object has a docstring.
    971         """
    972         lineno = None
    973 
    974         # Find the line number for modules.
    975         if inspect.ismodule(obj):
    976             lineno = 0
    977 
    978         # Find the line number for classes.
    979         # Note: this could be fooled if a class is defined multiple
    980         # times in a single file.
    981         if inspect.isclass(obj):
    982             if source_lines is None:
    983                 return None
    984             pat = re.compile(r'^\s*class\s*%s\b' %
    985                              getattr(obj, '__name__', '-'))
    986             for i, line in enumerate(source_lines):
    987                 if pat.match(line):
    988                     lineno = i
    989                     break
    990 
    991         # Find the line number for functions & methods.
    992         if inspect.ismethod(obj): obj = obj.im_func
    993         if inspect.isfunction(obj): obj = obj.func_code
    994         if inspect.istraceback(obj): obj = obj.tb_frame
    995         if inspect.isframe(obj): obj = obj.f_code
    996         if inspect.iscode(obj):
    997             lineno = getattr(obj, 'co_firstlineno', None)-1
    998 
    999         # Find the line number where the docstring starts.  Assume
    1000         # that it's the first line that begins with a quote mark.
    1001         # Note: this could be fooled by a multiline function
    1002         # signature, where a continuation line begins with a quote
    1003         # mark.
    1004         if lineno is not None:
    1005             if source_lines is None:
    1006                 return lineno+1
    1007             pat = re.compile('(^|.*:)\s*\w*("|\')')
    1008             for lineno in range(lineno, len(source_lines)):
    1009                 if pat.match(source_lines[lineno]):
    1010                     return lineno
    1011 
    1012         # We couldn't find the line number.
    1013         return None
    1014 
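    # Illustrative check (the _demo_* names are additions for illustration):
    # the finder extracts one DocTest from this throwaway function's
    # docstring.
    def _demo_finder_target():
        """
        >>> 1 + 1
        2
        """
    _demo_found = DocTestFinder().find(_demo_finder_target)
    assert len(_demo_found) == 1
    assert _demo_found[0].examples[0].source == '1 + 1\n'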
    1015 ######################################################################
    1016 ## 5. DocTest Runner
    1017 ######################################################################
    1018 
    1019 class DocTestRunner:
    1020     """
    1021     A class used to run DocTest test cases, and accumulate statistics.
    1022     The `run` method is used to process a single DocTest case.  It
    1023     returns a tuple `(f, t)`, where `t` is the number of test cases
    1024     tried, and `f` is the number of test cases that failed.
    1025 
    1026         >>> tests = DocTestFinder().find(_TestClass)
    1027         >>> runner = DocTestRunner(verbose=False)
    1028         >>> tests.sort(key = lambda test: test.name)
    1029         >>> for test in tests:
    1030         ...     print test.name, '->', runner.run(test)
    1031         _TestClass -> (0, 2)
    1032         _TestClass.__init__ -> (0, 2)
    1033         _TestClass.get -> (0, 2)
    1034         _TestClass.square -> (0, 1)
    1035 
    1036     The `summarize` method prints a summary of all the test cases that
    1037     have been run by the runner, and returns an aggregated `(f, t)`
    1038     tuple:
    1039 
    1040         >>> runner.summarize(verbose=1)
    1041         4 items passed all tests:
    1042            2 tests in _TestClass
    1043            2 tests in _TestClass.__init__
    1044            2 tests in _TestClass.get
    1045            1 tests in _TestClass.square
    1046         7 tests in 4 items.
    1047         7 passed and 0 failed.
    1048         Test passed.
    1049         (0, 7)
    1050 
    1051     The aggregated number of tried examples and failed examples is
    1052     also available via the `tries` and `failures` attributes:
    1053 
    1054         >>> runner.tries
    1055         7
    1056         >>> runner.failures
    1057         0
    1058 
    1059     The comparison between expected outputs and actual outputs is done
    1060     by an `OutputChecker`.  This comparison may be customized with a
    1061     number of option flags; see the documentation for `testmod` for
    1062     more information.  If the option flags are insufficient, then the
    1063     comparison may also be customized by passing a subclass of
    1064     `OutputChecker` to the constructor.
    1065 
    1066     The test runner's display output can be controlled in two ways.
    1067     First, an output function (`out`) can be passed to
    1068     `DocTestRunner.run`; this function will be called with strings that
    1069     should be displayed.  It defaults to `sys.stdout.write`.  If
    1070     capturing the output is not sufficient, then the display output
    1071     can be also customized by subclassing DocTestRunner, and
    1072     overriding the methods `report_start`, `report_success`,
    1073     `report_unexpected_exception`, and `report_failure`.
    1074     """
    1075     # This divider string is used to separate failure messages, and to
    1076     # separate sections of the summary.
    1077     DIVIDER = "*" * 70
    1078 
    1079     def __init__(self, checker=None, verbose=None, optionflags=0):
    1080         """
    1081         Create a new test runner.
    1082 
    1083         Optional keyword arg `checker` is the `OutputChecker` that
    1084         should be used to compare the expected outputs and actual
    1085         outputs of doctest examples.
    1086 
    1087         Optional keyword arg 'verbose' prints lots of stuff if true,
    1088         only failures if false; by default, it's true iff '-v' is in
    1089         sys.argv.
    1090 
    1091         Optional argument `optionflags` can be used to control how the
    1092         test runner compares expected output to actual output, and how
    1093         it displays failures.  See the documentation for `testmod` for
    1094         more information.
    1095         """
    1096         self._checker = checker or OutputChecker()
    1097         if verbose is None:
    1098             verbose = '-v' in sys.argv
    1099         self._verbose = verbose
    1100         self.optionflags = optionflags
    1101         self.original_optionflags = optionflags
    1102 
    1103         # Keep track of the examples we've run.
    1104         self.tries = 0
    1105         self.failures = 0
    1106         self._name2ft = {}
    1107 
    1108         # Create a fake output target for capturing doctest output.
    1109         self._fakeout = _SpoofOut()
    1110 
    1111     #/////////////////////////////////////////////////////////////////
    1112     # Reporting methods
    1113     #/////////////////////////////////////////////////////////////////
    1114 
    1115     def report_start(self, out, test, example):
    1116         """
    1117         Report that the test runner is about to process the given
    1118         example.  (Only displays a message if verbose=True)
    1119         """
    1120         if self._verbose:
    1121             if example.want:
    1122                 out('Trying:\n' + _indent(example.source) +
    1123                     'Expecting:\n' + _indent(example.want))
    1124             else:
    1125                 out('Trying:\n' + _indent(example.source) +
    1126                     'Expecting nothing\n')
    1127 
    1128     def report_success(self, out, test, example, got):
    1129         """
    1130         Report that the given example ran successfully.  (Only
    1131         displays a message if verbose=True)
    1132         """
    1133         if self._verbose:
    1134             out("ok\n")
    1135 
    1136     def report_failure(self, out, test, example, got):
    1137         """
    1138         Report that the given example failed.
    1139         """
    1140         out(self._failure_header(test, example) +
    1141             self._checker.output_difference(example, got, self.optionflags))
    1142 
    1143     def report_unexpected_exception(self, out, test, example, exc_info):
    1144         """
    1145         Report that the given example raised an unexpected exception.
    1146         """
    1147         out(self._failure_header(test, example) +
    1148             'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
    1149 
    1150     def _failure_header(self, test, example):
    1151         out = [self.DIVIDER]
    1152         if test.filename:
    1153             if test.lineno is not None and example.lineno is not None:
    1154                 lineno = test.lineno + example.lineno + 1
    1155             else:
    1156                 lineno = '?'
    1157             out.append('File "%s", line %s, in %s' %
    1158                        (test.filename, lineno, test.name))
    1159         else:
    1160             out.append('Line %s, in %s' % (example.lineno+1, test.name))
    1161         out.append('Failed example:')
    1162         source = example.source
    1163         out.append(_indent(source))
    1164         return '\n'.join(out)
    1165 
    1166     #/////////////////////////////////////////////////////////////////
    1167     # DocTest Running
    1168     #/////////////////////////////////////////////////////////////////
    1169 
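        # Note on the compile() call below: "single" mode makes each example
        # behave like an interactive statement (bare expression values are
        # printed via sys.displayhook), and the trailing 1 sets dont_inherit,
        # so only the __future__ features passed via `compileflags` apply to
        # the example, not those active in this module itself.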
    1170     def run_one_example(self, test, example, filename, compileflags):
    1171         exec compile(example.source, filename, "single",
    1172                      compileflags, 1) in test.globs
    1173 
    1174     def run_one_test(self, test, compileflags, out):
    1175         """
    1176         Run the examples in `test`.  Write the outcome of each example
    1177         with one of the `DocTestRunner.report_*` methods, using the
    1178         writer function `out`.  `compileflags` is the set of compiler
    1179         flags that should be used to execute examples.  Return a tuple
    1180         `(f, t)`, where `t` is the number of examples tried, and `f`
    1181         is the number of examples that failed.  The examples are run
    1182         in the namespace `test.globs`.
    1183         """
    1184         # Keep track of the number of failures and tries.
    1185         failures = tries = 0
    1186 
    1187         # Save the option flags (since option directives can be used
    1188         # to modify them).
    1189         original_optionflags = self.optionflags
    1190 
    1191         SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
    1192 
    1193         check = self._checker.check_output
    1194 
    1195         # Process each example.
    1196         for examplenum, example in enumerate(test.examples):
    1197 
    1198             # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
    1199             # reporting after the first failure.
    1200             quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
    1201                      failures > 0)
    1202 
    1203             # Merge in the example's options.
    1204             self.optionflags = original_optionflags
    1205             if example.options:
    1206                 for (optionflag, val) in example.options.items():
    1207                     if val:
    1208                         self.optionflags |= optionflag
    1209                     else:
    1210                         self.optionflags &= ~optionflag
    1211 
    1212             # If 'SKIP' is set, then skip this example.
    1213             if self.optionflags & SKIP:
    1214                 continue
    1215 
    1216             # Record that we started this example.
    1217             tries += 1
    1218             if not quiet:
    1219                 self.report_start(out, test, example)
    1220 
    1221             # Use a special filename for compile(), so we can retrieve
    1222             # the source code during interactive debugging (see
    1223             # __patched_linecache_getlines).
    1224             filename = '<doctest %s[%d]>' % (test.name, examplenum)
    1225 
    1226             # Run the example in the given context (globs), and record
    1227             # any exception that gets raised.  (But don't intercept
    1228             # keyboard interrupts.)
    1229             try:
    1230                 # Don't blink!  This is where the user's code gets run.
    1231                 self.run_one_example(test, example, filename, compileflags)
    1232                 self.debugger.set_continue() # ==== Example Finished ====
    1233                 exception = None
    1234             except KeyboardInterrupt:
    1235                 raise
    1236             except:
    1237                 exception = sys.exc_info()
    1238                 self.debugger.set_continue() # ==== Example Finished ====
    1239 
    1240             got = self._fakeout.getvalue()  # the actual output
    1241             self._fakeout.truncate(0)
    1242             outcome = FAILURE   # guilty until proved innocent or insane
    1243 
    1244             # If the example executed without raising any exceptions,
    1245             # verify its output.
    1246             if exception is None:
    1247                 if check(example.want, got, self.optionflags):
    1248                     outcome = SUCCESS
    1249 
    1250             # The example raised an exception:  check if it was expected.
    1251             else:
    1252                 exc_info = sys.exc_info()
    1253                 exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
    1254                 if not quiet:
    1255                     got += _exception_traceback(exc_info)
    1256 
    1257                 # If `example.exc_msg` is None, then we weren't expecting
    1258                 # an exception.
    1259                 if example.exc_msg is None:
    1260                     outcome = BOOM
    1261 
    1262                 # We expected an exception:  see whether it matches.
    1263                 elif check(example.exc_msg, exc_msg, self.optionflags):
    1264                     outcome = SUCCESS
    1265 
    1266                 # Another chance if they didn't care about the detail.
    1267                 elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
    1268                     m1 = re.match(r'[^:]*:', example.exc_msg)
    1269                     m2 = re.match(r'[^:]*:', exc_msg)
    1270                     if m1 and m2 and check(m1.group(0), m2.group(0),
    1271                                            self.optionflags):
    1272                         outcome = SUCCESS
    1273 
    1274             # Report the outcome.
    1275             if outcome is SUCCESS:
    1276                 if not quiet:
    1277                     self.report_success(out, test, example, got)
    1278             elif outcome is FAILURE:
    1279                 if not quiet:
    1280                     self.report_failure(out, test, example, got)
    1281                 failures += 1
    1282             elif outcome is BOOM:
    1283                 if not quiet:
    1284                     self.report_unexpected_exception(out, test, example,
    1285                                                      exc_info)
    1286                 failures += 1
    1287             else:
    1288                 assert False, ("unknown outcome", outcome)
    1289 
    1290         # Restore the option flags (in case they were modified)
    1291         self.optionflags = original_optionflags
    1292 
    1293         # Record and return the number of failures and tries.
    1294         self.__record_outcome(test, failures, tries)
    1295         return failures, tries
    1296 
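Reviewer sketch (not part of the module): the loop above merges per-example option
directives into self.optionflags before each example and skips examples marked with
SKIP.  Assuming the file is importable as `ncadoctest`:

    import ncadoctest

    src = '''
    >>> print range(20)   # doctest: +ELLIPSIS
    [0, 1, ..., 19]
    >>> 1/0               # doctest: +SKIP
    '''
    test = ncadoctest.DocTestParser().get_doctest(src, {}, 'directives',
                                                  '<review>', 0)
    runner = ncadoctest.DocTestRunner(verbose=False)
    print runner.run(test)    # (0, 1): the SKIPped example is never tried
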
    1297     def __record_outcome(self, test, f, t):
    1298         """
    1299         Record the fact that the given DocTest (`test`) generated `f`
    1300         failures out of `t` tried examples.
    1301         """
    1302         f2, t2 = self._name2ft.get(test.name, (0,0))
    1303         self._name2ft[test.name] = (f+f2, t+t2)
    1304         self.failures += f
    1305         self.tries += t
    1306 
    1307     __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
    1308                                          r'(?P<name>[\w\.]+)'
    1309                                          r'\[(?P<examplenum>\d+)\]>$')
    1310     def __patched_linecache_getlines(self, filename, module_globals=None):
    1311         m = self.__LINECACHE_FILENAME_RE.match(filename)
    1312         if m and m.group('name') == self.test.name:
    1313             example = self.test.examples[int(m.group('examplenum'))]
    1314             return example.source.splitlines(True)
    1315         else:
    1316             return self.save_linecache_getlines(filename, module_globals)
    1317 
    1318     def run(self, test, compileflags=None, out=None, clear_globs=True):
    1319         """
    1320         Run the examples in `test`, and display the results using the
    1321         writer function `out`.
    1322 
    1323         The examples are run in the namespace `test.globs`.  If
    1324         `clear_globs` is true (the default), then this namespace will
    1325         be cleared after the test runs, to help with garbage
    1326         collection.  If you would like to examine the namespace after
    1327         the test completes, then use `clear_globs=False`.
    1328 
    1329         `compileflags` gives the set of flags that should be used by
    1330         the Python compiler when running the examples.  If not
    1331         specified, then it will default to the set of future-import
    1332         flags that apply to `globs`.
    1333 
    1334         The output of each example is checked using
    1335         `OutputChecker.check_output`, and the results are formatted by
    1336         the `DocTestRunner.report_*` methods.
    1337         """
    1338         self.test = test
    1339 
    1340         if compileflags is None:
    1341             compileflags = _extract_future_flags(test.globs)
    1342 
    1343         save_stdout = sys.stdout
    1344         if out is None:
    1345             out = save_stdout.write
    1346         sys.stdout = self._fakeout
    1347 
    1348         # Patch pdb.set_trace to restore sys.stdout during interactive
    1349         # debugging (so it's not still redirected to self._fakeout).
    1350         # Note that the interactive output will go to *our*
    1351         # save_stdout, even if that's not the real sys.stdout; this
    1352         # allows us to write test cases for the set_trace behavior.
    1353         save_set_trace = pdb.set_trace
    1354         self.debugger = _OutputRedirectingPdb(save_stdout)
    1355         self.debugger.reset()
    1356         pdb.set_trace = self.debugger.set_trace
    1357 
    1358         # Patch linecache.getlines, so we can see the example's source
    1359         # when we're inside the debugger.
    1360         self.save_linecache_getlines = linecache.getlines
    1361         linecache.getlines = self.__patched_linecache_getlines
    1362 
    1363         try:
    1364             return self.run_one_test(test, compileflags, out)
    1365         finally:
    1366             sys.stdout = save_stdout
    1367             pdb.set_trace = save_set_trace
    1368             linecache.getlines = self.save_linecache_getlines
    1369             if clear_globs:
    1370                 test.globs.clear()
    1371 
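Reviewer sketch (not part of the module): run() redirects stdout, installs the
debugger and linecache patches, and then delegates to run_one_test().  A minimal
driver, assuming the file is importable as `ncadoctest`:

    import ncadoctest

    test = ncadoctest.DocTestParser().get_doctest(
        '>>> x = 6*7\n>>> x\n42\n', {}, 'answer', '<review>', 0)
    runner = ncadoctest.DocTestRunner(verbose=False)
    report = []
    failures, tries = runner.run(test, out=report.append, clear_globs=False)
    print failures, tries       # 0 2
    print test.globs['x']       # 42 -- kept because clear_globs=False
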
    1372     #/////////////////////////////////////////////////////////////////
    1373     # Summarization
    1374     #/////////////////////////////////////////////////////////////////
    1375     def summarize(self, verbose=None):
    1376         """
    1377         Print a summary of all the test cases that have been run by
    1378         this DocTestRunner, and return a tuple `(f, t)`, where `f` is
    1379         the total number of failed examples, and `t` is the total
    1380         number of tried examples.
    1381 
    1382         The optional `verbose` argument controls how detailed the
    1383         summary is.  If the verbosity is not specified, then the
    1384         DocTestRunner's verbosity is used.
    1385         """
    1386         if verbose is None:
    1387             verbose = self._verbose
    1388         notests = []
    1389         passed = []
    1390         failed = []
    1391         totalt = totalf = 0
    1392         for x in self._name2ft.items():
    1393             name, (f, t) = x
    1394             assert f <= t
    1395             totalt += t
    1396             totalf += f
    1397             if t == 0:
    1398                 notests.append(name)
    1399             elif f == 0:
    1400                 passed.append( (name, t) )
    1401             else:
    1402                 failed.append(x)
    1403         if verbose:
    1404             if notests:
    1405                 print len(notests), "items had no tests:"
    1406                 notests.sort()
    1407                 for thing in notests:
    1408                     print "   ", thing
    1409             if passed:
    1410                 print len(passed), "items passed all tests:"
    1411                 passed.sort()
    1412                 for thing, count in passed:
    1413                     print " %3d tests in %s" % (count, thing)
    1414         if failed:
    1415             print self.DIVIDER
    1416             print len(failed), "items had failures:"
    1417             failed.sort()
    1418             for thing, (f, t) in failed:
    1419                 print " %3d of %3d in %s" % (f, t, thing)
    1420         if verbose:
    1421             print totalt, "tests in", len(self._name2ft), "items."
    1422             print totalt - totalf, "passed and", totalf, "failed."
    1423         if totalf:
    1424             print "***Test Failed***", totalf, "failures."
    1425         elif verbose:
    1426             print "Test passed."
    1427         return totalf, totalt
    1428 
    1429     #/////////////////////////////////////////////////////////////////
    1430     # Backward compatibility cruft to maintain doctest.master.
    1431     #/////////////////////////////////////////////////////////////////
    1432     def merge(self, other):
    1433         d = self._name2ft
    1434         for name, (f, t) in other._name2ft.items():
    1435             if name in d:
    1436                 print "*** DocTestRunner.merge: '" + name + "' in both" \
    1437                     " testers; summing outcomes."
    1438                 f2, t2 = d[name]
    1439                 f = f + f2
    1440                 t = t + t2
    1441             d[name] = f, t
    1442 
    1443 class OutputChecker:
    1444     """
    1445     A class used to check whether the actual output from a doctest
    1446     example matches the expected output.  `OutputChecker` defines two
    1447     methods: `check_output`, which compares a given pair of outputs,
    1448     and returns true if they match; and `output_difference`, which
    1449     returns a string describing the differences between two outputs.
    1450     """
    1451     def check_output(self, want, got, optionflags):
    1452         """
    1453         Return True iff the actual output from an example (`got`)
    1454         matches the expected output (`want`).  These strings are
    1455         always considered to match if they are identical; but
    1456         depending on what option flags the test runner is using,
    1457         several non-exact match types are also possible.  See the
    1458         documentation for `DocTestRunner` for more information about
    1459         option flags.
    1460         """
    1461         # Handle the common case first, for efficiency:
    1462         # if they're string-identical, always return true.
    1463         if got == want:
    1464             return True
    1465 
    1466         # The values True and False replaced 1 and 0 as the return
    1467         # value for boolean comparisons in Python 2.3.
    1468         if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
    1469             if (got,want) == ("True\n", "1\n"):
    1470                 return True
    1471             if (got,want) == ("False\n", "0\n"):
    1472                 return True
    1473 
    1474         # <BLANKLINE> can be used as a special sequence to signify a
    1475         # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
    1476         if not (optionflags & DONT_ACCEPT_BLANKLINE):
    1477             # Replace <BLANKLINE> in want with a blank line.
    1478             want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
    1479                           '', want)
    1480             # If a line in got contains only spaces, then remove the
    1481             # spaces.
    1482             got = re.sub('(?m)^\s*?$', '', got)
    1483             if got == want:
    1484                 return True
    1485 
    1486         # This flag causes doctest to ignore any differences in the
    1487         # contents of whitespace strings.  Note that this can be used
    1488         # in conjunction with the ELLIPSIS flag.
    1489         if optionflags & NORMALIZE_WHITESPACE:
    1490             got = ' '.join(got.split())
    1491             want = ' '.join(want.split())
    1492             if got == want:
    1493                 return True
    1494 
    1495         # The ELLIPSIS flag says to let the sequence "..." in `want`
    1496         # match any substring in `got`.
    1497         if optionflags & ELLIPSIS:
    1498             if _ellipsis_match(want, got):
    1499                 return True
    1500 
    1501         # We didn't find any match; return false.
    1502         return False
    1503 
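Reviewer sketch (not part of the module): check_output() falls through the optional
relaxations above in order.  Assuming the file is importable as `ncadoctest`:

    import ncadoctest
    from ncadoctest import DONT_ACCEPT_TRUE_FOR_1, NORMALIZE_WHITESPACE, ELLIPSIS

    checker = ncadoctest.OutputChecker()
    print checker.check_output("1\n", "True\n", 0)                      # True
    print checker.check_output("1\n", "True\n", DONT_ACCEPT_TRUE_FOR_1) # False
    print checker.check_output("a  b\n", "a b\n", NORMALIZE_WHITESPACE) # True
    print checker.check_output("[0, ..., 9]\n", str(range(10)) + "\n",
                               ELLIPSIS)                                # True
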
    1504     # Should we do a fancy diff?
    1505     def _do_a_fancy_diff(self, want, got, optionflags):
    1506         # Not unless they asked for a fancy diff.
    1507         if not optionflags & (REPORT_UDIFF |
    1508                               REPORT_CDIFF |
    1509                               REPORT_NDIFF):
    1510             return False
    1511 
    1512         # If expected output uses ellipsis, a meaningful fancy diff is
    1513         # too hard ... or maybe not.  In two real-life failures Tim saw,
    1514         # a diff was a major help anyway, so this is commented out.
    1515         # [todo] _ellipsis_match() knows which pieces do and don't match,
    1516         # and could be the basis for a kick-ass diff in this case.
    1517         ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
    1518         ##    return False
    1519 
    1520         # ndiff does intraline difference marking, so can be useful even
    1521         # for 1-line differences.
    1522         if optionflags & REPORT_NDIFF:
    1523             return True
    1524 
    1525         # The other diff types need at least a few lines to be helpful.
    1526         return want.count('\n') > 2 and got.count('\n') > 2
    1527 
    1528     def output_difference(self, example, got, optionflags):
    1529         """
    1530         Return a string describing the differences between the
    1531         expected output for a given example (`example`) and the actual
    1532         output (`got`).  `optionflags` is the set of option flags used
    1533         to compare `want` and `got`.
    1534         """
    1535         want = example.want
    1536         # If <BLANKLINE>s are being used, then replace blank lines
    1537         # with <BLANKLINE> in the actual output string.
    1538         if not (optionflags & DONT_ACCEPT_BLANKLINE):
    1539             got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
    1540 
    1541         # Check if we should use diff.
    1542         if self._do_a_fancy_diff(want, got, optionflags):
    1543             # Split want & got into lines.
    1544             want_lines = want.splitlines(True)  # True == keep line ends
    1545             got_lines = got.splitlines(True)
    1546             # Use difflib to find their differences.
    1547             if optionflags & REPORT_UDIFF:
    1548                 diff = difflib.unified_diff(want_lines, got_lines, n=2)
    1549                 diff = list(diff)[2:] # strip the diff header
    1550                 kind = 'unified diff with -expected +actual'
    1551             elif optionflags & REPORT_CDIFF:
    1552                 diff = difflib.context_diff(want_lines, got_lines, n=2)
    1553                 diff = list(diff)[2:] # strip the diff header
    1554                 kind = 'context diff with expected followed by actual'
    1555             elif optionflags & REPORT_NDIFF:
    1556                 engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
    1557                 diff = list(engine.compare(want_lines, got_lines))
    1558                 kind = 'ndiff with -expected +actual'
    1559             else:
    1560                 assert 0, 'Bad diff option'
    1561             # Remove trailing whitespace on diff output.
    1562             diff = [line.rstrip() + '\n' for line in diff]
    1563             return 'Differences (%s):\n' % kind + _indent(''.join(diff))
    1564 
    1565         # If we're not using diff, then simply list the expected
    1566         # output followed by the actual output.
    1567         if want and got:
    1568             return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
    1569         elif want:
    1570             return 'Expected:\n%sGot nothing\n' % _indent(want)
    1571         elif got:
    1572             return 'Expected nothing\nGot:\n%s' % _indent(got)
    1573         else:
    1574             return 'Expected nothing\nGot nothing\n'
    1575 
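Reviewer sketch (not part of the module): output_difference() formats the report
shown by report_failure() above; with a REPORT_*DIFF flag it emits a difflib diff
instead of the plain Expected/Got listing.  Assuming the file is importable as
`ncadoctest`:

    import ncadoctest

    example = ncadoctest.Example("print 'a'\nprint 'b'\nprint 'c'\n", "a\nb\nc\n")
    got = "a\nB\nc\n"
    checker = ncadoctest.OutputChecker()
    print checker.output_difference(example, got, ncadoctest.REPORT_UDIFF)
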
    1576 class DocTestFailure(Exception):
    1577     """A DocTest example has failed in debugging mode.
    1578 
    1579     The exception instance has variables:
    1580 
    1581     - test: the DocTest object being run
    1582 
    1583     - example: the Example object that failed
    1584 
    1585     - got: the actual output
    1586     """
    1587     def __init__(self, test, example, got):
    1588         self.test = test
    1589         self.example = example
    1590         self.got = got
    1591 
    1592     def __str__(self):
    1593         return str(self.test)
    1594 
    1595 class UnexpectedException(Exception):
    1596     """A DocTest example has encountered an unexpected exception
    1597 
    1598     The exception instance has variables:
    1599 
    1600     - test: the DocTest object being run
    1601 
    1602     - example: the Example object that failed
    1603 
    1604     - exc_info: the exception info
    1605     """
    1606     def __init__(self, test, example, exc_info):
    1607         self.test = test
    1608         self.example = example
    1609         self.exc_info = exc_info
    1610 
    1611     def __str__(self):
    1612         return str(self.test)
    1613 
    1614 class DebugRunner(DocTestRunner):
    1615     r"""Run doc tests but raise an exception as soon as there is a failure.
    1616 
    1617        If an unexpected exception occurs, an UnexpectedException is raised.
    1618        It contains the test, the example, and the original exception:
    1619 
    1620          >>> runner = DebugRunner(verbose=False)
    1621          >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    1622          ...                                    {}, 'foo', 'foo.py', 0)
    1623          >>> try:
    1624          ...     runner.run(test)
    1625          ... except UnexpectedException, failure:
    1626          ...     pass
    1627 
    1628          >>> failure.test is test
    1629          True
    1630 
    1631          >>> failure.example.want
    1632          '42\n'
    1633 
    1634          >>> exc_info = failure.exc_info
    1635          >>> raise exc_info[0], exc_info[1], exc_info[2]
    1636          Traceback (most recent call last):
    1637          ...
    1638          KeyError
    1639 
    1640        We wrap the original exception to give the calling application
    1641        access to the test and example information.
    1642 
    1643        If the output doesn't match, then a DocTestFailure is raised:
    1644 
    1645          >>> test = DocTestParser().get_doctest('''
    1646          ...      >>> x = 1
    1647          ...      >>> x
    1648          ...      2
    1649          ...      ''', {}, 'foo', 'foo.py', 0)
    1650 
    1651          >>> try:
    1652          ...    runner.run(test)
    1653          ... except DocTestFailure, failure:
    1654          ...    pass
    1655 
    1656        DocTestFailure objects provide access to the test:
    1657 
    1658          >>> failure.test is test
    1659          True
    1660 
    1661        As well as to the example:
    1662 
    1663          >>> failure.example.want
    1664          '2\n'
    1665 
    1666        and the actual output:
    1667 
    1668          >>> failure.got
    1669          '1\n'
    1670 
    1671        If a failure or error occurs, the globals are left intact:
    1672 
    1673          >>> del test.globs['__builtins__']
    1674          >>> test.globs
    1675          {'x': 1}
    1676 
    1677          >>> test = DocTestParser().get_doctest('''
    1678          ...      >>> x = 2
    1679          ...      >>> raise KeyError
    1680          ...      ''', {}, 'foo', 'foo.py', 0)
    1681 
    1682          >>> runner.run(test)
    1683          Traceback (most recent call last):
    1684          ...
    1685          UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    1686 
    1687          >>> del test.globs['__builtins__']
    1688          >>> test.globs
    1689          {'x': 2}
    1690 
    1691        But the globals are cleared if there is no error:
    1692 
    1693          >>> test = DocTestParser().get_doctest('''
    1694          ...      >>> x = 2
    1695          ...      ''', {}, 'foo', 'foo.py', 0)
    1696 
    1697          >>> runner.run(test)
    1698          (0, 1)
    1699 
    1700          >>> test.globs
    1701          {}
    1702 
    1703        """
    1704 
    1705     def run(self, test, compileflags=None, out=None, clear_globs=True):
    1706         r = DocTestRunner.run(self, test, compileflags, out, False)
    1707         if clear_globs:
    1708             test.globs.clear()
    1709         return r
    1710 
    1711     def report_unexpected_exception(self, out, test, example, exc_info):
    1712         raise UnexpectedException(test, example, exc_info)
    1713 
    1714     def report_failure(self, out, test, example, got):
    1715         raise DocTestFailure(test, example, got)
    1716 
    1717 ######################################################################
    1718 ## 6. Test Functions
    1719 ######################################################################
    1720 # These should be backwards compatible.
    1721 
    1722 # For backward compatibility, a global instance of a DocTestRunner
    1723 # class, updated by testmod.
    1724 master = None
    1725 
    1726 def testmod_returning_runner(m=None, name=None, globs=None, verbose=None,
    1727                              report=True, optionflags=0, extraglobs=None,
    1728                              raise_on_error=False, exclude_empty=False,
    1729                              runner=None):
    1730     """m=None, name=None, globs=None, verbose=None, report=True,
    1731        optionflags=0, extraglobs=None, raise_on_error=False,
    1732        exclude_empty=False, runner=None
    1733 
    1734     Test examples in docstrings in functions and classes reachable
    1735     from module m (or the current module if m is not supplied), starting
    1736     with m.__doc__.
    1737 
    1738     Also test examples reachable from dict m.__test__ if it exists and is
    1739     not None.  m.__test__ maps names to functions, classes and strings;
    1740     function and class docstrings are tested even if the name is private;
    1741     strings are tested directly, as if they were docstrings.
    1742 
    1743     Return the runner used to run the tests; see testmod() for (#failures, #tests).
    1744 
    1745     See doctest.__doc__ for an overview.
    1746 
    1747     Optional keyword arg "name" gives the name of the module; by default
    1748     use m.__name__.
    1749 
    1750     Optional keyword arg "globs" gives a dict to be used as the globals
    1751     when executing examples; by default, use m.__dict__.  A copy of this
    1752     dict is actually used for each docstring, so that each docstring's
    1753     examples start with a clean slate.
    1754 
    1755     Optional keyword arg "extraglobs" gives a dictionary that should be
    1756     merged into the globals that are used to execute examples.  By
    1757     default, no extra globals are used.  This is new in 2.4.
    1758 
    1759     Optional keyword arg "verbose" prints lots of stuff if true, prints
    1760     only failures if false; by default, it's true iff "-v" is in sys.argv.
    1761 
    1762     Optional keyword arg "report" prints a summary at the end when true,
    1763     else prints nothing at the end.  In verbose mode, the summary is
    1764     detailed, else very brief (in fact, empty if all tests passed).
    1765 
    1766     Optional keyword arg "optionflags" or's together module constants,
    1767     and defaults to 0.  This is new in 2.3.  Possible values (see the
    1768     docs for details):
    1769 
    1770         DONT_ACCEPT_TRUE_FOR_1
    1771         DONT_ACCEPT_BLANKLINE
    1772         NORMALIZE_WHITESPACE
    1773         ELLIPSIS
    1774         SKIP
    1775         IGNORE_EXCEPTION_DETAIL
    1776         REPORT_UDIFF
    1777         REPORT_CDIFF
    1778         REPORT_NDIFF
    1779         REPORT_ONLY_FIRST_FAILURE
    1780 
    1781     Optional keyword arg "raise_on_error" raises an exception on the
    1782     first unexpected exception or failure. This allows failures to be
    1783     post-mortem debugged.
    1784 
    1785     Advanced tomfoolery:  testmod runs methods of a local instance of
    1786     class doctest.Tester, then merges the results into (or creates)
    1787     global Tester instance doctest.master.  Methods of doctest.master
    1788     can be called directly too, if you want to do something unusual.
    1789     Passing report=0 to testmod is especially useful then, to delay
    1790     displaying a summary.  Invoke doctest.master.summarize(verbose)
    1791     when you're done fiddling.
    1792     """
    1793     global master
    1794 
    1795     # If no module was given, then use __main__.
    1796     if m is None:
    1797         # DWA - m will still be None if this wasn't invoked from the command
    1798         # line, in which case the following TypeError is about as good an error
    1799         # as we should expect
    1800         m = sys.modules.get('__main__')
    1801 
    1802     # Check that we were actually given a module.
    1803     if not inspect.ismodule(m):
    1804         raise TypeError("testmod: module required; %r" % (m,))
    1805 
    1806     # If no name was given, then use the module's name.
    1807     if name is None:
    1808         name = m.__name__
    1809 
    1810     # Find, parse, and run all tests in the given module.
    1811     finder = DocTestFinder(exclude_empty=exclude_empty)
    1812 
    1813     if runner is None:
    1814         if raise_on_error:
    1815             runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    1816         else:
    1817             runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    1818 
    1819     for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
    1820         runner.run(test)
    1821 
    1822     if report:
    1823         runner.summarize()
    1824 
    1825     if master is None:
    1826         master = runner
    1827     else:
    1828         master.merge(runner)
    1829 
    1830     return runner
    1831 
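Reviewer sketch (not part of the module): the `runner` keyword and the runner return
value are the additions of this patch; they let a caller supply a preconfigured
(possibly custom) runner and inspect it afterwards.  Assuming the file is importable
as `ncadoctest` and that `my_module` is some module containing doctests:

    import ncadoctest
    import my_module                # hypothetical module with doctests

    runner = ncadoctest.DocTestRunner(verbose=False,
                                      optionflags=ncadoctest.ELLIPSIS)
    ncadoctest.testmod_returning_runner(my_module, runner=runner, report=False)
    print runner.failures, runner.tries
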
    1832 def testmod(m=None, name=None, globs=None, verbose=None,
    1833             report=True, optionflags=0, extraglobs=None,
    1834             raise_on_error=False, exclude_empty=False, runner=None):
    1835 
    1836     runner = testmod_returning_runner(m=m, name=name, globs=globs, verbose=verbose,
    1837                                       report=report, optionflags=optionflags, extraglobs=extraglobs,
    1838                                       raise_on_error=raise_on_error, exclude_empty=exclude_empty, runner=runner)
    1839     return runner.failures, runner.tries
    1840 
    1841 def testfile(filename, module_relative=True, name=None, package=None,
    1842              globs=None, verbose=None, report=True, optionflags=0,
    1843              extraglobs=None, raise_on_error=False, parser=DocTestParser(),
    1844              encoding=None):
    1845     """
    1846     Test examples in the given file.  Return (#failures, #tests).
    1847 
    1848     Optional keyword arg "module_relative" specifies how filenames
    1849     should be interpreted:
    1850 
    1851       - If "module_relative" is True (the default), then "filename"
    1852          specifies a module-relative path.  By default, this path is
    1853          relative to the calling module's directory; but if the
    1854          "package" argument is specified, then it is relative to that
    1855          package.  To ensure os-independence, "filename" should use
    1856          "/" characters to separate path segments, and should not
    1857          be an absolute path (i.e., it may not begin with "/").
    1858 
    1859       - If "module_relative" is False, then "filename" specifies an
    1860         os-specific path.  The path may be absolute or relative (to
    1861         the current working directory).
    1862 
    1863     Optional keyword arg "name" gives the name of the test; by default
    1864     use the file's basename.
    1865 
    1866     Optional keyword argument "package" is a Python package or the
    1867     name of a Python package whose directory should be used as the
    1868     base directory for a module relative filename.  If no package is
    1869     specified, then the calling module's directory is used as the base
    1870     directory for module relative filenames.  It is an error to
    1871     specify "package" if "module_relative" is False.
    1872 
    1873     Optional keyword arg "globs" gives a dict to be used as the globals
    1874     when executing examples; by default, use {}.  A copy of this dict
    1875     is actually used for each docstring, so that each docstring's
    1876     examples start with a clean slate.
    1877 
    1878     Optional keyword arg "extraglobs" gives a dictionary that should be
    1879     merged into the globals that are used to execute examples.  By
    1880     default, no extra globals are used.
    1881 
    1882     Optional keyword arg "verbose" prints lots of stuff if true, prints
    1883     only failures if false; by default, it's true iff "-v" is in sys.argv.
    1884 
    1885     Optional keyword arg "report" prints a summary at the end when true,
    1886     else prints nothing at the end.  In verbose mode, the summary is
    1887     detailed, else very brief (in fact, empty if all tests passed).
    1888 
    1889     Optional keyword arg "optionflags" or's together module constants,
    1890     and defaults to 0.  Possible values (see the docs for details):
    1891 
    1892         DONT_ACCEPT_TRUE_FOR_1
    1893         DONT_ACCEPT_BLANKLINE
    1894         NORMALIZE_WHITESPACE
    1895         ELLIPSIS
    1896         SKIP
    1897         IGNORE_EXCEPTION_DETAIL
    1898         REPORT_UDIFF
    1899         REPORT_CDIFF
    1900         REPORT_NDIFF
    1901         REPORT_ONLY_FIRST_FAILURE
    1902 
    1903     Optional keyword arg "raise_on_error" raises an exception on the
    1904     first unexpected exception or failure. This allows failures to be
    1905     post-mortem debugged.
    1906 
    1907     Optional keyword arg "parser" specifies a DocTestParser (or
    1908     subclass) that should be used to extract tests from the files.
    1909 
    1910     Optional keyword arg "encoding" specifies an encoding that should
    1911     be used to convert the file to unicode.
    1912 
    1913     Advanced tomfoolery:  testfile runs methods of a local instance of
    1914     class doctest.Tester, then merges the results into (or creates)
    1915     global Tester instance doctest.master.  Methods of doctest.master
    1916     can be called directly too, if you want to do something unusual.
    1917     Passing report=0 to testfile is especially useful then, to delay
    1918     displaying a summary.  Invoke doctest.master.summarize(verbose)
    1919     when you're done fiddling.
    1920     """
    1921     global master
    1922 
    1923     if package and not module_relative:
    1924         raise ValueError("Package may only be specified for module-"
    1925                          "relative paths.")
    1926 
    1927     # Relativize the path
    1928     text, filename = _load_testfile(filename, package, module_relative)
    1929 
    1930     # If no name was given, then use the file's name.
    1931     if name is None:
    1932         name = os.path.basename(filename)
    1933 
    1934     # Assemble the globals.
    1935     if globs is None:
    1936         globs = {}
    1937     else:
    1938         globs = globs.copy()
    1939     if extraglobs is not None:
    1940         globs.update(extraglobs)
    1941 
    1942     if raise_on_error:
    1943         runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    1944     else:
    1945         runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    1946 
    1947     if encoding is not None:
    1948         text = text.decode(encoding)
    1949 
    1950     # Read the file, convert it to a test, and run it.
    1951     test = parser.get_doctest(text, globs, name, filename, 0)
    1952     runner.run(test)
    1953 
    1954     if report:
    1955         runner.summarize()
    1956 
    1957     if master is None:
    1958         master = runner
    1959     else:
    1960         master.merge(runner)
    1961 
    1962     return runner.failures, runner.tries
    1963 
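Reviewer sketch (not part of the module): testfile() runs the examples found in a
plain text file rather than in docstrings.  Assuming the file is importable as
`ncadoctest` and that `example.txt` is a hypothetical file of doctest examples in
the current directory:

    import ncadoctest

    failures, tries = ncadoctest.testfile('example.txt', module_relative=False,
                                          optionflags=ncadoctest.ELLIPSIS)
    print failures, tries
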
    1964 def run_docstring_examples(f, globs, verbose=False, name="NoName",
    1965                            compileflags=None, optionflags=0):
    1966     """
    1967     Test examples in the given object's docstring (`f`), using `globs`
    1968     as globals.  Optional argument `name` is used in failure messages.
    1969     If the optional argument `verbose` is true, then generate output
    1970     even if there are no failures.
    1971 
    1972     `compileflags` gives the set of flags that should be used by the
    1973     Python compiler when running the examples.  If not specified, then
    1974     it will default to the set of future-import flags that apply to
    1975     `globs`.
    1976 
    1977     Optional keyword arg `optionflags` specifies options for the
    1978     testing and output.  See the documentation for `testmod` for more
    1979     information.
    1980     """
    1981     # Find, parse, and run all tests in the given module.
    1982     finder = DocTestFinder(verbose=verbose, recurse=False)
    1983     runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    1984     for test in finder.find(f, name, globs=globs):
    1985         runner.run(test, compileflags=compileflags)
    1986 
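Reviewer sketch (not part of the module): run_docstring_examples() checks a single
object's docstring without recursing into its contents.  Assuming the file is
importable as `ncadoctest`; `square` is only an illustration:

    import ncadoctest

    def square(n):
        """Return n*n.

        >>> square(7)
        49
        """
        return n * n

    # Prints nothing unless an example fails (or verbose=True is passed).
    ncadoctest.run_docstring_examples(square, {'square': square}, name='square')
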
    1987 ######################################################################
    1988 ## 7. Tester
    1989 ######################################################################
    1990 # This is provided only for backwards compatibility.  It's not
    1991 # actually used in any way.
    1992 
    1993 class Tester:
    1994     def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
    1995 
    1996         warnings.warn("class Tester is deprecated; "
    1997                       "use class doctest.DocTestRunner instead",
    1998                       DeprecationWarning, stacklevel=2)
    1999         if mod is None and globs is None:
    2000             raise TypeError("Tester.__init__: must specify mod or globs")
    2001         if mod is not None and not inspect.ismodule(mod):
    2002             raise TypeError("Tester.__init__: mod must be a module; %r" %
    2003                             (mod,))
    2004         if globs is None:
    2005             globs = mod.__dict__
    2006         self.globs = globs
    2007 
    2008         self.verbose = verbose
    2009         self.optionflags = optionflags
    2010         self.testfinder = DocTestFinder()
    2011         self.testrunner = DocTestRunner(verbose=verbose,
    2012                                         optionflags=optionflags)
    2013 
    2014     def runstring(self, s, name):
    2015         test = DocTestParser().get_doctest(s, self.globs, name, None, None)
    2016         if self.verbose:
    2017             print "Running string", name
    2018         (f,t) = self.testrunner.run(test)
    2019         if self.verbose:
    2020             print f, "of", t, "examples failed in string", name
    2021         return (f,t)
    2022 
    2023     def rundoc(self, object, name=None, module=None):
    2024         f = t = 0
    2025         tests = self.testfinder.find(object, name, module=module,
    2026                                      globs=self.globs)
    2027         for test in tests:
    2028             (f2, t2) = self.testrunner.run(test)
    2029             (f,t) = (f+f2, t+t2)
    2030         return (f,t)
    2031 
    2032     def rundict(self, d, name, module=None):
    2033         import new
    2034         m = new.module(name)
    2035         m.__dict__.update(d)
    2036         if module is None:
    2037             module = False
    2038         return self.rundoc(m, name, module)
    2039 
    2040     def run__test__(self, d, name):
    2041         import new
    2042         m = new.module(name)
    2043         m.__test__ = d
    2044         return self.rundoc(m, name)
    2045 
    2046     def summarize(self, verbose=None):
    2047         return self.testrunner.summarize(verbose)
    2048 
    2049     def merge(self, other):
    2050         self.testrunner.merge(other.testrunner)
    2051 
    2052 ######################################################################
    2053 ## 8. Unittest Support
    2054 ######################################################################
    2055 
    2056 _unittest_reportflags = 0
    2057 
    2058 def set_unittest_reportflags(flags):
    2059     """Sets the unittest option flags.
    2060 
    2061     The old flag is returned so that a runner could restore the old
    2062     value if it wished to:
    2063 
    2064       >>> import doctest
    2065       >>> old = doctest._unittest_reportflags
    2066       >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
    2067       ...                          REPORT_ONLY_FIRST_FAILURE) == old
    2068       True
    2069 
    2070       >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    2071       ...                                   REPORT_ONLY_FIRST_FAILURE)
    2072       True
    2073 
    2074     Only reporting flags can be set:
    2075 
    2076       >>> doctest.set_unittest_reportflags(ELLIPSIS)
    2077       Traceback (most recent call last):
    2078       ...
    2079       ValueError: ('Only reporting flags allowed', 8)
    2080 
    2081       >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
    2082       ...                                   REPORT_ONLY_FIRST_FAILURE)
    2083       True
    2084     """
    2085     global _unittest_reportflags
    2086 
    2087     if (flags & REPORTING_FLAGS) != flags:
    2088         raise ValueError("Only reporting flags allowed", flags)
    2089     old = _unittest_reportflags
    2090     _unittest_reportflags = flags
    2091     return old
    2092 
    2093 
    2094 class DocTestCase(unittest.TestCase):
    2095 
    2096     def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
    2097                  checker=None):
    2098 
    2099         unittest.TestCase.__init__(self)
    2100         self._dt_optionflags = optionflags
    2101         self._dt_checker = checker
    2102         self._dt_test = test
    2103         self._dt_setUp = setUp
    2104         self._dt_tearDown = tearDown
    2105 
    2106     def setUp(self):
    2107         test = self._dt_test
    2108 
    2109         if self._dt_setUp is not None:
    2110             self._dt_setUp(test)
    2111 
    2112     def tearDown(self):
    2113         test = self._dt_test
    2114 
    2115         if self._dt_tearDown is not None:
    2116             self._dt_tearDown(test)
    2117 
    2118         test.globs.clear()
    2119 
    2120     def runTest(self):
    2121         test = self._dt_test
    2122         old = sys.stdout
    2123         new = StringIO()
    2124         optionflags = self._dt_optionflags
    2125 
    2126         if not (optionflags & REPORTING_FLAGS):
    2127             # The option flags don't include any reporting flags,
    2128             # so add the default reporting flags
    2129             optionflags |= _unittest_reportflags
    2130 
    2131         runner = DocTestRunner(optionflags=optionflags,
    2132                                checker=self._dt_checker, verbose=False)
    2133 
    2134         try:
    2135             runner.DIVIDER = "-"*70
    2136             failures, tries = runner.run(
    2137                 test, out=new.write, clear_globs=False)
    2138         finally:
    2139             sys.stdout = old
    2140 
    2141         if failures:
    2142             raise self.failureException(self.format_failure(new.getvalue()))
    2143 
    2144     def format_failure(self, err):
    2145         test = self._dt_test
    2146         if test.lineno is None:
    2147             lineno = 'unknown line number'
    2148         else:
    2149             lineno = '%s' % test.lineno
    2150         lname = '.'.join(test.name.split('.')[-1:])
    2151         return ('Failed doctest test for %s\n'
    2152                 '  File "%s", line %s, in %s\n\n%s'
    2153                 % (test.name, test.filename, lineno, lname, err)
    2154                 )
    2155 
    2156     def debug(self):
    2157         r"""Run the test case without results and without catching exceptions
    2158 
    2159            The unit test framework includes a debug method on test cases
    2160            and test suites to support post-mortem debugging.  The test code
    2161            is run in such a way that errors are not caught.  This way a
    2162            caller can catch the errors and initiate post-mortem debugging.
    2163 
    2164            The DocTestCase provides a debug method that raises
    2165            UnexpectedException errors if there is an unexpected
    2166            exception:
    2167 
    2168              >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    2169              ...                {}, 'foo', 'foo.py', 0)
    2170              >>> case = DocTestCase(test)
    2171              >>> try:
    2172              ...     case.debug()
    2173              ... except UnexpectedException, failure:
    2174              ...     pass
    2175 
    2176            The UnexpectedException contains the test, the example, and
    2177            the original exception:
    2178 
    2179              >>> failure.test is test
    2180              True
    2181 
    2182              >>> failure.example.want
    2183              '42\n'
    2184 
    2185              >>> exc_info = failure.exc_info
    2186              >>> raise exc_info[0], exc_info[1], exc_info[2]
    2187              Traceback (most recent call last):
    2188              ...
    2189              KeyError
    2190 
    2191            If the output doesn't match, then a DocTestFailure is raised:
    2192 
    2193              >>> test = DocTestParser().get_doctest('''
    2194              ...      >>> x = 1
    2195              ...      >>> x
    2196              ...      2
    2197              ...      ''', {}, 'foo', 'foo.py', 0)
    2198              >>> case = DocTestCase(test)
    2199 
    2200              >>> try:
    2201              ...    case.debug()
    2202              ... except DocTestFailure, failure:
    2203              ...    pass
    2204 
    2205            DocTestFailure objects provide access to the test:
    2206 
    2207              >>> failure.test is test
    2208              True
    2209 
    2210            As well as to the example:
    2211 
    2212              >>> failure.example.want
    2213              '2\n'
    2214 
    2215            and the actual output:
    2216 
    2217              >>> failure.got
    2218              '1\n'
    2219 
    2220            """
    2221 
    2222         self.setUp()
    2223         runner = DebugRunner(optionflags=self._dt_optionflags,
    2224                              checker=self._dt_checker, verbose=False)
    2225         runner.run(self._dt_test)
    2226         self.tearDown()
    2227 
    2228     def id(self):
    2229         return self._dt_test.name
    2230 
    2231     def __repr__(self):
    2232         name = self._dt_test.name.split('.')
    2233         return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    2234 
    2235     __str__ = __repr__
    2236 
    2237     def shortDescription(self):
    2238         return "Doctest: " + self._dt_test.name
    2239 
    2240 def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
    2241                  **options):
    2242     """
    2243     Convert doctest tests for a module to a unittest test suite.
    2244 
    2245     This converts each documentation string in a module that
    2246     contains doctest tests to a unittest test case.  If any of the
    2247     tests in a doc string fail, then the test case fails.  An exception
    2248     is raised showing the name of the file containing the test and a
    2249     (sometimes approximate) line number.
    2250 
    2251     The `module` argument provides the module to be tested.  The argument
    2252     can be either a module or a module name.
    2253 
    2254     If no argument is given, the calling module is used.
    2255 
    2256     A number of options may be provided as keyword arguments:
    2257 
    2258     setUp
    2259       A set-up function.  This is called before running the
    2260       tests in each file. The setUp function will be passed a DocTest
    2261       object.  The setUp function can access the test globals as the
    2262       globs attribute of the test passed.
    2263 
    2264     tearDown
    2265       A tear-down function.  This is called after running the
    2266       tests in each file.  The tearDown function will be passed a DocTest
    2267       object.  The tearDown function can access the test globals as the
    2268       globs attribute of the test passed.
    2269 
    2270     globs
    2271       A dictionary containing initial global variables for the tests.
    2272 
    2273     optionflags
    2274        A set of doctest option flags expressed as an integer.
    2275     """
    2276 
    2277     if test_finder is None:
    2278         test_finder = DocTestFinder()
    2279 
    2280     module = _normalize_module(module)
    2281     tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    2282     if globs is None:
    2283         globs = module.__dict__
    2284     if not tests:
    2285         # Why do we want to do this? Because it reveals a bug that might
    2286         # otherwise be hidden.
    2287         raise ValueError(module, "has no tests")
    2288 
    2289     tests.sort()
    2290     suite = unittest.TestSuite()
    2291     for test in tests:
    2292         if len(test.examples) == 0:
    2293             continue
    2294         if not test.filename:
    2295             filename = module.__file__
    2296             if filename[-4:] in (".pyc", ".pyo"):
    2297                 filename = filename[:-1]
    2298             test.filename = filename
    2299         suite.addTest(DocTestCase(test, **options))
    2300 
    2301     return suite
    2302 
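Reviewer sketch (not part of the module): DocTestSuite() wraps each docstring's
examples in a DocTestCase so they can be driven by unittest.  Assuming the file is
importable as `ncadoctest` and that `my_module` contains at least one doctest
(otherwise a ValueError is raised, as above):

    import unittest
    import ncadoctest
    import my_module                # hypothetical module with doctests

    suite = ncadoctest.DocTestSuite(my_module,
                                    optionflags=ncadoctest.NORMALIZE_WHITESPACE)
    unittest.TextTestRunner(verbosity=2).run(suite)
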
    2303 class DocFileCase(DocTestCase):
    2304 
    2305     def id(self):
    2306         return '_'.join(self._dt_test.name.split('.'))
    2307 
    2308     def __repr__(self):
    2309         return self._dt_test.filename
    2310     __str__ = __repr__
    2311 
    2312     def format_failure(self, err):
    2313         return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
    2314                 % (self._dt_test.name, self._dt_test.filename, err)
    2315                 )
    2316 
    2317 def DocFileTest(path, module_relative=True, package=None,
    2318                 globs=None, parser=DocTestParser(),
    2319                 encoding=None, **options):
    2320     if globs is None:
    2321         globs = {}
    2322     else:
    2323         globs = globs.copy()
    2324 
    2325     if package and not module_relative:
    2326         raise ValueError("Package may only be specified for module-"
    2327                          "relative paths.")
    2328 
    2329     # Relativize the path.
    2330     doc, path = _load_testfile(path, package, module_relative)
    2331 
    2332     if "__file__" not in globs:
    2333         globs["__file__"] = path
    2334 
    2335     # Find the file and read it.
    2336     name = os.path.basename(path)
    2337 
    2338     # If an encoding is specified, use it to convert the file to unicode
    2339     if encoding is not None:
    2340         doc = doc.decode(encoding)
    2341 
    2342     # Convert it to a test, and wrap it in a DocFileCase.
    2343     test = parser.get_doctest(doc, globs, name, path, 0)
    2344     return DocFileCase(test, **options)
    2345 
    2346 def DocFileSuite(*paths, **kw):
    2347     """A unittest suite for one or more doctest files.
    2348 
    2349     The path to each doctest file is given as a string; the
    2350     interpretation of that string depends on the keyword argument
    2351     "module_relative".
    2352 
    2353     A number of options may be provided as keyword arguments:
    2354 
    2355     module_relative
    2356       If "module_relative" is True, then the given file paths are
    2357       interpreted as os-independent module-relative paths.  By
    2358       default, these paths are relative to the calling module's
    2359       directory; but if the "package" argument is specified, then
    2360       they are relative to that package.  To ensure os-independence,
    2361       "filename" should use "/" characters to separate path
    2362       segments, and may not be an absolute path (i.e., it may not
    2363       begin with "/").
    2364 
    2365       If "module_relative" is False, then the given file paths are
    2366       interpreted as os-specific paths.  These paths may be absolute
    2367       or relative (to the current working directory).
    2368 
    2369     package
    2370       A Python package or the name of a Python package whose directory
    2371       should be used as the base directory for module relative paths.
    2372       If "package" is not specified, then the calling module's
    2373       directory is used as the base directory for module relative
    2374       filenames.  It is an error to specify "package" if
    2375       "module_relative" is False.
    2376 
    2377     setUp
    2378       A set-up function.  This is called before running the
    2379       tests in each file. The setUp function will be passed a DocTest
    2380       object.  The setUp function can access the test globals as the
    2381       globs attribute of the test passed.
    2382 
    2383     tearDown
    2384       A tear-down function.  This is called after running the
    2385       tests in each file.  The tearDown function will be passed a DocTest
    2386       object.  The tearDown function can access the test globals as the
    2387       globs attribute of the test passed.
    2388 
    2389     globs
    2390       A dictionary containing initial global variables for the tests.
    2391 
    2392     optionflags
    2393       A set of doctest option flags expressed as an integer.
    2394 
    2395     parser
    2396       A DocTestParser (or subclass) that should be used to extract
    2397       tests from the files.
    2398 
    2399     encoding
    2400       An encoding that will be used to convert the files to unicode.
    2401     """
    2402     suite = unittest.TestSuite()
    2403 
    2404     # We do this here so that _normalize_module is called at the right
    2405     # level.  If it were called in DocFileTest, then this function
    2406     # would be the caller and we might guess the package incorrectly.
    2407     if kw.get('module_relative', True):
    2408         kw['package'] = _normalize_module(kw.get('package'))
    2409 
    2410     for path in paths:
    2411         suite.addTest(DocFileTest(path, **kw))
    2412 
    2413     return suite
    2414 
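Reviewer sketch (not part of the module): DocFileSuite() is the file-based
counterpart of DocTestSuite().  Assuming the file is importable as `ncadoctest`
and that `docs/intro.txt` is a hypothetical module-relative text file of doctest
examples:

    import unittest
    import ncadoctest

    suite = ncadoctest.DocFileSuite('docs/intro.txt',
                                    optionflags=ncadoctest.ELLIPSIS)
    unittest.TextTestRunner().run(suite)
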
    2415 ######################################################################
    2416 ## 9. Debugging Support
    2417 ######################################################################
    2418 
    2419 def script_from_examples(s):
    2420     r"""Extract script from text with examples.
    2421 
    2422        Converts text with examples to a Python script.  Example input is
    2423        converted to regular code.  Example output and all other words
    2424        are converted to comments:
    2425 
    2426        >>> text = '''
    2427        ...       Here are examples of simple math.
    2428        ...
    2429        ...           Python has super accurate integer addition
    2430        ...
    2431        ...           >>> 2 + 2
    2432        ...           5
    2433        ...
    2434        ...           And very friendly error messages:
    2435        ...
    2436        ...           >>> 1/0
    2437        ...           To Infinity
    2438        ...           And
    2439        ...           Beyond
    2440        ...
    2441        ...           You can use logic if you want:
    2442        ...
    2443        ...           >>> if 0:
    2444        ...           ...    blah
    2445        ...           ...    blah
    2446        ...           ...
    2447        ...
    2448        ...           Ho hum
    2449        ...           '''
    2450 
    2451        >>> print script_from_examples(text)
    2452        # Here are examples of simple math.
    2453        #
    2454        #     Python has super accurate integer addition
    2455        #
    2456        2 + 2
    2457        # Expected:
    2458        ## 5
    2459        #
    2460        #     And very friendly error messages:
    2461        #
    2462        1/0
    2463        # Expected:
    2464        ## To Infinity
    2465        ## And
    2466        ## Beyond
    2467        #
    2468        #     You can use logic if you want:
    2469        #
    2470        if 0:
    2471           blah
    2472           blah
    2473        #
    2474        #     Ho hum
    2475        <BLANKLINE>
    2476        """
    2477     output = []
    2478     for piece in DocTestParser().parse(s):
    2479         if isinstance(piece, Example):
    2480             # Add the example's source code (strip trailing NL)
    2481             output.append(piece.source[:-1])
    2482             # Add the expected output:
    2483             want = piece.want
    2484             if want:
    2485                 output.append('# Expected:')
    2486                 output += ['## '+l for l in want.split('\n')[:-1]]
    2487         else:
    2488             # Add non-example text.
    2489             output += [_comment_line(l)
    2490                        for l in piece.split('\n')[:-1]]
    2491 
    2492     # Trim junk on both ends.
    2493     while output and output[-1] == '#':
    2494         output.pop()
    2495     while output and output[0] == '#':
    2496         output.pop(0)
    2497     # Combine the output, and return it.
    2498     # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    2499     return '\n'.join(output) + '\n'
    2500 
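
A small sketch of feeding the result to exec, which is why the courtesy newline above matters; the doctest text is made up for this example:

    from ncadoctest import script_from_examples

    script = script_from_examples(">>> x = 2\n>>> x + 2\n4\n")
    # Runs "x = 2" and "x + 2"; the expected output "4" survives only
    # as a "## 4" comment in the generated script.
    exec script in {}
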
    2501 def testsource(module, name):
    2502     """Extract the test sources from a doctest docstring as a script.
    2503 
    2504     Provide the module (or dotted name of the module) containing the
    2505     test to be debugged and the name (within the module) of the object
    2506     with the doc string with tests to be debugged.
    2507     """
    2508     module = _normalize_module(module)
    2509     tests = DocTestFinder().find(module)
    2510     test = [t for t in tests if t.name == name]
    2511     if not test:
    2512         raise ValueError(name, "not found in tests")
    2513     test = test[0]
    2514     testsrc = script_from_examples(test.docstring)
    2515     return testsrc
    2516 
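
For example, with a hypothetical module mymodule containing a doctested function myfunc, testsource could be called as follows (DocTestFinder prefixes test names with the module name, hence "mymodule.myfunc"):

    from ncadoctest import testsource

    # Extract the doctests of mymodule.myfunc as a runnable script string.
    src = testsource('mymodule', 'mymodule.myfunc')
    print src
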
    2517 def debug_src(src, pm=False, globs=None):
    2518     """Debug a single doctest docstring, in argument `src`'"""
    2519     testsrc = script_from_examples(src)
    2520     debug_script(testsrc, pm, globs)
    2521 
    2522 def debug_script(src, pm=False, globs=None):
    2523     "Debug a test script.  `src` is the script, as a string."
    2524     import pdb
    2525 
    2526     # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
    2527     # docs say, a file so created cannot be opened by name a second time
    2528     # on modern Windows boxes, and execfile() needs to open it.
    2529     srcfilename = tempfile.mktemp(".py", "doctestdebug")
    2530     f = open(srcfilename, 'w')
    2531     f.write(src)
    2532     f.close()
    2533 
    2534     try:
    2535         if globs:
    2536             globs = globs.copy()
    2537         else:
    2538             globs = {}
    2539 
    2540         if pm:
    2541             try:
    2542                 execfile(srcfilename, globs, globs)
    2543             except:
    2544                 print sys.exc_info()[1]
    2545                 pdb.post_mortem(sys.exc_info()[2])
    2546         else:
    2547             # Note that %r is vital here.  '%s' instead can, e.g., cause
    2548             # backslashes to get treated as metacharacters on Windows.
    2549             pdb.run("execfile(%r)" % srcfilename, globs, globs)
    2550 
    2551     finally:
    2552         os.remove(srcfilename)
    2553 
    2554 def debug(module, name, pm=False):
    2555     """Debug a single doctest docstring.
    2556 
    2557     Provide the module (or dotted name of the module) containing the
    2558     test to be debugged and the name (within the module) of the object
    2559     with the docstring with tests to be debugged.
    2560     """
    2561     module = _normalize_module(module)
    2562     testsrc = testsource(module, name)
    2563     debug_script(testsrc, pm, module.__dict__)
    2564 
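
A hedged sketch of the three debugging entry points above; the module and object names are placeholders:

    from ncadoctest import debug, debug_src

    # Step through the doctests of mymodule.myfunc under pdb.
    debug('mymodule', 'mymodule.myfunc')
    # Post-mortem debugging: run until an example raises, then inspect.
    debug('mymodule', 'mymodule.myfunc', pm=True)
    # debug_src takes the docstring text directly instead of a module.
    debug_src(">>> 1/0\n", pm=True)
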
    2565 ######################################################################
    2566 ## 10. Example Usage
    2567 ######################################################################
    2568 class _TestClass:
    2569     """
    2570     A pointless class, for sanity-checking of docstring testing.
    2571 
    2572     Methods:
    2573         square()
    2574         get()
    2575 
    2576     >>> _TestClass(13).get() + _TestClass(-12).get()
    2577     1
    2578     >>> hex(_TestClass(13).square().get())
    2579     '0xa9'
    2580     """
    2581 
    2582     def __init__(self, val):
    2583         """val -> _TestClass object with associated value val.
    2584 
    2585         >>> t = _TestClass(123)
    2586         >>> print t.get()
    2587         123
    2588         """
    2589 
    2590         self.val = val
    2591 
    2592     def square(self):
    2593         """square() -> square TestClass's associated value
    2594 
    2595         >>> _TestClass(13).square().get()
    2596         169
    2597         """
    2598 
    2599         self.val = self.val ** 2
    2600         return self
    2601 
    2602     def get(self):
    2603         """get() -> return TestClass's associated value.
    2604 
    2605         >>> x = _TestClass(-42)
    2606         >>> print x.get()
    2607         -42
    2608         """
    2609 
    2610         return self.val
    2611 
    2612 __test__ = {"_TestClass": _TestClass,
    2613             "string": r"""
    2614                       Example of a string object, searched as-is.
    2615                       >>> x = 1; y = 2
    2616                       >>> x + y, x * y
    2617                       (3, 2)
    2618                       """,
    2619 
    2620             "bool-int equivalence": r"""
    2621                                     In 2.2, boolean expressions displayed
    2622                                     0 or 1.  By default, we still accept
    2623                                     them.  This can be disabled by passing
    2624                                     DONT_ACCEPT_TRUE_FOR_1 to the new
    2625                                     optionflags argument.
    2626                                     >>> 4 == 4
    2627                                     1
    2628                                     >>> 4 == 4
    2629                                     True
    2630                                     >>> 4 > 4
    2631                                     0
    2632                                     >>> 4 > 4
    2633                                     False
    2634                                     """,
    2635 
    2636             "blank lines": r"""
    2637                 Blank lines can be marked with <BLANKLINE>:
    2638                     >>> print 'foo\n\nbar\n'
    2639                     foo
    2640                     <BLANKLINE>
    2641                     bar
    2642                     <BLANKLINE>
    2643             """,
    2644 
    2645             "ellipsis": r"""
    2646                 If the ellipsis flag is used, then '...' can be used to
    2647                 elide substrings in the desired output:
    2648                     >>> print range(1000) #doctest: +ELLIPSIS
    2649                     [0, 1, 2, ..., 999]
    2650             """,
    2651 
    2652             "whitespace normalization": r"""
    2653                 If the whitespace normalization flag is used, then
    2654                 differences in whitespace are ignored.
    2655                     >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
    2656                     [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    2657                      15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
    2658                      27, 28, 29]
    2659             """,
    2660            }
    2661 
    2662 def _test():
    2663     r = unittest.TextTestRunner()
    2664     r.run(DocTestSuite())
    2665 
    2666 if __name__ == "__main__":
    2667     _test()
  • sage-make_devel_packages

    diff --git a/sage-make_devel_packages b/sage-make_devel_packages
    a b  
    115115
    116116cp -pr sage-* .hg* "$SCRIPTS"/
    117117cp -p "$SAGE_ROOT"/local/bin/sage-spkg-install "$SCRIPTS"/spkg-install
    118 cp -p "$SAGE_ROOT"/local/bin/ncadoctest.py "$SCRIPTS"/
    119 cp -p "$SAGE_ROOT"/local/bin/sagedoctest.py "$SCRIPTS"/
    120118
    121119chmod +x "$SCRIPTS"/spkg-install
    122120tar -jcf "$SCRIPTS".spkg "$SCRIPTS"
  • sage-runtests

    diff --git a/sage-runtests b/sage-runtests
    a b  
    55if __name__ == "__main__":
    66    parser = optparse.OptionParser()
    77
    8     def nthreads_callback(option, opt_str, value, parser):
     8    def optional_argument(option, opt_str, value, parser, typ, default_arg):
    99        assert value is None
    10         if parser.rargs: # there are more arguments
    11             try:
    12                 next_arg = int(parser.rargs[0])
    13                 parser.rargs.pop(0)
    14             except ValueError:
    15                 # No explicit number of threads passed
    16                 next_arg = 0
     10        try:
     11            next_arg = typ(parser.rargs[0])
     12        except Exception:
     13            next_arg = default_arg
    1714        else:
    18             next_arg = 0
    19         parser.values.nthreads = next_arg
     15            parser.rargs.pop(0)
     16        setattr(parser.values, option.dest, next_arg)
    2017
    21     parser.add_option("-p", "--nthreads", action="callback", callback=nthreads_callback, nargs=0, metavar="N", help="tests in parallel using N threads with 0 interpreted as minimum(8, cpu_count())")
    22     parser.add_option("--serial", action="store_true", default=False, help="run tests in a single process in series")
    23     parser.add_option("-T", "--timeout", type=int, default=-1, help="timeout (in seconds) for doctesting one file")
     18    parser.add_option("-p", "--nthreads", dest="nthreads", default=1, action="callback",
     19        callback=optional_argument, callback_args=(int, 0), nargs=0,
     20        metavar="N", help="tests in parallel using N threads with 0 interpreted as max(2, min(8, cpu_count()))")
     21    parser.add_option("-T", "--timeout", type=int, default=-1, help="timeout (in seconds) for doctesting one file, 0 for no timeout")
    2422    parser.add_option("-a", "--all", action="store_true", default=False, help="test all files in the Sage library")
    2523    parser.add_option("--logfile", metavar="FILE", help="log all output to FILE")
    26     parser.add_option("--sagenb", action="store_true", default=False, help="test all sagenb files")
     24    parser.add_option("--sagenb", action="store_true", default=False, help="test all files from the Sage notebook sources")
    2725
    28     parser.add_option("--long", action="store_true", default=False, help="include lines with the phrase 'long time'")
    29     parser.add_option("--optional", metavar="OPTIONAL_PKGS", default="sage", \
    30                           help="only run tests including one of the #optional tags listed in OPTIONAL_PKGS; if 'sage' is listed will also test the standard doctests; if OPTIONAL_PKGS='all' all tests will be run")
     26    parser.add_option("-l", "--long", action="store_true", default=False, help="include lines with the phrase 'long time'")
     27    parser.add_option("--warn-long", dest="warn_long", default=None, action="callback",
     28        callback=optional_argument, callback_args=(float, 1.0), nargs=0,
     29        metavar="SECONDS", help="warn if tests take more time than SECONDS")
     30    parser.add_option("--optional", metavar="PKGS", default="sage",
     31        help='only run tests including one of the "# optional" tags listed in PKGS; '
     32             'if "sage" is listed will also test the standard doctests; '
     33             'if PKGS=all, then all tests will be run')
    3134    parser.add_option("--randorder", type=int, metavar="SEED", help="randomize order of tests")
    3235    parser.add_option("--global-iterations", "--global_iterations", type=int, default=0, help="repeat the whole testing process this many times")
    3336    parser.add_option("--file-iterations", "--file_iterations", type=int, default=0, help="repeat each file this many times, stopping on the first failure")
     
    4043
    4144    parser.add_option("--gdb", action="store_true", default=False, help="run doctests under the control of gdb")
    4245    parser.add_option("--valgrind", "--memcheck", action="store_true", default=False,
    43                       help="run doctests using Valgrind's memcheck tool.  The log " + \
    44                          "files are named sage-memcheck.PID and can be found in " + \
     46                      help="run doctests using Valgrind's memcheck tool.  The log "
     47                         "files are named sage-memcheck.PID and can be found in " +
    4548                         os.path.join(os.environ["DOT_SAGE"], "valgrind"))
    4649    parser.add_option("--massif", action="store_true", default=False,
    47                       help="run doctests using Valgrind's massif tool.  The log " + \
    48                          "files are named sage-massif.PID and can be found in " + \
     50                      help="run doctests using Valgrind's massif tool.  The log "
     51                         "files are named sage-massif.PID and can be found in " +
    4952                         os.path.join(os.environ["DOT_SAGE"], "valgrind"))
    5053    parser.add_option("--cachegrind", action="store_true", default=False,
    51                       help="run doctests using Valgrind's cachegrind tool.  The log " + \
    52                          "files are named sage-cachegrind.PID and can be found in " + \
     54                      help="run doctests using Valgrind's cachegrind tool.  The log "
     55                         "files are named sage-cachegrind.PID and can be found in " +
    5356                         os.path.join(os.environ["DOT_SAGE"], "valgrind"))
    5457    parser.add_option("--omega", action="store_true", default=False,
    55                       help="run doctests using Valgrind's omega tool.  The log " + \
    56                          "files are named sage-omega.PID and can be found in " + \
     58                      help="run doctests using Valgrind's omega tool.  The log "
     59                         "files are named sage-omega.PID and can be found in " +
    5760                         os.path.join(os.environ["DOT_SAGE"], "valgrind"))
    5861
    59     parser.add_option("-f", "--failed", action="store_true", default=False, \
    60                           help="doctest only those files that failed in the previous run")
    61     parser.add_option("--new", action="store_true", default=False, help="doctest only those files that have been changed in the repository and not yet been committed")
     62    parser.add_option("-f", "--failed", action="store_true", default=False,
     63        help="doctest only those files that failed in the previous run")
     64    parser.add_option("-n", "--new", action="store_true", default=False,
     65        help="doctest only those files that have been changed in the repository and not yet been committed")
    6266
    63     parser.add_option("--stats_path", "--stats-path", default=os.path.join(os.path.expanduser("~/.sage/timings2.json")), \
     67    parser.add_option("--stats_path", "--stats-path", default=os.path.join(os.environ['DOT_SAGE'], "timings2.json"),
    6468                          help="path to a json dictionary for the latest run storing a timing for each file")
    6569
     70    # The --serial option is only really for internal use, better not
     71    # document it.
     72    parser.add_option("--serial", action="store_true", default=False, help=optparse.SUPPRESS_HELP)
     73
    6674    parser.set_usage("sage -t [options] filenames")
     75
     76
    6777    from sage.doctest.control import DocTestController
    6878    options, args = parser.parse_args()
     79
    6980    if len(args) == 0 and not (options.all or options.sagenb or options.new):
    7081        parser.print_help()
    71         sys.exit(8)
     82        err = 2
    7283    else:
    73         DC = DocTestController(*parser.parse_args())
     84        DC = DocTestController(options, args)
    7485        err = DC.run()
    7586
    76         # We use os._exit rather than sys.exit since sys.exit wasn't
    77         # completely quitting on sage.math after a KeyboardInterrupt
    78         os._exit(err)
     87    sys.exit(err)
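
The optional_argument callback introduced above lets -p and --warn-long take an optional trailing value without swallowing positional file names. Below is a standalone sketch of the same optparse pattern, runnable outside of sage-runtests; the option, defaults and sample arguments are illustrative only:

    import optparse

    def optional_argument(option, opt_str, value, parser, typ, default_arg):
        # With nargs=0, optparse hands the callback no value; peek at the
        # next command-line token and consume it only if it parses as typ.
        assert value is None
        try:
            next_arg = typ(parser.rargs[0])
        except Exception:
            next_arg = default_arg
        else:
            parser.rargs.pop(0)
        setattr(parser.values, option.dest, next_arg)

    parser = optparse.OptionParser()
    parser.add_option("-p", "--nthreads", dest="nthreads", default=1,
                      action="callback", callback=optional_argument,
                      callback_args=(int, 0), nargs=0, metavar="N")

    # "-p 4" sets nthreads to 4, a bare "-p" falls back to 0, and
    # omitting the option leaves the default of 1.
    opts, args = parser.parse_args(["-p", "4", "some_file.py"])
    print opts.nthreads, args
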
  • deleted file sagedoctest.py

    diff --git a/sagedoctest.py b/sagedoctest.py
    deleted file mode 100644
    + -  
    1 from __future__ import with_statement
    2 
    3 import ncadoctest
    4 import sage.misc.randstate as randstate
    5 
    6 OrigDocTestRunner = ncadoctest.DocTestRunner
    7 class SageDocTestRunner(OrigDocTestRunner):
    8     def __init__(self, checker=None, verbose=None, optionflags=0):
    9         optionflags |= ncadoctest.NORMALIZE_WHITESPACE
    10         optionflags |= ncadoctest.ELLIPSIS
    11         OrigDocTestRunner.__init__(self, checker=checker, verbose=verbose, optionflags=optionflags)
    12         self._collect_timeit_stats = True
    13         self._timeit_stats = {}
    14         self._reset_random_seed = True
    15         self._random_seed = randstate.seed(0)
    16 
    17     def run(self, test, compileflags=None, out=None, clear_globs=True):
    18         r = OrigDocTestRunner.run(self, test, compileflags=compileflags, out=out, clear_globs=clear_globs)
    19         if self._collect_timeit_stats:
    20             pass # could save timeit stats here
    21         return r
    22 
    23     def run_one_test(self, test, compileflags, out):
    24         if self._reset_random_seed:
    25             randstate.set_random_seed(long(0))
    26         OrigDocTestRunner.run_one_test(self, test, compileflags, out)
    27 
    28     def run_one_example(self, test, example, filename, compileflags):
    29         if self._collect_timeit_stats:
    30             with self._random_seed:
    31                 from sage.misc.sage_timeit import sage_timeit
    32                 key = (example.source, example)
    33                 try:
    34                     self._timeit_stats[key] = sage_timeit(example.source, test.globs)
    35                 except Exception, e:
    36                     self._timeit_stats[key] = e
    37         # otherwise, just run the example
    38         OrigDocTestRunner.run_one_example(self, test, example, filename, compileflags)
    39 
    40     def save_timeit_stats_to_file_named(self, output_filename):
    41         if self._collect_timeit_stats:
    42             from sage.structure.sage_object import save
    43             save(self._timeit_stats, filename=output_filename)
    44 
    45 ncadoctest.DocTestRunner = SageDocTestRunner
    46 
    47 def testmod_returning_runner(m=None, name=None, globs=None, verbose=None,
    48                              report=True, optionflags=0, extraglobs=None,
    49                              raise_on_error=False, exclude_empty=False,
    50                              runner=None):
    51     return ncadoctest.testmod_returning_runner(m=m, name=name, globs=globs, verbose=verbose,
    52                                                report=report, optionflags=optionflags, extraglobs=extraglobs,
    53                                                raise_on_error=raise_on_error, exclude_empty=exclude_empty,
    54                                                runner=runner)
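
For reference, a sketch of how the module-level substitution at the end of this deleted file used to take effect; it assumes an old Sage installation where ncadoctest.py and sagedoctest.py are still importable:

    import ncadoctest
    import sagedoctest  # importing this rebinds ncadoctest.DocTestRunner

    # Any runner created through ncadoctest afterwards is really a
    # SageDocTestRunner, so ELLIPSIS and NORMALIZE_WHITESPACE are forced on.
    runner = ncadoctest.DocTestRunner(verbose=False)
    assert runner.optionflags & ncadoctest.ELLIPSIS
    assert runner.optionflags & ncadoctest.NORMALIZE_WHITESPACE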