from test import test_support
######################################################################
## Sample Objects (used by test cases)
######################################################################
>>> print sample_func(22)
44
>>> # comments get ignored. so are empty PS1 and PS2 prompts:
>>> sc = SampleClass(3)
>>> for i in range(10):
...     sc = sc.double()
...     print sc.get(),
6 12 24 48 96 192 384 768 1536 3072
>>> print SampleClass(12).get()
12
>>> print SampleClass(12).double().get()
24
return SampleClass(self.val + self.val)
>>> print SampleClass(-5).get()
-5
>>> print SampleClass.a_staticmethod(10)
a_staticmethod = staticmethod(a_staticmethod)

def a_classmethod(cls, v):
>>> print SampleClass.a_classmethod(10)
>>> print SampleClass(0).a_classmethod(10)
a_classmethod = classmethod(a_classmethod)

a_property = property(get, doc="""
>>> print SampleClass(22).a_property
22
>>> x = SampleClass.NestedClass(5)
>>> y = x.square()
>>> print y.get()
25
def __init__(self, val=0):
>>> print SampleClass.NestedClass().get()
0
return SampleClass.NestedClass(self.val*self.val)
class SampleNewStyleClass(object):
>>> print SampleNewStyleClass(12).get()
12
>>> print SampleNewStyleClass(12).double().get()
24
return SampleNewStyleClass(self.val + self.val)
>>> print SampleNewStyleClass(-5).get()
-5
######################################################################
## Fake stdin (for testing interactive debugging)
######################################################################
A fake input stream for pdb's interactive debugger. Whenever a
line is read, print it (to simulate the user typing it), and then
return it. The set of lines to return is specified in the
constructor; they should not have trailing newlines.
def __init__(self, lines):
    self.lines = lines
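
# A sketch of the rest of this helper, assuming only the behavior described
# above: pdb reads input by calling readline(), so echo each line (to
# simulate the user typing it) and return it with its newline restored.
def readline(self):
    line = self.lines.pop(0)
    print line
    return line + '\n'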
######################################################################
## Test Cases
######################################################################
def test_Example(): r"""
Unit tests for the `Example` class.
Example is a simple container class that holds:
- `source`: A source string.
- `want`: An expected output string.
- `exc_msg`: An expected exception message string (or None if no
  exception is expected).
- `lineno`: A line number (within the docstring).
- `indent`: The example's indentation in the input string.
- `options`: An option dictionary, mapping option flags to True or
  False.
These attributes are set by the constructor. `source` and `want` are
required; the other attributes all have default values:
>>> example = doctest.Example('print 1', '1\n')
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('print 1\n', '1\n', None, 0, 0, {})
The first three attributes (`source`, `want`, and `exc_msg`) may be
specified positionally; the remaining arguments should be specified as
keyword arguments:
>>> exc_msg = 'IndexError: pop from an empty list'
>>> example = doctest.Example('[].pop()', '', exc_msg,
...                           lineno=5, indent=4,
...                           options={doctest.ELLIPSIS: True})
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('[].pop()\n', '', 'IndexError: pop from an empty list\n', 5, 4, {8: True})
The constructor normalizes the `source` string to end in a newline:
Source spans a single line: no terminating newline.
>>> e = doctest.Example('print 1', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
>>> e = doctest.Example('print 1\n', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
Source spans multiple lines: require terminating newline.
>>> e = doctest.Example('print 1;\nprint 2\n', '1\n2\n')
>>> e.source, e.want
('print 1;\nprint 2\n', '1\n2\n')
>>> e = doctest.Example('print 1;\nprint 2', '1\n2\n')
>>> e.source, e.want
('print 1;\nprint 2\n', '1\n2\n')
Empty source string (which should never appear in real examples)
>>> e = doctest.Example('', '')
>>> e.source, e.want
('\n', '')
The constructor normalizes the `want` string to end in a newline,
unless it's the empty string:
>>> e = doctest.Example('print 1', '1\n')
>>> e.want
'1\n'
>>> e = doctest.Example('print 1', '1')
>>> e.want
'1\n'
>>> e = doctest.Example('print', '')
>>> e.want
''
The constructor normalizes the `exc_msg` string to end in a newline,
unless it's None:
>>> exc_msg = 'IndexError: pop from an empty list'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
>>> exc_msg = 'IndexError: pop from an empty list\n'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
Message spans multiple lines
>>> exc_msg = 'ValueError: 1\n 2'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
>>> exc_msg = 'ValueError: 1\n 2\n'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
Empty (but non-None) exception message (which should never appear
in real examples):
>>> exc_msg = ''
>>> e = doctest.Example('raise X()', '', exc_msg)
>>> e.exc_msg
'\n'
def test_DocTest(): r"""
Unit tests for the `DocTest` class.
DocTest is a collection of examples, extracted from a docstring, along
with information about where the docstring comes from (a name,
filename, and line number). The docstring is parsed using
`DocTestParser.get_doctest`:
... >>> print 'another\example'
>>> globs = {} # globals to run the test in.
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(docstring, globs, 'some_test',
...                           'some_file', 20)
>>> print test
<DocTest some_test from some_file:20 (2 examples)>
>>> e1, e2 = test.examples
>>> (e1.source, e1.want, e1.lineno)
('print 12\n', '12\n', 1)
>>> (e2.source, e2.want, e2.lineno)
("print 'another\\example'\n", 'another\nexample\n', 6)
Source information (name, filename, and line number) is available as
attributes on the doctest object:
>>> (test.name, test.filename, test.lineno)
('some_test', 'some_file', 20)
The line number of an example within its containing file is found by
adding the line number of the example and the line number of its
containing test:
>>> test.lineno + e1.lineno
21
>>> test.lineno + e2.lineno
26
If the docstring contains inconsistent leading whitespace in the
expected output of an example, then `DocTest` will raise a ValueError:
... >>> print 'bad\nindentation'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 4 of the docstring for some_test has inconsistent leading whitespace: 'indentation'
If the docstring contains inconsistent leading whitespace on
continuation lines, then `DocTest` will raise a ValueError:
... >>> print ('bad indentation',
... ('bad', 'indentation')
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test has inconsistent leading whitespace: '... 2)'
If there's no blank space after a PS1 prompt ('>>>'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>>print 1\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 1 of the docstring for some_test lacks blank after >>>: '>>>print 1'
If there's no blank space after a PS2 prompt ('...'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>> if 1:\n...print 1\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test lacks blank after ...: '...print 1'
def test_DocTestFinder(): r"""
Unit tests for the `DocTestFinder` class.
DocTestFinder is used to extract DocTests from an object's docstring
and the docstrings of its contained objects. It can be used with
modules, functions, classes, methods, staticmethods, classmethods, and
Finding Tests in Functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
For a function whose docstring contains examples, DocTestFinder.find()
will return a single test (for that function's docstring):
>>> finder = doctest.DocTestFinder()
We'll simulate a __file__ attr that ends in pyc:
>>> import test.test_doctest
>>> old = test.test_doctest.__file__
>>> test.test_doctest.__file__ = 'test_doctest.pyc'
>>> tests = finder.find(sample_func)
>>> print tests # doctest: +ELLIPSIS
[<DocTest sample_func from ...:13 (1 example)>]
The exact name depends on how test_doctest was invoked, so allow for
leading path components.
>>> tests[0].filename # doctest: +ELLIPSIS
>>> test.test_doctest.__file__ = old
>>> e = tests[0].examples[0]
>>> (e.source, e.want, e.lineno)
('print sample_func(22)\n', '44\n', 3)
By default, tests are created for objects with no docstring:
>>> finder.find(no_docstring)
However, the optional argument `exclude_empty` to the DocTestFinder
constructor can be used to exclude tests for objects with empty
docstrings:
>>> excl_empty_finder = doctest.DocTestFinder(exclude_empty=True)
>>> excl_empty_finder.find(no_docstring)
If the function has a docstring with no examples, then a test with no
examples is returned. (This lets `DocTestRunner` collect statistics
about which functions have no tests -- but is that useful? And should
an empty test also be created when there's no docstring?)
>>> def no_examples(v):
...     ''' no doctest examples '''
>>> finder.find(no_examples) # doctest: +ELLIPSIS
[<DocTest no_examples from ...:1 (no examples)>]
For a class, DocTestFinder will create a test for the class's
docstring, and will recursively explore its contents, including
methods, classmethods, staticmethods, properties, and nested classes.
>>> finder = doctest.DocTestFinder()
>>> tests = finder.find(SampleClass)
>>> for t in tests:
...     print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
New-style classes are also supported:
>>> tests = finder.find(SampleNewStyleClass)
>>> for t in tests:
...     print '%2s %s' % (len(t.examples), t.name)
1 SampleNewStyleClass
1 SampleNewStyleClass.__init__
1 SampleNewStyleClass.double
1 SampleNewStyleClass.get
For a module, DocTestFinder will create a test for the module's
docstring, and will recursively explore its contents, including
functions, classes, and the `__test__` dictionary, if it exists:
>>> m = new.module('some_module')
... 'sample_func': sample_func,
... 'SampleClass': SampleClass,
... 'd': '>>> print 6\n6\n>>> print 7\n7\n',
>>> finder = doctest.DocTestFinder()
>>> # Use module=test.test_doctest, to prevent doctest from
>>> # ignoring the objects since they weren't defined in m.
>>> import test.test_doctest
>>> tests = finder.find(m, module=test.test_doctest)
>>> for t in tests:
...     print '%2s %s' % (len(t.examples), t.name)
3 some_module.SampleClass
3 some_module.SampleClass.NestedClass
1 some_module.SampleClass.NestedClass.__init__
1 some_module.SampleClass.__init__
2 some_module.SampleClass.a_classmethod
1 some_module.SampleClass.a_property
1 some_module.SampleClass.a_staticmethod
1 some_module.SampleClass.double
1 some_module.SampleClass.get
1 some_module.sample_func
If a single object is listed twice (under different names), then tests
will only be generated for it once:
>>> from test import doctest_aliases
>>> tests = excl_empty_finder.find(doctest_aliases)
>>> print len(tests)
2
>>> print tests[0].name
test.doctest_aliases.TwoNames
TwoNames.f and TwoNames.g are bound to the same object.
We can't guess which will be found in doctest's traversal of
TwoNames.__dict__ first, so we have to allow for either.
>>> tests[1].name.split('.')[-1] in ['f', 'g']
True
A filter function can be used to restrict which objects get examined,
but this is temporary, undocumented internal support for testmod's
deprecated isprivate gimmick.
>>> def namefilter(prefix, base):
... return base.startswith('a_')
>>> tests = doctest.DocTestFinder(_namefilter=namefilter).find(SampleClass)
>>> for t in tests:
...     print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
1 SampleClass.double
1 SampleClass.get
By default, that excluded objects with no doctests. exclude_empty=False
tells it to include (empty) tests for objects with no doctests. This
feature really exists to support backward compatibility in what
doctest.master.summarize() displays:
>>> tests = doctest.DocTestFinder(_namefilter=namefilter,
... exclude_empty=False).find(SampleClass)
>>> for t in tests:
...     print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
0 SampleClass.NestedClass.get
0 SampleClass.NestedClass.square
1 SampleClass.__init__
1 SampleClass.double
1 SampleClass.get
If a given object is filtered out, then none of the objects that it
contains will be added either:
>>> def namefilter(prefix, base):
... return base == 'NestedClass'
>>> tests = doctest.DocTestFinder(_namefilter=namefilter).find(SampleClass)
>>> for t in tests:
...     print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
The filter function applies to contained objects, and *not* to the
object explicitly passed to DocTestFinder:
>>> def namefilter(prefix, base):
... return base == 'SampleClass'
>>> tests = doctest.DocTestFinder(_namefilter=namefilter).find(SampleClass)
DocTestFinder can be told not to look for tests in contained objects
using the `recurse` flag:
>>> tests = doctest.DocTestFinder(recurse=False).find(SampleClass)
>>> for t in tests:
...     print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
DocTestFinder finds the line number of each example:
... >>> # examples are not created for comments & bare prompts.
... >>> for x in range(10):
>>> test = doctest.DocTestFinder().find(f)[0]
>>> [e.lineno for e in test.examples]
def test_DocTestParser(): r"""
Unit tests for the `DocTestParser` class.
DocTestParser is used to parse docstrings containing doctest examples.
The `parse` method divides a docstring into examples and intervening
text:
... >>> x, y = 2, 3 # no output expected
>>> parser = doctest.DocTestParser()
>>> for piece in parser.parse(s):
...     if isinstance(piece, doctest.Example):
...         print 'Example:', (piece.source, piece.want, piece.lineno)
...     else:
...         print ' Text:', `piece`
Example: ('x, y = 2, 3 # no output expected\n', '', 1)
Example: ('if 1:\n print x\n print y\n', '2\n3\n', 2)
Example: ('x+y\n', '5\n', 9)
The `get_examples` method returns just the examples:
>>> for piece in parser.get_examples(s):
... print (piece.source, piece.want, piece.lineno)
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print x\n print y\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
The `get_doctest` method creates a Test from the examples, along with the
given arguments:
>>> test = parser.get_doctest(s, {}, 'name', 'filename', lineno=5)
>>> (test.name, test.filename, test.lineno)
('name', 'filename', 5)
>>> for piece in test.examples:
... print (piece.source, piece.want, piece.lineno)
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print x\n print y\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
class test_DocTestRunner:
Unit tests for the `DocTestRunner` class.
DocTestRunner is used to run DocTest test cases, and to accumulate
statistics. Here's a simple DocTest case we can use:
>>> test = doctest.DocTestFinder().find(f)[0]
The main DocTestRunner interface is the `run` method, which runs a
given DocTest case in a given namespace (globs). It returns a tuple
`(f,t)`, where `f` is the number of failed tests and `t` is the total
number of tried tests:
>>> doctest.DocTestRunner(verbose=False).run(test)
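
In application code the returned tuple is typically unpacked; a minimal
sketch (plain code, not a doctest; `test` stands for the case found above):

    failures, tries = doctest.DocTestRunner(verbose=False).run(test)
    if failures:
        print '%d of %d examples failed' % (failures, tries)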
If any example produces incorrect output, then the test runner reports
the failure and proceeds to the next example:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
**********************************************************************
The `verbose` flag makes the test runner generate more detailed
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
If the `verbose` flag is unspecified, then the output will be verbose
iff `-v` appears in sys.argv:
>>> # Save the real sys.argv list.
>>> old_argv = sys.argv
>>> # If -v does not appear in sys.argv, then output isn't verbose.
>>> sys.argv = ['test']
>>> doctest.DocTestRunner().run(test)
>>> # If -v does appear in sys.argv, then output is verbose.
>>> sys.argv = ['test', '-v']
>>> doctest.DocTestRunner().run(test)
In the remaining examples, the test runner's verbosity will be
explicitly set, to ensure that the test behavior is consistent.
Tests of `DocTestRunner`'s exception handling.
An expected exception is specified with a traceback message. The
lines between the first line and the type/value may be omitted or
replaced with any other string:
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
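
Schematically, an expected exception is written like this (a sketch, not a
live doctest; the middle line may be omitted or replaced by any text, and
SomeError stands for the expected exception type):

    Traceback (most recent call last):
      ...
    SomeError: expected message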
An example may not generate output before it raises an exception; if
it does, then the traceback message will not be recognized as
signaling an expected exception, so the example will be reported as an
unexpected exception:
... >>> print 'pre-exception output', x/0
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
print 'pre-exception output', x/0
ZeroDivisionError: integer division or modulo by zero
Exception messages may contain newlines:
... >>> raise ValueError, 'multi\nline\nmessage'
... Traceback (most recent call last):
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
If an exception is expected, but an exception with the wrong type or
message is raised, then it is reported as a failure:
... >>> raise ValueError, 'message'
... Traceback (most recent call last):
... ValueError: wrong message
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
raise ValueError, 'message'
Traceback (most recent call last):
ValueError: wrong message
Traceback (most recent call last):
However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the
exception detail:
... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... ValueError: wrong message
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type:
... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... TypeError: wrong type
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
Traceback (most recent call last):
If an exception is raised but not expected, then it is reported as an
unexpected exception:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
Traceback (most recent call last):
ZeroDivisionError: integer division or modulo by zero
Tests of `DocTestRunner`'s option flag handling.
Several option flags can be used to customize the behavior of the test
runner. These are defined as module constants in doctest, and passed
to the DocTestRunner constructor (multiple constants should be or-ed
together).
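
For example, two flags can be combined like this (a sketch; the particular
combination is arbitrary):

    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    runner = doctest.DocTestRunner(verbose=False, optionflags=flags)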
The DONT_ACCEPT_TRUE_FOR_1 flag disables matches between True/False
and 1/0:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
**********************************************************************
The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines
and the '<BLANKLINE>' marker:
... '>>> print "a\\n\\nb"\na\n<BLANKLINE>\nb\n'
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_BLANKLINE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
**********************************************************************
The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be
treated as equal:
... '>>> print 1, 2, 3\n 1 2\n 3'
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.NORMALIZE_WHITESPACE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
An example from the docs:
>>> print range(20) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
The ELLIPSIS flag causes ellipsis marker ("...") in the expected
output to match any substring in the actual output:
... '>>> print range(15)\n[0, 1, 2, ..., 14]\n'
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.ELLIPSIS
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... also matches nothing:
>>> for i in range(100):
...     print i**2, #doctest: +ELLIPSIS
0 1...4...9 16 ... 36 49 64 ... 9801
... can be surprising; e.g., this test passes:
>>> for i in range(21): #doctest: +ELLIPSIS
>>> print range(20) # doctest:+ELLIPSIS
>>> print range(20) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
The REPORT_UDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a unified diff:
... >>> print '\n'.join('abcdefg')
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
print '\n'.join('abcdefg')
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_UDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
**********************************************************************
print '\n'.join('abcdefg')
Differences (unified diff with -expected +actual):
The REPORT_CDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a context diff:
>>> # Reuse f() from the REPORT_UDIFF example, above.
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_CDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
**********************************************************************
print '\n'.join('abcdefg')
Differences (context diff with expected followed by actual):
The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm
used by the popular ndiff.py utility. This does intraline difference
marking, as well as interline differences.
... >>> print "a b c d e f g h i j k l m"
... a b c d e f g h i j k 1 m
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_NDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
**********************************************************************
print "a b c d e f g h i j k l m"
Differences (ndiff with -expected +actual):
- a b c d e f g h i j k 1 m
+ a b c d e f g h i j k l m
The REPORT_ONLY_FIRST_FAILURE flag suppresses result output after the first
failing example:
... >>> print 1 # first success
... >>> print 2 # first failure
... >>> print 3 # second failure
... >>> print 4 # second success
... >>> print 5 # third failure
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
**********************************************************************
However, output from `report_start` is not suppressed:
>>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test)
**********************************************************************
For the purposes of REPORT_ONLY_FIRST_FAILURE, unexpected exceptions
count as failures:
... >>> print 1 # first success
... >>> raise ValueError(2) # first failure
... >>> print 3 # second failure
... >>> print 4 # second success
... >>> print 5 # third failure
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
**********************************************************************
raise ValueError(2) # first failure
def option_directives(): r"""
Tests of `DocTestRunner`'s option directive mechanism.
Option directives can be used to turn option flags on or off for a
single example. To turn an option on for an example, follow that
example with a comment of the form ``# doctest: +OPTION``:
... >>> print range(10) # should fail: no ellipsis
... >>> print range(10) # doctest: +ELLIPSIS
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
print range(10) # should fail: no ellipsis
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
To turn an option off for an example, follow that example with a
comment of the form ``# doctest: -OPTION``:
... >>> # should fail: no ellipsis
... >>> print range(10) # doctest: -ELLIPSIS
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False,
... optionflags=doctest.ELLIPSIS).run(test)
**********************************************************************
print range(10) # doctest: -ELLIPSIS
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Option directives affect only the example that they appear with; they
do not change the options for surrounding examples:
... >>> print range(10) # Should fail: no ellipsis
... >>> print range(10) # doctest: +ELLIPSIS
... >>> print range(10) # Should fail: no ellipsis
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
print range(10) # Should fail: no ellipsis
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
**********************************************************************
print range(10) # Should fail: no ellipsis
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Multiple options may be modified by a single option directive. They
may be separated by whitespace, commas, or both:
... >>> print range(10) # Should fail
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
print range(10) # Should fail
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
... >>> print range(10) # Should fail
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
print range(10) # Should fail
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
... >>> print range(10) # Should fail
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
**********************************************************************
print range(10) # Should fail
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
The option directive may be put on the line following the source, as
long as a continuation prompt is used:
... ... # doctest: +ELLIPSIS
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
For examples with multi-line source, the option directive may appear
... >>> for x in range(10): # doctest: +ELLIPSIS
... >>> for x in range(10):
... ... print x, # doctest: +ELLIPSIS
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
If more than one line of an example with multi-line source has an
option directive, then they are combined:
... Should fail (option directive not on the last line):
... >>> for x in range(10): # doctest: +ELLIPSIS
... ... print x, # doctest: +NORMALIZE_WHITESPACE
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
It is an error to have a comment of the form ``# doctest:`` that is
*not* followed by words of the form ``+OPTION`` or ``-OPTION``, where
``OPTION`` is an option that has been registered with
`register_optionflag`:
>>> # Error: Option not registered
>>> s = '>>> print 12 #doctest: +BADOPTION'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: '+BADOPTION'
>>> # Error: No + or - prefix
>>> s = '>>> print 12 #doctest: ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: 'ELLIPSIS'
It is an error to use an option directive on a line that contains no
source:
>>> s = '>>> # doctest: +ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 0 of the doctest for s has an option directive on a line with no example: '# doctest: +ELLIPSIS'
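
For reference, the registered option names are the module constants shown
earlier; a new name can be registered with doctest.register_optionflag.
A sketch with a hypothetical flag name:

    MY_FLAG = doctest.register_optionflag('MY_FLAG')

The returned value is a new flag constant that can be or-ed with the
existing ones.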
def test_testsource(): r"""
Unit tests for `testsource()`.
The testsource() function takes a module and a name, finds the (first)
test with that name in that module, and converts it to a script. The
example code is converted to regular Python code. The surrounding
words and expected output are converted to comments:
>>> import test.test_doctest
>>> name = 'test.test_doctest.sample_func'
>>> print doctest.testsource(test.test_doctest, name)
>>> name = 'test.test_doctest.SampleNewStyleClass'
>>> print doctest.testsource(test.test_doctest, name)
>>> name = 'test.test_doctest.SampleClass.a_classmethod'
>>> print doctest.testsource(test.test_doctest, name)
print SampleClass.a_classmethod(10)
print SampleClass(0).a_classmethod(10)
def test_debug(): r"""
Create a docstring that we want to debug:
Create some fake stdin input, to feed to the debugger:
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput(['next', 'print x', 'continue'])
Run the debugger on the docstring, and then restore sys.stdin.
>>> try: doctest.debug_src(s)
... finally: sys.stdin = real_stdin
def test_pdb_set_trace():
"""Using pdb.set_trace from a doctest.
You can use pdb.set_trace from a doctest. To do so, you must
retrieve the set_trace function from the pdb module at the time
you use it. The doctest module changes sys.stdout so that it can
capture program output. It also temporarily replaces pdb.set_trace
with a version that restores stdout. This is necessary for you to
see debugger output.
... >>> import pdb; pdb.set_trace()
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(doc, {}, "foo", "foo.py", 0)
>>> runner = doctest.DocTestRunner(verbose=False)
To demonstrate this, we'll create a fake standard input that
captures our debugger input:
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print x', # print data defined by the example
... 'continue', # stop debugging
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
> <doctest foo[1]>(1)?()->None
-> import pdb; pdb.set_trace()
You can also put pdb.set_trace in a function called from a test:
>>> def calls_set_trace():
... import pdb; pdb.set_trace()
... >>> calls_set_trace()
>>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print y', # print data defined in the function
... 'up', # out of function
... 'print x', # print data defined by the example
... 'continue', # stop debugging
... sys.stdin = real_stdin
> <doctest test.test_doctest.test_pdb_set_trace[8]>(3)calls_set_trace()->None
-> import pdb; pdb.set_trace()
During interactive debugging, source code is shown, even for
... ... import pdb; pdb.set_trace()
>>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'list', # list source from example 2
... 'next', # return from g()
... 'list', # list source from example 1
... 'next', # return from f()
... 'list', # list source from example 3
... 'continue', # stop debugging
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
> <doctest foo[1]>(3)g()->None
-> import pdb; pdb.set_trace()
3 -> import pdb; pdb.set_trace()
> <doctest foo[0]>(2)f()->None
> <doctest foo[2]>(1)?()->None
**********************************************************************
File "foo.py", line 7, in foo
def test_pdb_set_trace_nested():
"""This illustrates more-demanding use of set_trace with nested functions.
... def calls_set_trace(self):
... import pdb; pdb.set_trace()
>>> calls_set_trace = C().calls_set_trace
... >>> calls_set_trace()
>>> parser = doctest.DocTestParser()
>>> runner = doctest.DocTestRunner(verbose=False)
>>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print y', # print data defined in the function
... 'step', 'step', 'step', 'step', 'step', 'step', 'print z',
... 'continue', # stop debugging
... sys.stdin = real_stdin
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(7)f1()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(8)f1()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(11)f2()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(12)f2()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(13)f2()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
*** NameError: name 'foo' is not defined
"""DocTestSuite creates a unittest test suite from a doctest.
We create a Suite by providing a module. A module can be provided
by passing a module object:
>>> import test.sample_doctest
>>> suite = doctest.DocTestSuite(test.sample_doctest)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=4>
We can also supply the module by name:
>>> suite = doctest.DocTestSuite('test.sample_doctest')
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=4>
We can use the current module:
>>> suite = test.sample_doctest.test_suite()
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=4>
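
A module can expose such a suite through a helper like the test_suite()
used above; a minimal sketch (not necessarily how test.sample_doctest
defines it; DocTestSuite with no argument uses the calling module):

    def test_suite():
        import doctest
        return doctest.DocTestSuite()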
We can supply global variables. If we pass globs, they will be
used instead of the module globals. Here we'll pass an empty
globals, triggering an extra error:
>>> suite = doctest.DocTestSuite('test.sample_doctest', globs={})
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=5>
Alternatively, we can provide extra globals. Here we'll make an
error go away by providing an extra global variable:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
...                              extraglobs={'y': 1})
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=3>
You can pass option flags. Here we'll cause an extra error
by disabling the blank-line feature:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=5>
You can supply setUp and tearDown functions:
>>> def setUp(test):
...     import test.test_doctest
...     test.test_doctest.sillySetup = True
>>> def tearDown(test):
...     import test.test_doctest
...     del test.test_doctest.sillySetup
Here, we installed a silly variable that the test expects:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=3>
But the tearDown restores sanity:
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
Traceback (most recent call last):
AttributeError: 'module' object has no attribute 'sillySetup'
The setUp and tearDown functions are passed test objects. Here
we'll use the setUp function to supply the missing variable y:
>>> def setUp(test):
...     test.globs['y'] = 1
>>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=3>
Here, we didn't need to use a tearDown function because we
modified the test globals, which are a copy of the
sample_doctest module dictionary. The test globals are
automatically cleared for us after a test.
Finally, you can provide an alternate test finder. Here we'll
use a custom test_finder to run just the test named bar.
However, the test in the module docstring, and the two tests
in the module __test__ dict, aren't filtered, so we actually
run three tests besides bar's. The filtering mechanisms are
poorly conceived, and will go away someday.
>>> finder = doctest.DocTestFinder(
... _namefilter=lambda prefix, base: base!='bar')
>>> suite = doctest.DocTestSuite('test.sample_doctest',
...                              test_finder=finder)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=4 errors=0 failures=1>
"""We can test tests found in text files using a DocFileSuite.
We create a suite by providing the names of one or more text
files that include examples:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=2 errors=0 failures=2>
The test files are looked for in the directory containing the
calling module. A package keyword argument can be provided to
specify a different relative location.
>>> suite = doctest.DocFileSuite('test_doctest.txt',
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=2 errors=0 failures=2>
'/' should be used as a path separator. It will be converted
to a native separator at run time:
>>> suite = doctest.DocFileSuite('../test/test_doctest.txt')
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=1 errors=0 failures=1>
If DocFileSuite is used from an interactive session, then files
are resolved relative to the directory of sys.argv[0]:
>>> import new, os.path, test.test_doctest
>>> sys.argv = [test.test_doctest.__file__]
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... package=new.module('__main__'))
By setting `module_relative=False`, os-specific paths may be
used (including absolute paths and paths relative to the current
working directory):
>>> # Get the absolute path of the test package.
>>> test_doctest_path = os.path.abspath(test.test_doctest.__file__)
>>> test_pkg_path = os.path.split(test_doctest_path)[0]
>>> # Use it to find the absolute path of test_doctest.txt.
>>> test_file = os.path.join(test_pkg_path, 'test_doctest.txt')
>>> suite = doctest.DocFileSuite(test_file, module_relative=False)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=1 errors=0 failures=1>
It is an error to specify `package` when `module_relative=False`:
>>> suite = doctest.DocFileSuite(test_file, module_relative=False,
...                              package='test')
Traceback (most recent call last):
ValueError: Package may only be specified for module-relative paths.
You can specify initial global variables:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=2 errors=0 failures=1>
In this case, we supplied a missing favorite color. You can also
provide doctest options:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE,
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=2 errors=0 failures=2>
And, you can supply setUp and tearDown functions:
>>> def setUp(test):
...     import test.test_doctest
...     test.test_doctest.sillySetup = True
>>> def tearDown(test):
...     import test.test_doctest
...     del test.test_doctest.sillySetup
Here, we installed a silly variable that the test expects:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=2 errors=0 failures=1>
But the tearDown restores sanity:
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
Traceback (most recent call last):
AttributeError: 'module' object has no attribute 'sillySetup'
The setUp and tearDown functions are passed test objects.
Here, we'll use a setUp function to set the favorite color in
the test globals:
>>> def setUp(test):
...     test.globs['favorite_color'] = 'blue'
>>> suite = doctest.DocFileSuite('test_doctest.txt', setUp=setUp)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=1 errors=0 failures=0>
Here, we didn't need to use a tearDown function because we
modified the test globals. The test globals are
automatically cleared for us after a test.
def test_trailing_space_in_test():
    """
Trailing spaces in expected output are significant:
def test_unittest_reportflags():
"""Default unittest reporting flags can be set to control reporting
Here, we'll set the REPORT_ONLY_FIRST_FAILURE option so we see
only the first failure of each test. First, we'll look at the
output without the flag. The file test_doctest.txt has two
tests. They both fail if blank lines are disabled:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> result = suite.run(unittest.TestResult())
>>> print result.failures[0][1] # doctest: +ELLIPSIS
Note that we see both failures displayed.
>>> old = doctest.set_unittest_reportflags(
... doctest.REPORT_ONLY_FIRST_FAILURE)
Now, when we run the test:
>>> result = suite.run(unittest.TestResult())
>>> print result.failures[0][1] # doctest: +ELLIPSIS
NameError: name 'favorite_color' is not defined
We get only the first failure.
If we give any reporting options when we set up the tests, however:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF)
Then the default reporting options are ignored:
>>> result = suite.run(unittest.TestResult())
>>> print result.failures[0][1] # doctest: +ELLIPSIS
Differences (ndiff with -expected +actual):
Test runners can restore the formatting flags after they run:
>>> ignored = doctest.set_unittest_reportflags(old)
def test_testfile(): r"""
Tests for the `testfile()` function. This function runs all the
doctest examples in a given file. In its simplest invocation, it is
called with the name of a file, which is taken to be relative to the
calling module. The return value is (#failures, #tests).
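
A typical programmatic use, as a sketch ('example.txt' is a hypothetical
file name):

    failures, tests = doctest.testfile('example.txt')
    if failures:
        print '%d of %d examples failed' % (failures, tests)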
>>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
NameError: name 'favorite_color' is not defined
**********************************************************************
1 of 2 in test_doctest.txt
***Test Failed*** 1 failures.
>>> doctest.master = None # Reset master.
(Note: we'll be clearing doctest.master after each call to
`doctest.testfile`, to suppress warnings about multiple tests with the
same name.)
Globals may be specified with the `globs` and `extraglobs` parameters:
>>> globs = {'favorite_color': 'blue'}
>>> doctest.testfile('test_doctest.txt', globs=globs)
(0, 2)
>>> doctest.master = None # Reset master.
>>> extraglobs = {'favorite_color': 'red'}
>>> doctest.testfile('test_doctest.txt', globs=globs,
... extraglobs=extraglobs) # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
**********************************************************************
1 of 2 in test_doctest.txt
***Test Failed*** 1 failures.
>>> doctest.master = None # Reset master.
The file may be made relative to a given module or package, using the
optional `module_relative` parameter:
>>> doctest.testfile('test_doctest.txt', globs=globs,
... module_relative='test')
>>> doctest.master = None # Reset master.
Verbosity can be increased with the optional `verbose` parameter:
>>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True)
1 items passed all tests:
2 tests in test_doctest.txt
>>> doctest.master = None # Reset master.
The name of the test may be specified with the optional `name`
parameter:
>>> doctest.testfile('test_doctest.txt', name='newname')
**********************************************************************
File "...", line 6, in newname
>>> doctest.master = None # Reset master.
The summary report may be suppressed with the optional `report`
parameter:
>>> doctest.testfile('test_doctest.txt', report=False)
**********************************************************************
File "...", line 6, in test_doctest.txt
NameError: name 'favorite_color' is not defined
>>> doctest.master = None # Reset master.
The optional keyword argument `raise_on_error` can be used to raise an
exception on the first error (which may be useful for postmortem
debugging):
>>> doctest.testfile('test_doctest.txt', raise_on_error=True)
Traceback (most recent call last):
>>> doctest.master = None # Reset master.
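
A sketch of such postmortem-style handling (DocTestFailure and
UnexpectedException are the exception types raised in this mode):

    try:
        doctest.testfile('test_doctest.txt', raise_on_error=True)
    except (doctest.DocTestFailure, doctest.UnexpectedException), err:
        print 'stopped in', err.test.name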
# old_test1, ... used to live in doctest.py, but cluttered it. Note
# that these use the deprecated doctest.Tester, so should go away (or
# be rewritten) someday.
# Ignore all warnings about the use of class Tester in this module.
# Note that the name of this module may differ depending on how it's
# imported, so the use of __name__ is important.
warnings.filterwarnings("ignore", "class Tester", DeprecationWarning,
                        __name__, 0)
>>> from doctest import Tester
>>> t = Tester(globs={'x': 42}, verbose=0)
**********************************************************************
>>> t.runstring(">>> x = x * 2\n>>> print x\n84\n", 'example2')
**********************************************************************
***Test Failed*** 1 failures.
>>> t.summarize(verbose=1)
1 items passed all tests:
**********************************************************************
***Test Failed*** 1 failures.
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=1)
>>> t.runstring(test, "Example")
0 of 2 examples failed in string Example
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=0)
>>> def _f():
...     '''Trivial docstring example.
...     >>> assert 2 == 2
...     '''
>>> t.rundoc(_f) # expect 0 failures in 1 example
(0, 1)
>>> m1 = new.module('_m1')
>>> m2 = new.module('_m2')
>>> exec test_data in m1.__dict__
>>> exec test_data in m2.__dict__
>>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H})
Tests that objects outside m1 are excluded:
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=0)
>>> t.rundict(m1.__dict__, "rundict_test", m1) # f2 and g2 and h2 skipped
Once more, not excluding stuff outside m1:
>>> t = Tester(globs={}, verbose=0)
>>> t.rundict(m1.__dict__, "rundict_test_pvt") # None are skipped.
The exclusion of objects from outside the designated module is
meant to be invoked automagically by testmod.
>>> doctest.testmod(m1, verbose=False)
######################################################################
## Main
######################################################################

def test_main():
    # Check the doctest cases in doctest itself:
    test_support.run_doctest(doctest, verbosity=True)
    # Check the doctest cases defined here:
    from test import test_doctest
    test_support.run_doctest(test_doctest, verbosity=True)
import trace, sys, re, StringIO
def test_coverage(coverdir):
    tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,],
                         trace=0, count=1)
    tracer.run('reload(doctest); test_main()')
    r = tracer.results()
    print 'Writing coverage results...'
    r.write_results(show_missing=True, summary=True,
                    coverdir=coverdir)
if __name__ == '__main__':
    test_coverage('/tmp/doctest.cover')