# HG changeset patch
# User David Roe <roed.math@gmail.com>
# Date 1363323961 21600
# Node ID f147e60a82181792d3b8b59e90958b7c66dd50ff
# Parent e6706131e8949d0248e0ebb8df7232c3ff62a4e8
Add a "# require failure" flag for doctests.
diff --git a/doc/en/developer/conventions.rst b/doc/en/developer/conventions.rst
a
|
b
|
|
914 | 914 | ``todo: not implemented``, one can use the results of such a search to |
915 | 915 | direct further development on Sage. |
916 | 916 | |
| 917 | - Alternatively, you can mark a known bug with ``require failure``. |
| 918 | This way, if the test ever starts working (due to some change |
| 919 | elsewhere in Sage), the test will fail. The ``require failure`` tag |
| 920 | may then be removed, so that any later regression is detected. For |
| 921 | example:: |
| 922 | |
| 923 | sage: 1 + 1 # require failure: if this is ever 17 we want to know! |
| 924 | 17 |
| 925 | |
917 | 926 | - Some tests (hashing for example) behave differently on 32-bit and |
918 | 927 | 64-bit platforms. You can mark a line (generally the output) with |
919 | 928 | either ``# 32-bit`` or ``# 64-bit`` and the testing framework will |
diff --git a/sage/doctest/forker.py b/sage/doctest/forker.py
a
|
b
|
|
470 | 470 | if check(example.want, got, self.optionflags): |
471 | 471 | outcome = SUCCESS |
472 | 472 | |
| 473 | # If we wanted failure then an exception can still succeed |
| 474 | elif hasattr(example.want, 'require_failure') and example.want.require_failure: |
| 475 | if check(example.want, got, self.optionflags): |
| 476 | outcome = SUCCESS |
| 477 | |
473 | 478 | # The example raised an exception: check if it was expected. |
474 | 479 | else: |
475 | 480 | exc_info = sys.exc_info() |
diff --git a/sage/doctest/parsing.py b/sage/doctest/parsing.py
a
|
b
|
|
29 | 29 | find_sage_prompt = re.compile(r"^(\s*)sage: ", re.M) |
30 | 30 | find_sage_continuation = re.compile(r"^(\s*)\.\.\.\.:", re.M) |
31 | 31 | random_marker = re.compile('.*random', re.I) |
| 32 | expect_failure_marker = re.compile('.*require failure', re.I) |
32 | 33 | tolerance_pattern = re.compile(r'\b((?:abs(?:olute)?)|(?:rel(?:ative)?))? *?tol(?:erance)?\b( +[0-9.e+-]+)?') |
33 | 34 | backslash_replacer = re.compile(r"""(\s*)sage:(.*)\\\ * |
34 | 35 | \ *(((\.){4}:)|((\.){3}))?\ *""") |
… |
… |
|
144 | 145 | want = MarkedOutput(want).update(abs_tol=epsilon) |
145 | 146 | else: |
146 | 147 | raise RuntimeError |
| 148 | if expect_failure_marker.search(comment): |
| 149 | if not isinstance(want, MarkedOutput): |
| 150 | want = MarkedOutput(want) |
| 151 | want.update(require_failure=True) |
147 | 152 | return want |
148 | 153 | |
149 | 154 | def pre_hash(s): |
… |
… |
|
217 | 222 | 0.0500000000000000 |
218 | 223 | """ |
219 | 224 | random = False |
| 225 | require_failure = False |
220 | 226 | rel_tol = 0 |
221 | 227 | abs_tol = 0 |
222 | 228 | tol = 0 |
… |
… |
|
662 | 668 | -0.5 |
663 | 669 | sage: print "1.000009" # abs tol 1e-5 |
664 | 670 | 1.0 |
| 671 | |
| 672 | Check the ``require failure`` option:: |
| 673 | |
| 674 | sage: 1 + 1 # require failure |
| 675 | 17 |
| 676 | sage: 1 / 0 # require failure |
| 677 | Infinity |
665 | 678 | """ |
| 679 | ok = None |
666 | 680 | if isinstance(want, MarkedOutput): |
667 | 681 | if want.random: |
668 | 682 | return True |
… |
… |
|
676 | 690 | want_values = [float(g[0]) for g in float_regex.findall(want)] |
677 | 691 | got_values = [float(g[0]) for g in float_regex.findall(got)] |
678 | 692 | if len(want_values) != len(got_values): |
679 | | return False |
| 693 | ok = False |
680 | 694 | if not doctest.OutputChecker.check_output(self, |
681 | 695 | float_regex.sub('*', want), float_regex.sub('*', got), optionflags): |
682 | | return False |
683 | | return all(check_tol(*ab) for ab in zip(want_values, got_values)) |
684 | | ok = doctest.OutputChecker.check_output(self, want, got, optionflags) |
685 | | #sys.stderr.write(str(ok) + " want: " + repr(want) + " got: " + repr(got) + "\n") |
686 | | return ok |
| 696 | ok = False |
| 697 | else: |
| 698 | ok = all(check_tol(*ab) for ab in zip(want_values, got_values)) |
| 699 | if ok is None: |
| 700 | ok = doctest.OutputChecker.check_output(self, want, got, optionflags) |
| 701 | if isinstance(want, MarkedOutput) and want.require_failure: |
| 702 | return not ok |
| 703 | else: |
| 704 | return ok |
687 | 705 | |
688 | 706 | def output_difference(self, example, got, optionflags): |
689 | 707 | r""" |
… |
… |
|
850 | 868 | else: |
851 | 869 | diff += "Tolerance exceeded in %s of %s\n"%(len(fails), len(want_values)) |
852 | 870 | diff += "\n".join(fails[:3]) + "\n" |
| 871 | elif isinstance(want, MarkedOutput) and want.require_failure: |
| 872 | if got: |
| 873 | return 'Expected failure, but answer was correct.\nGot:\n%s'%(doctest._indent(got)) |
| 874 | else: |
| 875 | return 'Expected failure, but answer was correct.\nGot nothing\n' |
853 | 876 | return diff |
diff --git a/sage/doctest/reporting.py b/sage/doctest/reporting.py
a
|
b
|
|
401 | 401 | pass |
402 | 402 | else: |
403 | 403 | if self.controller.options.optional is not True: # if True we test all optional tags |
| 404 | untested = optionals.pop("not tested",0) + optionals.pop("not implemented",0) |
404 | 405 | tags = sorted(optionals.keys()) |
405 | | untested = optionals.pop("not tested",0) + optionals.pop("not implemented",0) |
406 | 406 | seen_other = False |
407 | 407 | for tag in tags: |
408 | 408 | nskipped = optionals[tag] |