Skip to content

Commit

Permalink
remove cancelled training sidebar
Browse files Browse the repository at this point in the history
Add link to technical aspects issue to the py27-py34 docs

Update pdb++ link (moved to GitHub)

Add pudb to project examples

Casting fixture parameter to list at the beginning of parameter parsing.

Added changelog file.

Fixed linting.

Always creating list for consistency.

Co-Authored-By: Bruno Oliveira <nicoddemus@gmail.com>

Workaround curl bug which makes retries of fetching codecov.io/bash not work

Port CHANGELOG from 4.6.6 release

changelog: pytest-dev#5523 was fixed in 5.0.1 already

Ref: pytest-dev#5952 (comment)

doc: caplog: add caplog.messages

Add missing version added/changed markers to docs

Noticed that some features added since 5.0 were not properly
marked with the version in which they were added/changed.

tests: keep numpy being optional

Ref: pytest-dev#5950 (comment)

ci: Travis: move py37-pexpect to another job

It does not have to run all tests again by itself.

Remove redundant mention from 5.2.0 release notes.

Fix plurality mismatch for 'warnings' and 'error' in terminal summary

Make sure plurality match is respected in rest of codebase

Fix collection message

Add changelog entry

Update tests

Change keys so by default they're plural and rename _match_plurality to _make_plural

Fix rebase

Fix rebase

Fix rebase
  • Loading branch information
obestwalter authored and Marco Gorelli committed Oct 18, 2019
1 parent 914a946 commit b91db0d
Show file tree
Hide file tree
Showing 18 changed files with 43 additions and 33 deletions.
1 change: 1 addition & 0 deletions AUTHORS
Expand Up @@ -160,6 +160,7 @@ Manuel Krebber
Marc Schlaich
Marcelo Duarte Trevisani
Marcin Bachry
Marco Gorelli
Mark Abramowitz
Markus Unterwaditzer
Martijn Faassen
Expand Down
1 change: 1 addition & 0 deletions changelog/5990.improvement.rst
@@ -0,0 +1 @@
Fix plurality mismatch in test summary (e.g. display "1 error" instead of "1 errors").
2 changes: 1 addition & 1 deletion src/_pytest/helpconfig.py
Expand Up @@ -201,7 +201,7 @@ def showhelp(config):
"with the '-v' option"
)

for warningreport in reporter.stats.get("warnings", []):
for warningreport in reporter.stats.get("warning", []):
tw.line("warning : " + warningreport.message, red=True)
return

Expand Down
5 changes: 4 additions & 1 deletion src/_pytest/main.py
Expand Up @@ -246,7 +246,10 @@ def pytest_collection(session):

def pytest_runtestloop(session):
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted("%d errors during collection" % session.testsfailed)
raise session.Interrupted(
"%d error%s during collection"
% (session.testsfailed, "s" if session.testsfailed != 1 else "")
)

if session.config.option.collectonly:
return True
Expand Down
19 changes: 12 additions & 7 deletions src/_pytest/terminal.py
Expand Up @@ -352,7 +352,7 @@ def pytest_warning_captured(self, warning_message, item):
# from _pytest.nodes import get_fslocation_from_item
from _pytest.warnings import warning_record_to_str

warnings = self.stats.setdefault("warnings", [])
warnings = self.stats.setdefault("warning", [])
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)

Expand Down Expand Up @@ -529,7 +529,7 @@ def report_collect(self, final=False):
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += " / %d errors" % errors
line += " / %d error%s" % (errors, "s" if errors != 1 else "")
if deselected:
line += " / %d deselected" % deselected
if skipped:
Expand Down Expand Up @@ -746,7 +746,7 @@ def getreports(self, name):

def summary_warnings(self):
if self.hasopt("w"):
all_warnings = self.stats.get("warnings")
all_warnings = self.stats.get("warning")
if not all_warnings:
return

Expand Down Expand Up @@ -1007,9 +1007,15 @@ def _folded_skips(skipped):
return values


def _make_plural(count, key):
if key in ["error", "warning"] and count != 1:
key += "s"
return count, key


def build_summary_stats_line(stats):
known_types = (
"failed passed skipped deselected xfailed xpassed warnings error".split()
"failed passed skipped deselected xfailed xpassed warning error".split()
)
unknown_type_seen = False
for found_type in stats:
Expand All @@ -1024,16 +1030,15 @@ def build_summary_stats_line(stats):
count = sum(
1 for rep in reports if getattr(rep, "count_towards_summary", True)
)
parts.append("%d %s" % (count, key))

parts.append("%d %s" % (_make_plural(count, key)))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"

if "failed" in stats or "error" in stats:
color = "red"
elif "warnings" in stats or unknown_type_seen:
elif "warning" in stats or unknown_type_seen:
color = "yellow"
elif "passed" in stats:
color = "green"
Expand Down
4 changes: 2 additions & 2 deletions testing/acceptance_test.py
Expand Up @@ -628,7 +628,7 @@ def test_pyargs_importerror(self, testdir, monkeypatch):
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret != 0

result.stdout.fnmatch_lines(["collected*0*items*/*1*errors"])
result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])

def test_pyargs_only_imported_once(self, testdir):
pkg = testdir.mkpydir("foo")
Expand Down Expand Up @@ -951,7 +951,7 @@ def test_with_failing_collection(self, testdir):
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
result.stdout.no_fnmatch_line("*duration*")
Expand Down
2 changes: 1 addition & 1 deletion testing/python/collect.py
Expand Up @@ -1167,7 +1167,7 @@ def test_real():
[
"*collected 1 item*",
"*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
"*1 passed, 1 warnings in *",
"*1 passed, 1 warning in *",
]
)

Expand Down
2 changes: 1 addition & 1 deletion testing/python/fixtures.py
Expand Up @@ -3081,7 +3081,7 @@ def test_3():
*KeyError*
*ERROR*teardown*test_2*
*KeyError*
*3 pass*2 error*
*3 pass*2 errors*
"""
)

Expand Down
2 changes: 1 addition & 1 deletion testing/test_assertrewrite.py
Expand Up @@ -119,7 +119,7 @@ def test_dont_rewrite_plugin(self, testdir):
}
testdir.makepyfile(**contents)
result = testdir.runpytest_subprocess()
assert "warnings" not in "".join(result.outlines)
assert "warning" not in "".join(result.outlines)

def test_rewrites_plugin_as_a_package(self, testdir):
pkgdir = testdir.mkpydir("plugin")
Expand Down
2 changes: 1 addition & 1 deletion testing/test_capture.py
Expand Up @@ -452,7 +452,7 @@ def test_two(capfd, capsys):
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 error*",
"*2 errors*",
]
)

Expand Down
7 changes: 3 additions & 4 deletions testing/test_collection.py
Expand Up @@ -892,7 +892,7 @@ def test_continue_on_collection_errors(testdir):
assert res.ret == 1

res.stdout.fnmatch_lines(
["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 errors*"]
)


Expand All @@ -908,8 +908,7 @@ def test_continue_on_collection_errors_maxfail(testdir):

res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1

res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"])


def test_fixture_scope_sibling_conftests(testdir):
Expand Down Expand Up @@ -1253,7 +1252,7 @@ def test_collector_respects_tbstyle(testdir):
' File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
" assert 0",
"AssertionError: assert 0",
"*! Interrupted: 1 errors during collection !*",
"*! Interrupted: 1 error during collection !*",
"*= 1 error in *",
]
)
2 changes: 1 addition & 1 deletion testing/test_doctest.py
Expand Up @@ -334,7 +334,7 @@ def test_doctest_unex_importerror_with_module(self, testdir):
[
"*ERROR collecting hello.py*",
"*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
"*Interrupted: 1 errors during collection*",
"*Interrupted: 1 error during collection*",
]
)

Expand Down
2 changes: 1 addition & 1 deletion testing/test_mark.py
Expand Up @@ -891,7 +891,7 @@ def test():
result = testdir.runpytest(str(p1))
result.stdout.fnmatch_lines(
[
"collected 0 items / 1 errors",
"collected 0 items / 1 error",
"* ERROR collecting test_parameterset_for_fail_at_collect.py *",
"Empty parameter set in 'test' at line 3",
"*= 1 error in *",
Expand Down
2 changes: 1 addition & 1 deletion testing/test_runner_xunit.py
Expand Up @@ -234,7 +234,7 @@ def test_function2(hello):
"*ValueError*42*",
"*function2*",
"*ValueError*42*",
"*2 error*",
"*2 errors*",
]
)
result.stdout.no_fnmatch_line("*xyz43*")
Expand Down
2 changes: 1 addition & 1 deletion testing/test_skipping.py
Expand Up @@ -886,7 +886,7 @@ def test_func():
" syntax error",
markline,
"SyntaxError: invalid syntax",
"*1 pass*2 error*",
"*1 pass*2 errors*",
]
)

Expand Down
2 changes: 1 addition & 1 deletion testing/test_stepwise.py
Expand Up @@ -164,7 +164,7 @@ def test_stop_on_collection_errors(broken_testdir, broken_first):
if broken_first:
files.reverse()
result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
result.stdout.fnmatch_lines("*errors during collection*")
result.stdout.fnmatch_lines("*error during collection*")


def test_xfail_handling(testdir):
Expand Down
7 changes: 4 additions & 3 deletions testing/test_terminal.py
Expand Up @@ -1236,7 +1236,7 @@ def test_failure():
"*= warnings summary =*",
"*warning_from_test*",
"*= short test summary info =*",
"*== 1 failed, 1 warnings in *",
"*== 1 failed, 1 warning in *",
]
)
result.stdout.no_fnmatch_line("*None*")
Expand All @@ -1255,12 +1255,13 @@ def test_failure():
("red", "1 failed", {"failed": (1,)}),
("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
("red", "1 error", {"error": (1,)}),
("red", "2 errors", {"error": (1, 2)}),
("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
# (a status that's not known to the code)
("yellow", "1 weird", {"weird": (1,)}),
("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
("yellow", "1 warnings", {"warnings": (1,)}),
("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
("yellow", "1 warning", {"warning": (1,)}),
("yellow", "1 passed, 1 warning", {"warning": (1,), "passed": (1,)}),
("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}),
# "Boring" statuses. These have no effect on the color of the summary
# line. Thus, if *every* test has a boring status, the summary line stays
Expand Down
12 changes: 6 additions & 6 deletions testing/test_warnings.py
Expand Up @@ -142,7 +142,7 @@ def test_func(fix):
[
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
"*test_unicode.py:7: UserWarning: \u6d4b\u8bd5*",
"* 1 passed, 1 warnings*",
"* 1 passed, 1 warning*",
]
)

Expand Down Expand Up @@ -201,7 +201,7 @@ def test_show_warning():
"""
)
result = testdir.runpytest("-W always" if default_config == "cmdline" else "")
result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warnings in *"])
result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warning in *"])


def test_non_string_warning_argument(testdir):
Expand All @@ -216,7 +216,7 @@ def test():
"""
)
result = testdir.runpytest("-W", "always")
result.stdout.fnmatch_lines(["*= 1 passed, 1 warnings in *"])
result.stdout.fnmatch_lines(["*= 1 passed, 1 warning in *"])


def test_filterwarnings_mark_registration(testdir):
Expand Down Expand Up @@ -302,7 +302,7 @@ def test_foo():
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
" *collection_warnings.py:3: UserWarning: collection warning",
' warnings.warn(UserWarning("collection warning"))',
"* 1 passed, 1 warnings*",
"* 1 passed, 1 warning*",
]
)

Expand Down Expand Up @@ -358,7 +358,7 @@ def test_bar():
[
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
"*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning",
"* 1 passed, 1 warnings *",
"* 1 passed, 1 warning *",
]
)

Expand Down Expand Up @@ -476,7 +476,7 @@ def test_hidden_by_mark(self, testdir):
[
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
"*test_hidden_by_mark.py:3: DeprecationWarning: collection",
"* 1 passed, 1 warnings*",
"* 1 passed, 1 warning*",
]
)

Expand Down

0 comments on commit b91db0d

Please sign in to comment.