Merge pull request #3444 from Zac-HD/annotate-internals
Skip uninformative locations in explain mode, misc internal cleanups
Zac-HD committed Aug 20, 2022
2 parents 45b6484 + 90c4b7b · commit d319af6
Showing 15 changed files with 124 additions and 129 deletions.
5 changes: 5 additions & 0 deletions hypothesis-python/RELEASE.rst
@@ -0,0 +1,5 @@
RELEASE_TYPE: patch

This patch fixes some type annotations for Python 3.9 and earlier (:issue:`3397`),
and teaches :ref:`explain mode <phases>` about certain locations it should not
bother reporting (:issue:`3439`).
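
Aside (not part of the diff): the explain phase is opt-in, since the default phases exclude it. A minimal sketch of enabling it via the public settings/Phase API, mirroring the profile change in tests/common/setup.py below; the test function and its body are illustrative only.

from hypothesis import Phase, given, settings, strategies as st

@settings(phases=tuple(Phase))  # every phase, including Phase.explain
@given(st.integers())
def test_example(x):
    # A deliberately failing property: when it fails, the explain phase
    # reports the lines that were always and only run by failing examples.
    assert x < 100
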
3 changes: 1 addition & 2 deletions hypothesis-python/src/hypothesis/core.py
@@ -881,8 +881,7 @@ def run_engine(self):
errors_to_report.append((fragments, err))
except BaseException as e:
# If we have anything for explain-mode, this is the time to report.
for line in explanations[falsifying_example.interesting_origin]:
fragments.append(line)
fragments.extend(explanations[falsifying_example.interesting_origin])
errors_to_report.append(
(fragments, e.with_traceback(get_trimmed_traceback()))
)
13 changes: 13 additions & 0 deletions hypothesis-python/src/hypothesis/internal/compat.py
@@ -22,6 +22,19 @@
BaseExceptionGroup as BaseExceptionGroup,
ExceptionGroup as ExceptionGroup,
)
if typing.TYPE_CHECKING: # pragma: no cover
from typing_extensions import Concatenate as Concatenate, ParamSpec as ParamSpec
else:
try:
from typing import Concatenate as Concatenate, ParamSpec as ParamSpec
except ImportError:
try:
from typing_extensions import (
Concatenate as Concatenate,
ParamSpec as ParamSpec,
)
except ImportError:
Concatenate, ParamSpec = None, None

PYPY = platform.python_implementation() == "PyPy"
WINDOWS = platform.system() == "Windows"
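
Aside (not part of the diff): these re-exports exist so that other modules, such as strategies/_internal/core.py below, can type decorators precisely. A hedged sketch of the general ParamSpec/Concatenate pattern, guarded because both names may be None on older Pythons without typing_extensions; prepend_zero is a made-up example, not Hypothesis API.

from typing import Callable, TypeVar

from hypothesis.internal.compat import Concatenate, ParamSpec

T = TypeVar("T")

if ParamSpec is not None:
    P = ParamSpec("P")

    def prepend_zero(f: Callable[Concatenate[int, P], T]) -> Callable[P, T]:
        # Drops the leading int parameter from the signature while keeping
        # the remaining parameters fully typed for callers.
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            return f(0, *args, **kwargs)

        return wrapper
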
53 changes: 20 additions & 33 deletions hypothesis-python/src/hypothesis/internal/conjecture/shrinker.py
@@ -278,8 +278,8 @@ def __init__(self, engine, initial, predicate, allow_transition):

# We keep track of the current best example on the shrink_target
# attribute.
self.shrink_target = None
self.update_shrink_target(initial)
self.shrink_target = initial
self.clear_change_tracking()
self.shrinks = 0

# We terminate shrinks that seem to have reached their logical
@@ -447,23 +447,15 @@ def s(n):
return "s" if n != 1 else ""

total_deleted = self.initial_size - len(self.shrink_target.buffer)

self.debug("---------------------")
self.debug("Shrink pass profiling")
self.debug("---------------------")
self.debug("")
calls = self.engine.call_count - self.initial_calls

self.debug(
"Shrinking made a total of %d call%s "
"of which %d shrank. This deleted %d byte%s out of %d."
% (
calls,
s(calls),
self.shrinks,
total_deleted,
s(total_deleted),
self.initial_size,
)
"---------------------\n"
"Shrink pass profiling\n"
"---------------------\n\n"
f"Shrinking made a total of {calls} call{s(calls)} of which "
f"{self.shrinks} shrank. This deleted {total_deleted} bytes out "
f"of {self.initial_size}."
)
for useful in [True, False]:
self.debug("")
@@ -828,22 +820,17 @@ def __changed_blocks(self):

def update_shrink_target(self, new_target):
assert isinstance(new_target, ConjectureResult)
if self.shrink_target is not None:
self.shrinks += 1
# If we are just taking a long time to shrink we don't want to
# trigger this heuristic, so whenever we shrink successfully
# we give ourselves a bit of breathing room to make sure we
# would find a shrink that took that long to find the next time.
# The case where we're taking a long time but making steady
# progress is handled by `finish_shrinking_deadline` in engine.py
self.max_stall = max(
self.max_stall, (self.calls - self.calls_at_last_shrink) * 2
)
self.calls_at_last_shrink = self.calls
else:
self.__all_changed_blocks = set()
self.__last_checked_changed_at = new_target

self.shrinks += 1
# If we are just taking a long time to shrink we don't want to
# trigger this heuristic, so whenever we shrink successfully
# we give ourselves a bit of breathing room to make sure we
# would find a shrink that took that long to find the next time.
# The case where we're taking a long time but making steady
# progress is handled by `finish_shrinking_deadline` in engine.py
self.max_stall = max(
self.max_stall, (self.calls - self.calls_at_last_shrink) * 2
)
self.calls_at_last_shrink = self.calls
self.shrink_target = new_target
self.__derived_values = {}

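
Aside (not part of the diff): the max_stall update in update_shrink_target grants a stall budget of at least twice the number of calls the most recent improvement took to find; the refactor above only drops the branch for an unset shrink_target, which is now initialised directly in __init__. A tiny worked example of the arithmetic, with made-up numbers:

calls_at_last_shrink = 1_000  # call count when the previous shrink succeeded
calls = 1_400                 # call count when this shrink succeeded
max_stall = 500               # current stall budget

# The budget never decreases, and always covers at least twice the gap the
# latest improvement needed: max(500, (1400 - 1000) * 2) == 800.
max_stall = max(max_stall, (calls - calls_at_last_shrink) * 2)
assert max_stall == 800
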
4 changes: 0 additions & 4 deletions hypothesis-python/src/hypothesis/internal/reflection.py
@@ -64,10 +64,6 @@ def function_digest(function):
hasher.update(function.__name__.encode())
except AttributeError:
pass
try:
hasher.update(function.__module__.__name__.encode())
except AttributeError:
pass
try:
# We prefer to use the modern signature API, but left this for compatibility.
# While we don't promise stability of the database, there's no advantage to
32 changes: 25 additions & 7 deletions hypothesis-python/src/hypothesis/internal/scrutineer.py
@@ -12,6 +12,7 @@
from collections import defaultdict
from functools import lru_cache, reduce
from itertools import groupby
from os import sep
from pathlib import Path

from hypothesis._settings import Phase, Verbosity
@@ -45,6 +46,20 @@ def trace(self, frame, event, arg):
self._previous_location = current_location


UNHELPFUL_LOCATIONS = (
# There's a branch which is only taken when an exception is active while exiting
# a contextmanager; this is probably after the fault has been triggered.
# Similar reasoning applies to a few other standard-library modules: even
# if the fault was later, these still aren't useful locations to report!
f"{sep}contextlib.py",
f"{sep}inspect.py",
f"{sep}re.py",
f"{sep}re{sep}__init__.py", # refactored in Python 3.11
# Quite rarely, the first AFNP line is in Pytest's assertion-rewriting module.
f"{sep}_pytest{sep}assertion{sep}rewrite.py",
)


def get_explaining_locations(traces):
# Traces is a dict[interesting_origin | None, set[frozenset[tuple[str, int]]]]
# Each trace in the set might later become a Counter instead of frozenset.
@@ -84,21 +99,25 @@ def get_explaining_locations(traces):
else:
queue.update(cf_graphs[origin][src] - seen)

return explanations
# The last step is to filter out explanations that we know would be uninformative.
# When this is the first AFNP location, we conclude that Scrutineer missed the
# real divergence (earlier in the trace) and drop that unhelpful explanation.
return {
origin: {loc for loc in afnp_locs if not loc[0].endswith(UNHELPFUL_LOCATIONS)}
for origin, afnp_locs in explanations.items()
}


LIB_DIR = str(Path(sys.executable).parent / "lib")
EXPLANATION_STUB = (
"Explanation:",
" These lines were always and only run by failing examples:",
)
HAD_TRACE = " We didn't try to explain this, because sys.gettrace()="


def make_report(explanations, cap_lines_at=5):
report = defaultdict(list)
for origin, locations in explanations.items():
assert locations # or else we wouldn't have stored the key, above.
report_lines = [
" {}:{}".format(k, ", ".join(map(str, sorted(l for _, l in v))))
for k, v in groupby(locations, lambda kv: kv[0])
@@ -107,15 +126,14 @@ def make_report(explanations, cap_lines_at=5):
if len(report_lines) > cap_lines_at + 1:
msg = " (and {} more with settings.verbosity >= verbose)"
report_lines[cap_lines_at:] = [msg.format(len(report_lines[cap_lines_at:]))]
report[origin] = list(EXPLANATION_STUB) + report_lines
if report_lines: # We might have filtered out every location as uninformative.
report[origin] = list(EXPLANATION_STUB) + report_lines
return report


def explanatory_lines(traces, settings):
if Phase.explain in settings.phases and sys.gettrace() and not traces:
return defaultdict(
lambda: [EXPLANATION_STUB[0], HAD_TRACE + repr(sys.gettrace())]
)
return defaultdict(list)
# Return human-readable report lines summarising the traces
explanations = get_explaining_locations(traces)
max_lines = 5 if settings.verbosity <= Verbosity.normal else 100
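
Aside (not part of the diff): the filtering added above works because str.endswith accepts a tuple of suffixes, so a traced (filename, line) pair is discarded whenever its filename ends with any entry of UNHELPFUL_LOCATIONS. A standalone sketch with made-up paths:

from os import sep

# Hypothetical always-and-only-failing locations: (filename, line) pairs.
afnp_locs = {
    (f"{sep}home{sep}user{sep}project{sep}mycode.py", 17),
    (f"{sep}usr{sep}lib{sep}python3.10{sep}contextlib.py", 153),
}
unhelpful = (f"{sep}contextlib.py", f"{sep}inspect.py")

# endswith(tuple) matches any suffix, so only the user-code location survives.
kept = {loc for loc in afnp_locs if not loc[0].endswith(unhelpful)}
assert kept == {(f"{sep}home{sep}user{sep}project{sep}mycode.py", 17)}
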
17 changes: 8 additions & 9 deletions hypothesis-python/src/hypothesis/strategies/_internal/core.py
@@ -49,7 +49,14 @@
from hypothesis.errors import InvalidArgument, ResolutionFailed
from hypothesis.internal.cathetus import cathetus
from hypothesis.internal.charmap import as_general_categories
from hypothesis.internal.compat import ceil, floor, get_type_hints, is_typed_named_tuple
from hypothesis.internal.compat import (
Concatenate,
ParamSpec,
ceil,
floor,
get_type_hints,
is_typed_named_tuple,
)
from hypothesis.internal.conjecture.utils import (
calc_label_from_cls,
check_sample,
@@ -122,14 +129,6 @@
except ImportError: # < py3.8
Protocol = object # type: ignore[assignment]

try:
from typing import Concatenate, ParamSpec
except ImportError:
try:
from typing_extensions import Concatenate, ParamSpec
except ImportError:
ParamSpec = None # type: ignore


@cacheable
@defines_strategy()
21 changes: 7 additions & 14 deletions hypothesis-python/tests/common/setup.py
@@ -12,7 +12,7 @@
from tempfile import mkdtemp
from warnings import filterwarnings

from hypothesis import Verbosity, settings
from hypothesis import Phase, Verbosity, settings
from hypothesis._settings import not_set
from hypothesis.configuration import set_hypothesis_home_dir
from hypothesis.errors import NonInteractiveExampleWarning
@@ -25,10 +25,7 @@ def run():
filterwarnings("ignore", category=ImportWarning)
filterwarnings("ignore", category=FutureWarning, module="pandas._version")

# Fixed in recent versions but allowed by pytest=3.0.0; see #1630
filterwarnings("ignore", category=DeprecationWarning, module="pluggy")

# See https://github.com/numpy/numpy/pull/432
# See https://github.com/numpy/numpy/pull/432; still a thing as of 2022.
filterwarnings("ignore", message="numpy.dtype size changed")
filterwarnings("ignore", message="numpy.ufunc size changed")

@@ -42,14 +39,6 @@ def run():
category=UserWarning,
)

# Imported by Pandas in version 1.9, but fixed in later versions.
filterwarnings(
"ignore", message="Importing from numpy.testing.decorators is deprecated"
)
filterwarnings(
"ignore", message="Importing from numpy.testing.nosetester is deprecated"
)

# User-facing warning which does not apply to our own tests
filterwarnings("ignore", category=NonInteractiveExampleWarning)

@@ -77,7 +66,11 @@ def run():
)

settings.register_profile(
"default", settings(max_examples=20 if IN_COVERAGE_TESTS else not_set)
"default",
settings(
max_examples=20 if IN_COVERAGE_TESTS else not_set,
phases=list(Phase), # Dogfooding the explain phase
),
)

settings.register_profile("speedy", settings(max_examples=5))
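
Aside (not part of the diff): registering a profile has no effect until it is loaded, typically from conftest.py via settings.load_profile or with pytest's --hypothesis-profile option. A minimal sketch mirroring the "default" profile change above; the profile name "explainy" is made up.

from hypothesis import Phase, settings

settings.register_profile("explainy", settings(phases=list(Phase)))
settings.load_profile("explainy")
assert Phase.explain in settings.default.phases
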
3 changes: 2 additions & 1 deletion hypothesis-python/tests/cover/test_phases.py
@@ -11,6 +11,7 @@
import pytest

from hypothesis import Phase, example, given, settings, strategies as st
from hypothesis._settings import all_settings
from hypothesis.database import ExampleDatabase, InMemoryExampleDatabase
from hypothesis.errors import InvalidArgument

@@ -47,7 +48,7 @@ def test_sorts_and_dedupes_phases(arg, expected):


def test_phases_default_to_all_except_explain():
assert settings().phases + (Phase.explain,) == tuple(Phase)
assert all_settings["phases"].default + (Phase.explain,) == tuple(Phase)


def test_does_not_reuse_saved_examples_if_reuse_not_in_phases():
46 changes: 0 additions & 46 deletions hypothesis-python/tests/cover/test_scrutineer.py

This file was deleted.

2 changes: 1 addition & 1 deletion hypothesis-python/tests/cover/test_settings.py
@@ -464,7 +464,7 @@ def __repr__(self):


def test_show_changed():
s = settings(max_examples=999, database=None)
s = settings(max_examples=999, database=None, phases=tuple(Phase)[:-1])
assert s.show_changed() == "database=None, max_examples=999"

