Merge pull request #1853 from PyCQA/improve-coverage
improve coverage
asottile committed Jul 29, 2023
2 parents 7ef0350 + 4a47bab commit 15f4569
Showing 5 changed files with 48 additions and 79 deletions.
26 changes: 0 additions & 26 deletions .coveragerc

This file was deleted.

9 changes: 9 additions & 0 deletions setup.cfg
@@ -54,6 +54,15 @@ flake8.report =
[bdist_wheel]
universal = 1

[coverage:run]
source =
    flake8
    tests
plugins = covdefaults

[coverage:report]
fail_under = 97

[mypy]
check_untyped_defs = true
disallow_any_generics = true
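
The new [coverage:run] and [coverage:report] sections replace the deleted .coveragerc. `plugins = covdefaults` loads the covdefaults coverage plugin, which supplies baseline settings (branch coverage, common exclusion pragmas, and the version-gated pragmas used in the Python changes below) so they no longer have to be repeated per project, while `fail_under = 97` keeps a project-specific threshold in place of the plugin's stricter default of 100. As a rough sketch of the mechanism only (not covdefaults' actual source, and with a hypothetical module name), a coverage.py configurer plugin is an importable module that exposes a `coverage_init` hook and registers a configurer:

    # my_cov_plugin.py -- hypothetical sketch of a coverage configurer plugin
    import coverage


    class DefaultsConfigurer(coverage.CoveragePlugin):
        """Push a few baseline settings into the coverage config at startup."""

        def configure(self, config):
            # always measure branch coverage
            config.set_option("run:branch", True)
            # extend (rather than replace) the exclusion patterns
            excludes = list(config.get_option("report:exclude_lines") or [])
            excludes.append(r"# pragma: no cover\b")
            config.set_option("report:exclude_lines", excludes)


    def coverage_init(reg, options):
        # coverage.py calls this when the plugin is listed under [coverage:run]
        reg.add_configurer(DefaultsConfigurer())

Such a plugin would be enabled the same way covdefaults is here: `plugins = my_cov_plugin` under [coverage:run].
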
4 changes: 2 additions & 2 deletions src/flake8/_compat.py
@@ -3,9 +3,9 @@
import sys
import tokenize

if sys.version_info >= (3, 12):
if sys.version_info >= (3, 12): # pragma: >=3.12 cover
FSTRING_START = tokenize.FSTRING_START
FSTRING_MIDDLE = tokenize.FSTRING_MIDDLE
FSTRING_END = tokenize.FSTRING_END
else:
else: # pragma: <3.12 cover
FSTRING_START = FSTRING_MIDDLE = FSTRING_END = -1
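
The `# pragma: >=3.12 cover` and `# pragma: <3.12 cover` comments added here are covdefaults' version-gated pragmas: on interpreters where the stated condition does not hold, the marked branch is excluded from the coverage calculation, so neither arm of the version check shows up as a miss. A minimal sketch of the same pattern on an unrelated, hypothetical module (assuming covdefaults accepts the analogous 3.11 pragmas):

    import sys

    if sys.version_info >= (3, 11):  # pragma: >=3.11 cover
        import tomllib  # stdlib on 3.11+
    else:  # pragma: <3.11 cover
        import tomli as tomllib  # backport on older interpreters
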
87 changes: 36 additions & 51 deletions src/flake8/processor.py
@@ -3,6 +3,7 @@

import argparse
import ast
import functools
import logging
import tokenize
from typing import Any
@@ -114,31 +115,23 @@ def __init__(
self.verbose = options.verbose
#: Statistics dictionary
self.statistics = {"logical lines": 0}
self._file_tokens: list[tokenize.TokenInfo] | None = None
# map from line number to the line we'll search for `noqa` in
self._noqa_line_mapping: dict[int, str] | None = None
self._fstring_start = -1

@property
@functools.cached_property
def file_tokens(self) -> list[tokenize.TokenInfo]:
"""Return the complete set of tokens for a file."""
if self._file_tokens is None:
line_iter = iter(self.lines)
self._file_tokens = list(
tokenize.generate_tokens(lambda: next(line_iter))
)

return self._file_tokens
line_iter = iter(self.lines)
return list(tokenize.generate_tokens(lambda: next(line_iter)))

def fstring_start(self, lineno: int) -> None:
def fstring_start(self, lineno: int) -> None: # pragma: >=3.12 cover
"""Signal the beginning of an fstring."""
self._fstring_start = lineno

def multiline_string(
self, token: tokenize.TokenInfo
) -> Generator[str, None, None]:
"""Iterate through the lines of a multiline string."""
if token.type == FSTRING_END:
if token.type == FSTRING_END: # pragma: >=3.12 cover
start = self._fstring_start
else:
start = token.start[0]
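
The hunk above replaces the hand-rolled memoization of `file_tokens` (a `_file_tokens` attribute preset to None and filled on first use) with `functools.cached_property`, which tokenizes on first access, stores the result on the instance, and hands back the cached list on every later access. A self-contained sketch of the pattern on a toy class, not flake8's own, just to show the caching behaviour:

    from __future__ import annotations

    import functools
    import tokenize


    class Source:
        def __init__(self, lines: list[str]) -> None:
            self.lines = lines

        @functools.cached_property
        def tokens(self) -> list[tokenize.TokenInfo]:
            # runs once; the result lands in the instance __dict__ and is
            # reused on every later access
            line_iter = iter(self.lines)
            return list(tokenize.generate_tokens(lambda: next(line_iter)))


    src = Source(["x = 1\n", "y = 2\n"])
    assert src.tokens is src.tokens  # second access returns the cached list
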
@@ -209,7 +202,7 @@ def build_logical_line_tokens(self) -> _Logical: # noqa: C901
continue
if token_type == tokenize.STRING:
text = mutate_string(text)
elif token_type == FSTRING_MIDDLE:
elif token_type == FSTRING_MIDDLE: # pragma: >=3.12 cover
text = "x" * len(text)
if previous_row:
(start_row, start_column) = start
@@ -277,41 +270,37 @@ def _noqa_line_range(self, min_line: int, max_line: int) -> dict[int, str]:
joined = "".join(self.lines[min_line - 1 : max_line])
return dict.fromkeys(line_range, joined)

def noqa_line_for(self, line_number: int) -> str | None:
"""Retrieve the line which will be used to determine noqa."""
if self._noqa_line_mapping is None:
try:
file_tokens = self.file_tokens
except (tokenize.TokenError, SyntaxError):
# if we failed to parse the file tokens, we'll always fail in
# the future, so set this so the code does not try again
self._noqa_line_mapping = {}
else:
ret = {}

min_line = len(self.lines) + 2
max_line = -1
for tp, _, (s_line, _), (e_line, _), _ in file_tokens:
if tp == tokenize.ENDMARKER:
break

min_line = min(min_line, s_line)
max_line = max(max_line, e_line)

if tp in (tokenize.NL, tokenize.NEWLINE):
ret.update(self._noqa_line_range(min_line, max_line))

min_line = len(self.lines) + 2
max_line = -1

# in newer versions of python, a `NEWLINE` token is inserted
# at the end of the file even if it doesn't have one.
# on old pythons, they will not have hit a `NEWLINE`
if max_line != -1:
@functools.cached_property
def _noqa_line_mapping(self) -> dict[int, str]:
"""Map from line number to the line we'll search for `noqa` in."""
try:
file_tokens = self.file_tokens
except (tokenize.TokenError, SyntaxError):
# if we failed to parse the file tokens, we'll always fail in
# the future, so set this so the code does not try again
return {}
else:
ret = {}

min_line = len(self.lines) + 2
max_line = -1
for tp, _, (s_line, _), (e_line, _), _ in file_tokens:
if tp == tokenize.ENDMARKER or tp == tokenize.DEDENT:
continue

min_line = min(min_line, s_line)
max_line = max(max_line, e_line)

if tp in (tokenize.NL, tokenize.NEWLINE):
ret.update(self._noqa_line_range(min_line, max_line))

self._noqa_line_mapping = ret
min_line = len(self.lines) + 2
max_line = -1

return ret

def noqa_line_for(self, line_number: int) -> str | None:
"""Retrieve the line which will be used to determine noqa."""
# NOTE(sigmavirus24): Some plugins choose to report errors for empty
# files on Line 1. In those cases, we shouldn't bother trying to
# retrieve a physical line (since none exist).
@@ -377,12 +366,8 @@ def strip_utf_bom(self) -> None:
# If we have nothing to analyze quit early
return

first_byte = ord(self.lines[0][0])
if first_byte not in (0xEF, 0xFEFF):
return

# If the first byte of the file is a UTF-8 BOM, strip it
if first_byte == 0xFEFF:
if self.lines[0][:1] == "\uFEFF":
self.lines[0] = self.lines[0][1:]
elif self.lines[0][:3] == "\xEF\xBB\xBF":
self.lines[0] = self.lines[0][3:]
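
The last processor.py hunk simplifies strip_utf_bom: rather than taking ord() of the first character and testing it against a mix of a code point (0xFEFF) and a raw byte value (0xEF), the check now compares string prefixes directly, covering both the decoded BOM ("\uFEFF") and the raw UTF-8 BOM bytes ("\xEF\xBB\xBF"), and the [:1] slice no longer indexes into a possibly empty first line. A standalone sketch of the same prefix check (a hypothetical helper, not part of flake8):

    def strip_bom(first_line: str) -> str:
        """Drop a leading BOM in either of the two forms the new check handles."""
        if first_line[:1] == "\uFEFF":  # BOM decoded as a single code point
            return first_line[1:]
        elif first_line[:3] == "\xEF\xBB\xBF":  # raw UTF-8 BOM bytes left undecoded
            return first_line[3:]
        else:
            return first_line


    assert strip_bom("\uFEFFimport os\n") == "import os\n"
    assert strip_bom("\xEF\xBB\xBFimport os\n") == "import os\n"
    assert strip_bom("import os\n") == "import os\n"
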
1 change: 1 addition & 0 deletions tox.ini
@@ -6,6 +6,7 @@ envlist = py,flake8,linters,docs
deps =
pytest!=3.0.5,!=5.2.3
coverage>=6
covdefaults
commands =
coverage run -m pytest {posargs}
coverage report
