From 79cc5c03767a1655e0d16d8421641fd463be8073 Mon Sep 17 00:00:00 2001 From: Yilei Yang Date: Mon, 6 Feb 2023 08:20:27 -0800 Subject: [PATCH] Rebase Pyink to https://github.com/psf/black/commit/226cbf0226ee3bc26972357ba54c36409e9a84ae. Noticeable style changes: 1. Parenthesize multiple context managers https://github.com/psf/black/pull/3489. The following style changes are temporarily disabled when `--preview` is used together with `--pyink`: 2. Format unicode escape sequences https://github.com/psf/black/pull/2916. 3. Parenthesize conditional expressions https://github.com/psf/black/pull/2278. PiperOrigin-RevId: 507485670 --- README.md | 5 + patches/pyink.patch | 133 +++++++++------ src/pyink/__init__.py | 49 +++++- src/pyink/linegen.py | 160 ++++++++++++++---- src/pyink/mode.py | 7 + src/pyink/nodes.py | 11 ++ src/pyink/strings.py | 44 ++++- src/pyink/trans.py | 30 ++++ tests/data/conditional_expression.py | 160 ++++++++++++++++++ .../data/fast/pep_572_do_not_remove_parens.py | 4 + tests/data/miscellaneous/linelength6.py | 5 + .../data/preview/format_unicode_escape_seq.py | 33 ++++ .../data/preview/long_strings__regression.py | 18 ++ tests/data/preview/prefer_rhs_split.py | 2 +- tests/data/preview/remove_await_parens.py | 7 + .../auto_detect/features_3_10.py | 35 ++++ .../auto_detect/features_3_11.py | 37 ++++ .../auto_detect/features_3_8.py | 30 ++++ .../auto_detect/features_3_9.py | 34 ++++ .../targeting_py38.py | 38 +++++ .../targeting_py39.py | 104 ++++++++++++ tests/data/py_38/pep_572_remove_parens.py | 40 +++++ tests/data/simple_cases/docstring.py | 11 ++ tests/test_black.py | 47 +++++ tests/test_format.py | 31 ++++ 25 files changed, 978 insertions(+), 97 deletions(-) create mode 100644 tests/data/conditional_expression.py create mode 100644 tests/data/miscellaneous/linelength6.py create mode 100644 tests/data/preview/format_unicode_escape_seq.py create mode 100644 tests/data/preview_context_managers/auto_detect/features_3_10.py create mode 100644 tests/data/preview_context_managers/auto_detect/features_3_11.py create mode 100644 tests/data/preview_context_managers/auto_detect/features_3_8.py create mode 100644 tests/data/preview_context_managers/auto_detect/features_3_9.py create mode 100644 tests/data/preview_context_managers/targeting_py38.py create mode 100644 tests/data/preview_context_managers/targeting_py39.py diff --git a/README.md b/README.md index c8e7de99219..016aa1000d7 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,11 @@ patches as possible in the future. * Module docstrings are formatted same as other docstrings (see [psf/black#3493](https://github.com/psf/black/issues/3493)). +* Temporarily disabled the following _Black_ future style changes: + + * https://github.com/psf/black/pull/2916 + * https://github.com/psf/black/pull/2278 + ## Historical differences These are differences that existed in the past. We have upstreamed them to diff --git a/patches/pyink.patch b/patches/pyink.patch index d743cfa1174..d595eac4401 100644 --- a/patches/pyink.patch +++ b/patches/pyink.patch @@ -80,7 +80,7 @@ quiet: bool, verbose: bool, required_version: Optional[str], -@@ -536,6 +579,7 @@ def main( # noqa: C901 +@@ -540,6 +583,7 @@ def main( # noqa: C901 else: # We'll autodetect later. 
versions = set() @@ -88,7 +88,7 @@ mode = Mode( target_versions=versions, line_length=line_length, -@@ -547,8 +591,36 @@ def main( # noqa: C901 +@@ -551,8 +595,36 @@ def main( # noqa: C901 experimental_string_processing=experimental_string_processing, preview=preview, python_cell_magics=set(python_cell_magics), @@ -125,7 +125,7 @@ if code is not None: # Run in quiet mode by default with -c; the extra output isn't useful. # You can still pass -v to get verbose output. -@@ -592,6 +664,7 @@ def main( # noqa: C901 +@@ -596,6 +668,7 @@ def main( # noqa: C901 write_back=write_back, mode=mode, report=report, @@ -133,7 +133,7 @@ ) else: from pyink.concurrency import reformat_many -@@ -735,7 +808,13 @@ def reformat_code( +@@ -740,7 +813,13 @@ def reformat_code( # not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26 @mypyc_attr(patchable=True) def reformat_one( @@ -148,7 +148,7 @@ ) -> None: """Reformat a single file under `src` without spawning child processes. -@@ -760,7 +839,9 @@ def reformat_one( +@@ -765,7 +844,9 @@ def reformat_one( mode = replace(mode, is_pyi=True) elif src.suffix == ".ipynb": mode = replace(mode, is_ipynb=True) @@ -159,7 +159,7 @@ changed = Changed.YES else: cache: Cache = {} -@@ -771,7 +852,7 @@ def reformat_one( +@@ -776,7 +857,7 @@ def reformat_one( if res_src_s in cache and cache[res_src_s] == get_cache_info(res_src): changed = Changed.CACHED if changed is not Changed.CACHED and format_file_in_place( @@ -168,7 +168,7 @@ ): changed = Changed.YES if (write_back is WriteBack.YES and changed is not Changed.CACHED) or ( -@@ -791,6 +872,8 @@ def format_file_in_place( +@@ -796,6 +877,8 @@ def format_file_in_place( mode: Mode, write_back: WriteBack = WriteBack.NO, lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy @@ -177,7 +177,7 @@ ) -> bool: """Format file under `src` path. Return True if changed. -@@ -810,7 +893,9 @@ def format_file_in_place( +@@ -815,7 +898,9 @@ def format_file_in_place( header = buf.readline() src_contents, encoding, newline = decode_bytes(buf.read()) try: @@ -188,7 +188,7 @@ except NothingChanged: return False except JSONDecodeError: -@@ -855,6 +940,7 @@ def format_stdin_to_stdout( +@@ -860,6 +945,7 @@ def format_stdin_to_stdout( content: Optional[str] = None, write_back: WriteBack = WriteBack.NO, mode: Mode, @@ -196,7 +196,7 @@ ) -> bool: """Format file on stdin. Return True if changed. -@@ -901,7 +987,11 @@ def format_stdin_to_stdout( +@@ -906,7 +992,11 @@ def format_stdin_to_stdout( def check_stability_and_equivalence( @@ -209,7 +209,7 @@ ) -> None: """Perform stability and equivalence checks. -@@ -910,10 +1000,16 @@ def check_stability_and_equivalence( +@@ -915,10 +1005,16 @@ def check_stability_and_equivalence( content differently. """ assert_equivalent(src_contents, dst_contents) @@ -228,7 +228,7 @@ """Reformat contents of a file and return new contents. If `fast` is False, additionally confirm that the reformatted code is -@@ -926,13 +1022,15 @@ def format_file_contents(src_contents: s +@@ -931,13 +1027,15 @@ def format_file_contents(src_contents: s if mode.is_ipynb: dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode) else: @@ -246,7 +246,7 @@ return dst_contents -@@ -1043,7 +1141,12 @@ def format_ipynb_string(src_contents: st +@@ -1048,7 +1146,12 @@ def format_ipynb_string(src_contents: st raise NothingChanged @@ -260,7 +260,7 @@ """Reformat a string and return new contents. 
`mode` determines formatting options, such as how many characters per line are -@@ -1072,17 +1175,28 @@ def format_str(src_contents: str, *, mod +@@ -1077,17 +1180,28 @@ def format_str(src_contents: str, *, mod ) -> None: hey @@ -292,22 +292,27 @@ src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) dst_blocks: List[LinesBlock] = [] if mode.target_versions: -@@ -1091,7 +1205,14 @@ def _format_str_once(src_contents: str, +@@ -1096,12 +1210,19 @@ def _format_str_once(src_contents: str, future_imports = get_future_imports(src_node) versions = detect_target_versions(src_node, future_imports=future_imports) + if mode.string_normalization and mode.quote_style == QuoteStyle.MAJORITY: + mode = replace(mode, majority_quote=ink.majority_quote(src_node)) + context_manager_features = { + feature + for feature in {Feature.PARENTHESIZED_CONTEXT_MANAGERS} + if supports_feature(versions, feature) + } normalize_fmt_off(src_node, preview=mode.preview) + + if lines: + # This should be called after normalize_fmt_off. + ink.convert_unchanged_lines(src_node, lines) + - lines = LineGenerator(mode=mode) + lines = LineGenerator(mode=mode, features=context_manager_features) elt = EmptyLineTracker(mode=mode) split_line_features = { -@@ -1344,12 +1465,20 @@ def assert_equivalent(src: str, dst: str +@@ -1375,12 +1496,20 @@ def assert_equivalent(src: str, dst: str ) from None @@ -349,7 +354,7 @@ Line, append_leaves, can_be_split, -@@ -78,6 +79,9 @@ LeafID = int +@@ -80,6 +81,9 @@ LeafID = int LN = Union[Leaf, Node] @@ -359,7 +364,7 @@ class CannotSplit(CannotTransform): """A readable split that fits the allotted line length is impossible.""" -@@ -96,7 +100,9 @@ class LineGenerator(Visitor[Line]): +@@ -99,7 +103,9 @@ class LineGenerator(Visitor[Line]): self.current_line: Line self.__post_init__() @@ -370,7 +375,7 @@ """Generate a line. If the line is empty, only emit if it makes sense. -@@ -105,11 +111,20 @@ class LineGenerator(Visitor[Line]): +@@ -108,11 +114,20 @@ class LineGenerator(Visitor[Line]): If any lines were generated, set up a new current_line. """ if not self.current_line: @@ -393,7 +398,7 @@ yield complete_line def visit_default(self, node: LN) -> Iterator[Line]: -@@ -135,7 +150,9 @@ class LineGenerator(Visitor[Line]): +@@ -138,7 +153,9 @@ class LineGenerator(Visitor[Line]): normalize_prefix(node, inside_brackets=any_open_brackets) if self.mode.string_normalization and node.type == token.STRING: node.value = normalize_string_prefix(node.value) @@ -404,7 +409,19 @@ if node.type == token.NUMBER: normalize_numeric_literal(node) if node.type not in WHITESPACE: -@@ -145,7 +162,7 @@ class LineGenerator(Visitor[Line]): +@@ -148,7 +165,10 @@ class LineGenerator(Visitor[Line]): + def visit_test(self, node: Node) -> Iterator[Line]: + """Visit an `x if y else z` test""" + +- if Preview.parenthesize_conditional_expressions in self.mode: ++ if ( ++ Preview.parenthesize_conditional_expressions in self.mode ++ and not self.mode.is_pyink ++ ): + already_parenthesized = ( + node.prev_sibling and node.prev_sibling.type == token.LPAR + ) +@@ -164,7 +184,7 @@ class LineGenerator(Visitor[Line]): def visit_INDENT(self, node: Leaf) -> Iterator[Line]: """Increase indentation level, maybe yield a line.""" # In blib2to3 INDENT never holds comments. 
@@ -413,7 +430,7 @@ yield from self.visit_default(node) def visit_DEDENT(self, node: Leaf) -> Iterator[Line]: -@@ -160,7 +177,7 @@ class LineGenerator(Visitor[Line]): +@@ -179,7 +199,7 @@ class LineGenerator(Visitor[Line]): yield from self.visit_default(node) # Finally, emit the dedent. @@ -422,7 +439,7 @@ def visit_stmt( self, node: Node, keywords: Set[str], parens: Set[str] -@@ -255,9 +272,9 @@ class LineGenerator(Visitor[Line]): +@@ -278,9 +298,9 @@ class LineGenerator(Visitor[Line]): if self.mode.is_pyi and is_stub_body(node): yield from self.visit_default(node) else: @@ -434,16 +451,23 @@ else: if ( -@@ -348,7 +365,7 @@ class LineGenerator(Visitor[Line]): +@@ -371,10 +391,13 @@ class LineGenerator(Visitor[Line]): yield from self.visit_default(node) def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: +- if Preview.hex_codes_in_unicode_sequences in self.mode: ++ if ( ++ Preview.hex_codes_in_unicode_sequences in self.mode ++ and not self.mode.is_pyink ++ ): + normalize_unicode_escape_sequences(leaf) + - if is_docstring(leaf) and "\\\n" not in leaf.value: + if is_docstring(leaf, self.mode.is_pyink) and "\\\n" not in leaf.value: # We're ignoring docstrings with backslash newline escapes because changing # indentation of those changes the AST representation of the code. if Preview.normalize_docstring_quotes_and_prefixes_properly in self.mode: -@@ -363,7 +380,9 @@ class LineGenerator(Visitor[Line]): +@@ -389,7 +412,9 @@ class LineGenerator(Visitor[Line]): # formatting as visit_default() is called *after*. To avoid a # situation where this function formats a docstring differently on # the second pass, normalize it early. @@ -454,7 +478,7 @@ else: docstring = leaf.value else: -@@ -380,7 +399,7 @@ class LineGenerator(Visitor[Line]): +@@ -406,7 +431,7 @@ class LineGenerator(Visitor[Line]): quote_len = 1 if docstring[1] != quote_char else 3 docstring = docstring[quote_len:-quote_len] docstring_started_empty = not docstring @@ -463,7 +487,7 @@ if is_multiline_string(leaf): docstring = fix_docstring(docstring, indent) -@@ -463,7 +482,8 @@ class LineGenerator(Visitor[Line]): +@@ -493,7 +518,8 @@ class LineGenerator(Visitor[Line]): self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) @@ -473,7 +497,7 @@ self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) self.visit_async_funcdef = self.visit_async_stmt self.visit_decorated = self.visit_decorators -@@ -490,10 +510,11 @@ def transform_line( +@@ -520,10 +546,11 @@ def transform_line( ll = mode.line_length sn = mode.string_normalization @@ -489,7 +513,7 @@ transformers: List[Transformer] if ( -@@ -681,8 +702,7 @@ def _first_right_hand_split( +@@ -711,8 +738,7 @@ def _first_right_hand_split( omit: Collection[LeafID] = (), ) -> _RHSResult: """Split the line into head, body, tail starting with the last bracket pair. @@ -499,7 +523,7 @@ _maybe_split_omitting_optional_parens to get an opinion whether to prefer splitting on the right side of an assignment statement. 
""" -@@ -711,12 +731,51 @@ def _first_right_hand_split( +@@ -741,12 +767,51 @@ def _first_right_hand_split( tail_leaves.reverse() body_leaves.reverse() head_leaves.reverse() @@ -554,7 +578,7 @@ tail = bracket_split_build_line( tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail ) -@@ -874,7 +933,7 @@ def bracket_split_build_line( +@@ -904,7 +969,7 @@ def bracket_split_build_line( result = Line(mode=original.mode, depth=original.depth) if component is _BracketSplitComponent.body: result.inside_brackets = True @@ -563,7 +587,7 @@ if leaves: # Since body is a new indent level, remove spurious leading whitespace. normalize_prefix(leaves[0], inside_brackets=True) -@@ -1350,7 +1409,7 @@ def generate_trailers_to_omit(line: Line +@@ -1440,7 +1505,7 @@ def generate_trailers_to_omit(line: Line if not line.magic_trailing_comma: yield omit @@ -798,7 +822,7 @@ from warnings import warn if sys.version_info < (3, 8): -@@ -169,11 +169,26 @@ class Deprecated(UserWarning): +@@ -176,11 +176,26 @@ class Deprecated(UserWarning): """Visible deprecation warning.""" @@ -825,7 +849,7 @@ is_pyi: bool = False is_ipynb: bool = False skip_source_first_line: bool = False -@@ -181,6 +196,8 @@ class Mode: +@@ -188,6 +203,8 @@ class Mode: experimental_string_processing: bool = False python_cell_magics: Set[str] = field(default_factory=set) preview: bool = False @@ -834,7 +858,7 @@ def __post_init__(self) -> None: if self.experimental_string_processing: -@@ -215,12 +232,25 @@ class Mode: +@@ -222,12 +239,25 @@ class Mode: version_str, str(self.line_length), str(int(self.string_normalization)), @@ -1013,7 +1037,7 @@ markers = [ --- a/strings.py +++ b/strings.py -@@ -12,6 +12,8 @@ if sys.version_info < (3, 8): +@@ -14,6 +14,8 @@ if sys.version_info < (3, 8): else: from typing import Final @@ -1022,7 +1046,7 @@ STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. STRING_PREFIX_RE: Final = re.compile( -@@ -164,8 +166,10 @@ def _cached_compile(pattern: str) -> Pat +@@ -175,8 +177,10 @@ def _cached_compile(pattern: str) -> Pat return re.compile(pattern) @@ -1035,7 +1059,7 @@ Adds or removes backslashes as appropriate. Doesn't parse and fix strings nested in f-strings. -@@ -232,7 +236,7 @@ def normalize_string_quotes(s: str) -> s +@@ -243,8 +247,8 @@ def normalize_string_quotes(s: str) -> s if new_escape_count > orig_escape_count: return s # Do not introduce more escaping @@ -1045,6 +1069,7 @@ + return s # Prefer `preferred_quote`. 
return f"{prefix}{new_quote}{new_body}{new_quote}" + --- a/tests/data/simple_cases/fmtonoff.py +++ b/tests/data/simple_cases/fmtonoff.py @@ -195,7 +195,6 @@ import sys @@ -1065,7 +1090,7 @@ +pyink = false --- a/tests/test_black.py +++ b/tests/test_black.py -@@ -1240,7 +1240,7 @@ class BlackTestCase(BlackBaseTestCase): +@@ -1287,7 +1287,7 @@ class BlackTestCase(BlackBaseTestCase): report=report, ) fsts.assert_called_once_with( @@ -1074,7 +1099,7 @@ ) # __PYINK_STDIN_FILENAME__ should have been stripped report.done.assert_called_with(expected, pyink.Changed.YES) -@@ -1266,6 +1266,7 @@ class BlackTestCase(BlackBaseTestCase): +@@ -1313,6 +1313,7 @@ class BlackTestCase(BlackBaseTestCase): fast=True, write_back=pyink.WriteBack.YES, mode=replace(DEFAULT_MODE, is_pyi=True), @@ -1082,7 +1107,7 @@ ) # __PYINK_STDIN_FILENAME__ should have been stripped report.done.assert_called_with(expected, pyink.Changed.YES) -@@ -1291,6 +1292,7 @@ class BlackTestCase(BlackBaseTestCase): +@@ -1338,6 +1339,7 @@ class BlackTestCase(BlackBaseTestCase): fast=True, write_back=pyink.WriteBack.YES, mode=replace(DEFAULT_MODE, is_ipynb=True), @@ -1090,7 +1115,7 @@ ) # __PYINK_STDIN_FILENAME__ should have been stripped report.done.assert_called_with(expected, pyink.Changed.YES) -@@ -2345,6 +2347,113 @@ class TestFileCollection: +@@ -2392,6 +2394,113 @@ class TestFileCollection: stdin_filename=stdin_filename, ) @@ -1206,7 +1231,7 @@ with open(pyink.__file__, "r", encoding="utf-8") as _bf: --- a/tests/test_format.py +++ b/tests/test_format.py -@@ -58,6 +58,15 @@ def test_preview_minimum_python_310_form +@@ -59,6 +59,15 @@ def test_preview_minimum_python_310_form assert_format(source, expected, mode, minimum_version=(3, 10)) @@ -1219,9 +1244,9 @@ + ) + + - # =============== # - # Complex cases - # ============= # + def test_preview_context_managers_targeting_py38() -> None: + source, expected = read_data("preview_context_managers", "targeting_py38.py") + mode = pyink.Mode(preview=True, target_versions={pyink.TargetVersion.PY38}) --- a/tox.ini +++ b/tox.ini @@ -97,4 +97,4 @@ setenv = PYTHONPATH = {toxinidir}/src @@ -1257,7 +1282,7 @@ @abstractmethod def do_match(self, line: Line) -> TMatchResult: -@@ -639,7 +642,9 @@ class StringMerger(StringTransformer, Cu +@@ -645,7 +648,9 @@ class StringMerger(StringTransformer, Cu S_leaf = Leaf(token.STRING, S) if self.normalize_strings: @@ -1268,7 +1293,7 @@ # Fill the 'custom_splits' list with the appropriate CustomSplit objects. temp_string = S_leaf.value[len(prefix) + 1 : -1] -@@ -1078,7 +1083,7 @@ class BaseStringSplitter(StringTransform +@@ -1084,7 +1089,7 @@ class BaseStringSplitter(StringTransform # NN: The leaf that is after N. # WMA4 the whitespace at the beginning of the line. @@ -1277,7 +1302,7 @@ if is_valid_index(string_idx - 1): p_idx = string_idx - 1 -@@ -1392,7 +1397,7 @@ class StringSplitter(BaseStringSplitter, +@@ -1422,7 +1427,7 @@ class StringSplitter(BaseStringSplitter, line we will construct. """ result = self.line_length @@ -1286,7 +1311,7 @@ result -= 1 if ends_with_comma else 0 result -= string_op_leaves_length return result -@@ -1403,11 +1408,11 @@ class StringSplitter(BaseStringSplitter, +@@ -1433,11 +1438,11 @@ class StringSplitter(BaseStringSplitter, # The last index of a string of length N is N-1. max_break_idx -= 1 # Leading whitespace is not present in the string value (e.g. Leaf.value). 
@@ -1300,7 +1325,7 @@ ) return -@@ -1700,7 +1705,9 @@ class StringSplitter(BaseStringSplitter, +@@ -1730,7 +1735,9 @@ class StringSplitter(BaseStringSplitter, def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None: if self.normalize_strings: @@ -1311,7 +1336,7 @@ def _normalize_f_string(self, string: str, prefix: str) -> str: """ -@@ -1820,7 +1827,8 @@ class StringParenWrapper(BaseStringSplit +@@ -1850,7 +1857,8 @@ class StringParenWrapper(BaseStringSplit # If the string has no spaces... if " " not in string_value: # And will still violate the line length limit when split... @@ -1321,7 +1346,7 @@ if len(string_value) > max_string_length: # And has no associated custom splits... if not self.has_custom_splits(string_value): -@@ -2066,7 +2074,7 @@ class StringParenWrapper(BaseStringSplit +@@ -2096,7 +2104,7 @@ class StringParenWrapper(BaseStringSplit string_value = LL[string_idx].value string_line = Line( mode=line.mode, diff --git a/src/pyink/__init__.py b/src/pyink/__init__.py index 3d0d2716d37..323c491b863 100644 --- a/src/pyink/__init__.py +++ b/src/pyink/__init__.py @@ -248,7 +248,7 @@ def validate_regex( multiple=True, help=( "When processing Jupyter Notebooks, add the given magic to the list" - f" of known python-magics ({', '.join(PYTHON_CELL_MAGICS)})." + f" of known python-magics ({', '.join(sorted(PYTHON_CELL_MAGICS))})." " Useful for formatting cells with custom python magics." ), default=[], @@ -521,16 +521,20 @@ def main( # noqa: C901 ) normalized = [ - (source, source) - if source == "-" - else (normalize_path_maybe_ignore(Path(source), root), source) + ( + (source, source) + if source == "-" + else (normalize_path_maybe_ignore(Path(source), root), source) + ) for source in src ] srcs_string = ", ".join( [ - f'"{_norm}"' - if _norm - else f'\033[31m"{source} (skipping - invalid)"\033[34m' + ( + f'"{_norm}"' + if _norm + else f'\033[31m"{source} (skipping - invalid)"\033[34m' + ) for _norm, source in normalized ] ) @@ -742,10 +746,11 @@ def get_sources( sources.add(p) elif p.is_dir(): + p = root / normalize_path_maybe_ignore(p, ctx.obj["root"], report) if using_default_exclude: gitignore = { root: root_gitignore, - root / p: get_gitignore(p), + p: get_gitignore(p), } sources.update( gen_python_files( @@ -1207,13 +1212,18 @@ def _format_str_once( if mode.string_normalization and mode.quote_style == QuoteStyle.MAJORITY: mode = replace(mode, majority_quote=ink.majority_quote(src_node)) + context_manager_features = { + feature + for feature in {Feature.PARENTHESIZED_CONTEXT_MANAGERS} + if supports_feature(versions, feature) + } normalize_fmt_off(src_node, preview=mode.preview) if lines: # This should be called after normalize_fmt_off. 
ink.convert_unchanged_lines(src_node, lines) - lines = LineGenerator(mode=mode) + lines = LineGenerator(mode=mode, features=context_manager_features) elt = EmptyLineTracker(mode=mode) split_line_features = { feature @@ -1275,6 +1285,10 @@ def get_features_used( # noqa: C901 - relaxed decorator syntax; - usage of __future__ flags (annotations); - print / exec statements; + - parenthesized context managers; + - match statements; + - except* clause; + - variadic generics; """ features: Set[Feature] = set() if future_imports: @@ -1350,6 +1364,23 @@ def get_features_used( # noqa: C901 ): features.add(Feature.ANN_ASSIGN_EXTENDED_RHS) + elif ( + n.type == syms.with_stmt + and len(n.children) > 2 + and n.children[1].type == syms.atom + ): + atom_children = n.children[1].children + if ( + len(atom_children) == 3 + and atom_children[0].type == token.LPAR + and atom_children[1].type == syms.testlist_gexp + and atom_children[2].type == token.RPAR + ): + features.add(Feature.PARENTHESIZED_CONTEXT_MANAGERS) + + elif n.type == syms.match_stmt: + features.add(Feature.PATTERN_MATCHING) + elif ( n.type == syms.except_clause and len(n.children) >= 2 diff --git a/src/pyink/linegen.py b/src/pyink/linegen.py index d63427a3ce4..6cae568c6ba 100644 --- a/src/pyink/linegen.py +++ b/src/pyink/linegen.py @@ -47,6 +47,7 @@ is_rpar_token, is_stub_body, is_stub_suite, + is_tuple_containing_walrus, is_vararg, is_walrus_assignment, is_yield, @@ -59,6 +60,7 @@ get_string_prefix, normalize_string_prefix, normalize_string_quotes, + normalize_unicode_escape_sequences, ) from pyink.trans import ( CannotTransform, @@ -93,8 +95,9 @@ class LineGenerator(Visitor[Line]): in ways that will no longer stringify to valid Python code on the tree. """ - def __init__(self, mode: Mode) -> None: + def __init__(self, mode: Mode, features: Collection[Feature]) -> None: self.mode = mode + self.features = features self.current_line: Line self.__post_init__() @@ -157,6 +160,25 @@ def visit_default(self, node: LN) -> Iterator[Line]: self.current_line.append(node) yield from super().visit_default(node) + def visit_test(self, node: Node) -> Iterator[Line]: + """Visit an `x if y else z` test""" + + if ( + Preview.parenthesize_conditional_expressions in self.mode + and not self.mode.is_pyink + ): + already_parenthesized = ( + node.prev_sibling and node.prev_sibling.type == token.LPAR + ) + + if not already_parenthesized: + lpar = Leaf(token.LPAR, "") + rpar = Leaf(token.RPAR, "") + node.insert_child(0, lpar) + node.append_child(rpar) + + yield from self.visit_default(node) + def visit_INDENT(self, node: Leaf) -> Iterator[Line]: """Increase indentation level, maybe yield a line.""" # In blib2to3 INDENT never holds comments. @@ -191,7 +213,9 @@ def visit_stmt( `parens` holds a set of string leaf values immediately after which invisible parens should be put. 
""" - normalize_invisible_parens(node, parens_after=parens, preview=self.mode.preview) + normalize_invisible_parens( + node, parens_after=parens, mode=self.mode, features=self.features + ) for child in node.children: if is_name_token(child) and child.value in keywords: yield from self.line() @@ -244,7 +268,9 @@ def visit_funcdef(self, node: Node) -> Iterator[Line]: def visit_match_case(self, node: Node) -> Iterator[Line]: """Visit either a match or case statement.""" - normalize_invisible_parens(node, parens_after=set(), preview=self.mode.preview) + normalize_invisible_parens( + node, parens_after=set(), mode=self.mode, features=self.features + ) yield from self.line() for child in node.children: @@ -363,6 +389,12 @@ def visit_factor(self, node: Node) -> Iterator[Line]: yield from self.visit_default(node) def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: + if ( + Preview.hex_codes_in_unicode_sequences in self.mode + and not self.mode.is_pyink + ): + normalize_unicode_escape_sequences(leaf) + if is_docstring(leaf, self.mode.is_pyink) and "\\\n" not in leaf.value: # We're ignoring docstrings with backslash newline escapes because changing # indentation of those changes the AST representation of the code. @@ -404,6 +436,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: else: docstring = docstring.strip() + has_trailing_backslash = False if docstring: # Add some padding if the docstring starts / ends with a quote mark. if docstring[0] == quote_char: @@ -416,6 +449,7 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: # Odd number of tailing backslashes, add some padding to # avoid escaping the closing string quote. docstring += " " + has_trailing_backslash = True elif not docstring_started_empty: docstring = " " @@ -438,6 +472,8 @@ def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: if ( len(lines) > 1 and last_line_length + quote_len > self.mode.line_length + and len(indent) + quote_len <= self.mode.line_length + and not has_trailing_backslash ): leaf.value = prefix + quote + docstring + "\n" + indent + quote else: @@ -1128,7 +1164,7 @@ def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: def normalize_invisible_parens( - node: Node, parens_after: Set[str], *, preview: bool + node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature] ) -> None: """Make existing optional parentheses invisible or create new ones. @@ -1138,17 +1174,24 @@ def normalize_invisible_parens( Standardizes on visible parentheses for single-element tuples, and keeps existing visible parentheses for other tuples and generator expressions. """ - for pc in list_comments(node.prefix, is_endmarker=False, preview=preview): + for pc in list_comments(node.prefix, is_endmarker=False, preview=mode.preview): if pc.value in FMT_OFF: # This `node` has a prefix with `# fmt: off`, don't mess with parens. return + + # The multiple context managers grammar has a different pattern, thus this is + # separate from the for-loop below. This possibly wraps them in invisible parens, + # and later will be removed in remove_with_parens when needed. + if node.type == syms.with_stmt: + _maybe_wrap_cms_in_parens(node, mode, features) + check_lpar = False for index, child in enumerate(list(node.children)): # Fixes a bug where invisible parens are not properly stripped from # assignment statements that contain type annotations. 
if isinstance(child, Node) and child.type == syms.annassign: normalize_invisible_parens( - child, parens_after=parens_after, preview=preview + child, parens_after=parens_after, mode=mode, features=features ) # Add parentheses around long tuple unpacking in assignments. @@ -1161,7 +1204,7 @@ def normalize_invisible_parens( if check_lpar: if ( - preview + mode.preview and child.type == syms.atom and node.type == syms.for_stmt and isinstance(child.prev_sibling, Leaf) @@ -1174,7 +1217,9 @@ def normalize_invisible_parens( remove_brackets_around_comma=True, ): wrap_in_parentheses(node, child, visible=False) - elif preview and isinstance(child, Node) and node.type == syms.with_stmt: + elif ( + mode.preview and isinstance(child, Node) and node.type == syms.with_stmt + ): remove_with_parens(child, node) elif child.type == syms.atom: if maybe_make_parens_invisible_in_atom( @@ -1185,17 +1230,7 @@ def normalize_invisible_parens( elif is_one_tuple(child): wrap_in_parentheses(node, child, visible=True) elif node.type == syms.import_from: - # "import from" nodes store parentheses directly as part of - # the statement - if is_lpar_token(child): - assert is_rpar_token(node.children[-1]) - # make parentheses invisible - child.value = "" - node.children[-1].value = "" - elif child.type != token.STAR: - # insert invisible parentheses - node.insert_child(index, Leaf(token.LPAR, "")) - node.append_child(Leaf(token.RPAR, "")) + _normalize_import_from(node, child, index) break elif ( index == 1 @@ -1210,13 +1245,27 @@ def normalize_invisible_parens( elif not (isinstance(child, Leaf) and is_multiline_string(child)): wrap_in_parentheses(node, child, visible=False) - comma_check = child.type == token.COMMA if preview else False + comma_check = child.type == token.COMMA if mode.preview else False check_lpar = isinstance(child, Leaf) and ( child.value in parens_after or comma_check ) +def _normalize_import_from(parent: Node, child: LN, index: int) -> None: + # "import from" nodes store parentheses directly as part of + # the statement + if is_lpar_token(child): + assert is_rpar_token(parent.children[-1]) + # make parentheses invisible + child.value = "" + parent.children[-1].value = "" + elif child.type != token.STAR: + # insert invisible parentheses + parent.insert_child(index, Leaf(token.LPAR, "")) + parent.append_child(Leaf(token.RPAR, "")) + + def remove_await_parens(node: Node) -> None: if node.children[0].type == token.AWAIT and len(node.children) > 1: if ( @@ -1239,18 +1288,62 @@ def remove_await_parens(node: Node) -> None: # N.B. We've still removed any redundant nested brackets though :) opening_bracket = cast(Leaf, node.children[1].children[0]) closing_bracket = cast(Leaf, node.children[1].children[-1]) - bracket_contents = cast(Node, node.children[1].children[1]) - if bracket_contents.type != syms.power: - ensure_visible(opening_bracket) - ensure_visible(closing_bracket) - elif ( - bracket_contents.type == syms.power - and bracket_contents.children[0].type == token.AWAIT - ): - ensure_visible(opening_bracket) - ensure_visible(closing_bracket) - # If we are in a nested await then recurse down. 
- remove_await_parens(bracket_contents) + bracket_contents = node.children[1].children[1] + if isinstance(bracket_contents, Node): + if bracket_contents.type != syms.power: + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + elif ( + bracket_contents.type == syms.power + and bracket_contents.children[0].type == token.AWAIT + ): + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + # If we are in a nested await then recurse down. + remove_await_parens(bracket_contents) + + +def _maybe_wrap_cms_in_parens( + node: Node, mode: Mode, features: Collection[Feature] +) -> None: + """When enabled and safe, wrap the multiple context managers in invisible parens. + + It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS. + """ + if ( + Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features + or Preview.wrap_multiple_context_managers_in_parens not in mode + or len(node.children) <= 2 + # If it's an atom, it's already wrapped in parens. + or node.children[1].type == syms.atom + ): + return + colon_index: Optional[int] = None + for i in range(2, len(node.children)): + if node.children[i].type == token.COLON: + colon_index = i + break + if colon_index is not None: + lpar = Leaf(token.LPAR, "") + rpar = Leaf(token.RPAR, "") + context_managers = node.children[1:colon_index] + for child in context_managers: + child.remove() + # After wrapping, the with_stmt will look like this: + # with_stmt + # NAME 'with' + # atom + # LPAR '' + # testlist_gexp + # ... <-- context_managers + # /testlist_gexp + # RPAR '' + # /atom + # COLON ':' + new_child = Node( + syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar] + ) + node.insert_child(1, new_child) def remove_with_parens(node: Node, parent: Node) -> None: @@ -1318,6 +1411,7 @@ def maybe_make_parens_invisible_in_atom( not remove_brackets_around_comma and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY ) + or is_tuple_containing_walrus(node) ): return False @@ -1329,9 +1423,11 @@ def maybe_make_parens_invisible_in_atom( syms.return_stmt, syms.except_clause, syms.funcdef, + syms.with_stmt, # these ones aren't useful to end users, but they do please fuzzers syms.for_stmt, syms.del_stmt, + syms.for_stmt, ]: return False diff --git a/src/pyink/mode.py b/src/pyink/mode.py index 0249dd840fb..ed7c49294ff 100644 --- a/src/pyink/mode.py +++ b/src/pyink/mode.py @@ -50,6 +50,7 @@ class Feature(Enum): EXCEPT_STAR = 14 VARIADIC_GENERICS = 15 DEBUG_F_STRINGS = 16 + PARENTHESIZED_CONTEXT_MANAGERS = 17 FORCE_OPTIONAL_PARENTHESES = 50 # __future__ flags @@ -106,6 +107,7 @@ class Feature(Enum): Feature.POS_ONLY_ARGUMENTS, Feature.UNPACKING_ON_FLOW, Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, }, TargetVersion.PY310: { Feature.F_STRINGS, @@ -120,6 +122,7 @@ class Feature(Enum): Feature.POS_ONLY_ARGUMENTS, Feature.UNPACKING_ON_FLOW, Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, Feature.PATTERN_MATCHING, }, TargetVersion.PY311: { @@ -135,6 +138,7 @@ class Feature(Enum): Feature.POS_ONLY_ARGUMENTS, Feature.UNPACKING_ON_FLOW, Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, Feature.PATTERN_MATCHING, Feature.EXCEPT_STAR, Feature.VARIADIC_GENERICS, @@ -149,6 +153,7 @@ def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> b class Preview(Enum): """Individual preview style features.""" + hex_codes_in_unicode_sequences = auto() annotation_parens = auto() empty_lines_before_class_or_def_with_leading_comments = 
auto()
     handle_trailing_commas_in_head = auto()
@@ -161,8 +166,10 @@
     # NOTE: string_processing requires wrap_long_dict_values_in_parens
     # for https://github.com/psf/black/issues/3117 to be fixed.
     string_processing = auto()
+    parenthesize_conditional_expressions = auto()
     skip_magic_trailing_comma_in_subscript = auto()
     wrap_long_dict_values_in_parens = auto()
+    wrap_multiple_context_managers_in_parens = auto()
 
 
 class Deprecated(UserWarning):
diff --git a/src/pyink/nodes.py b/src/pyink/nodes.py
index 00333ae639f..b559218fdc9 100644
--- a/src/pyink/nodes.py
+++ b/src/pyink/nodes.py
@@ -571,6 +571,17 @@ def is_one_tuple(node: LN) -> bool:
     )
 
 
+def is_tuple_containing_walrus(node: LN) -> bool:
+    """Return True if `node` holds a tuple that contains a walrus operator."""
+    if node.type != syms.atom:
+        return False
+    gexp = unwrap_singleton_parenthesis(node)
+    if gexp is None or gexp.type != syms.testlist_gexp:
+        return False
+
+    return any(child.type == syms.namedexpr_test for child in gexp.children)
+
+
 def is_one_sequence_between(
     opening: Leaf,
     closing: Leaf,
diff --git a/src/pyink/strings.py b/src/pyink/strings.py
index ca95ba49420..4f43c8f4227 100644
--- a/src/pyink/strings.py
+++ b/src/pyink/strings.py
@@ -5,7 +5,9 @@
 import re
 import sys
 from functools import lru_cache
-from typing import List, Pattern
+from typing import List, Match, Pattern
+
+from blib2to3.pytree import Leaf
 
 if sys.version_info < (3, 8):
     from typing_extensions import Final
@@ -20,6 +22,15 @@
     r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL
 )
 FIRST_NON_WHITESPACE_RE: Final = re.compile(r"\s*\t+\s*(\S)")
+UNICODE_ESCAPE_RE: Final = re.compile(
+    r"(?P<backslashes>\\+)(?P<body>"
+    r"(u(?P<u>[a-fA-F0-9]{4}))"  # Character with 16-bit hex value xxxx
+    r"|(U(?P<U>[a-fA-F0-9]{8}))"  # Character with 32-bit hex value xxxxxxxx
+    r"|(x(?P<x>[a-fA-F0-9]{2}))"  # Character with hex value hh
+    r"|(N\{(?P<N>[a-zA-Z0-9 \-]{2,})\})"  # Character named name in the Unicode database
+    r")",
+    re.VERBOSE,
+)
 
 
 def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
@@ -240,3 +251,34 @@ def normalize_string_quotes(s: str, *, preferred_quote: Quote) -> str:
         return s  # Prefer `preferred_quote`.
 
     return f"{prefix}{new_quote}{new_body}{new_quote}"
+
+
+def normalize_unicode_escape_sequences(leaf: Leaf) -> None:
+    """Replace hex codes in Unicode escape sequences with lowercase representation."""
+    text = leaf.value
+    prefix = get_string_prefix(text)
+    if "r" in prefix.lower():
+        return
+
+    def replace(m: Match[str]) -> str:
+        groups = m.groupdict()
+        back_slashes = groups["backslashes"]
+
+        if len(back_slashes) % 2 == 0:
+            return back_slashes + groups["body"]
+
+        if groups["u"]:
+            # \u
+            return back_slashes + "u" + groups["u"].lower()
+        elif groups["U"]:
+            # \U
+            return back_slashes + "U" + groups["U"].lower()
+        elif groups["x"]:
+            # \x
+            return back_slashes + "x" + groups["x"].lower()
+        else:
+            assert groups["N"], f"Unexpected match: {m}"
+            # \N{}
+            return back_slashes + "N{" + groups["N"].upper() + "}"
+
+    leaf.value = re.sub(UNICODE_ESCAPE_RE, replace, text)
diff --git a/src/pyink/trans.py b/src/pyink/trans.py
index 1432f15d7fd..de9bf7cae6e 100644
--- a/src/pyink/trans.py
+++ b/src/pyink/trans.py
@@ -575,6 +575,12 @@ def make_naked(string: str, string_prefix: str) -> str:
             characters have been escaped.
             """
             assert_is_leaf_string(string)
+            if "f" in string_prefix:
+                string = _toggle_fexpr_quotes(string, QUOTE)
+                # After quotes toggling, quotes in expressions won't be escaped
+                # because quotes can't be reused in f-strings.
So we can simply + # let the escaping logic below run without knowing f-string + # expressions. RE_EVEN_BACKSLASHES = r"(?:(? bool: return any(iter_fexpr_spans(s)) +def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str: + """ + Toggles quotes used in f-string expressions that are `old_quote`. + + f-string expressions can't contain backslashes, so we need to toggle the + quotes if the f-string itself will end up using the same quote. We can + simply toggle without escaping because, quotes can't be reused in f-string + expressions. They will fail to parse. + + NOTE: If PEP 701 is accepted, above statement will no longer be true. + Though if quotes can be reused, we can simply reuse them without updates or + escaping, once Black figures out how to parse the new grammar. + """ + new_quote = "'" if old_quote == '"' else '"' + parts = [] + previous_index = 0 + for start, end in iter_fexpr_spans(fstring): + parts.append(fstring[previous_index:start]) + parts.append(fstring[start:end].replace(old_quote, new_quote)) + previous_index = end + parts.append(fstring[previous_index:]) + return "".join(parts) + + class StringSplitter(BaseStringSplitter, CustomSplitMapMixin): """ StringTransformer that splits "atom" strings (i.e. strings which exist on diff --git a/tests/data/conditional_expression.py b/tests/data/conditional_expression.py new file mode 100644 index 00000000000..620a12dc986 --- /dev/null +++ b/tests/data/conditional_expression.py @@ -0,0 +1,160 @@ +long_kwargs_single_line = my_function( + foo="test, this is a sample value", + bar=some_long_value_name_foo_bar_baz if some_boolean_variable else some_fallback_value_foo_bar_baz, + baz="hello, this is a another value", +) + +multiline_kwargs_indented = my_function( + foo="test, this is a sample value", + bar=some_long_value_name_foo_bar_baz + if some_boolean_variable + else some_fallback_value_foo_bar_baz, + baz="hello, this is a another value", +) + +imploding_kwargs = my_function( + foo="test, this is a sample value", + bar=a + if foo + else b, + baz="hello, this is a another value", +) + +imploding_line = ( + 1 + if 1 + 1 == 2 + else 0 +) + +exploding_line = "hello this is a slightly long string" if some_long_value_name_foo_bar_baz else "this one is a little shorter" + +positional_argument_test(some_long_value_name_foo_bar_baz if some_boolean_variable else some_fallback_value_foo_bar_baz) + +def weird_default_argument(x=some_long_value_name_foo_bar_baz + if SOME_CONSTANT + else some_fallback_value_foo_bar_baz): + pass + +nested = "hello this is a slightly long string" if (some_long_value_name_foo_bar_baz if + nesting_test_expressions else some_fallback_value_foo_bar_baz) \ + else "this one is a little shorter" + +generator_expression = ( + some_long_value_name_foo_bar_baz if some_boolean_variable else some_fallback_value_foo_bar_baz for some_boolean_variable in some_iterable +) + + +def limit_offset_sql(self, low_mark, high_mark): + """Return LIMIT/OFFSET SQL clause.""" + limit, offset = self._get_limit_offset_params(low_mark, high_mark) + return " ".join( + sql + for sql in ( + "LIMIT %d" % limit if limit else None, + ("OFFSET %d" % offset) if offset else None, + ) + if sql + ) + + +def something(): + clone._iterable_class = ( + NamedValuesListIterable + if named + else FlatValuesListIterable + if flat + else ValuesListIterable + ) + +# output + +long_kwargs_single_line = my_function( + foo="test, this is a sample value", + bar=( + some_long_value_name_foo_bar_baz + if some_boolean_variable + else some_fallback_value_foo_bar_baz + ), 
+ baz="hello, this is a another value", +) + +multiline_kwargs_indented = my_function( + foo="test, this is a sample value", + bar=( + some_long_value_name_foo_bar_baz + if some_boolean_variable + else some_fallback_value_foo_bar_baz + ), + baz="hello, this is a another value", +) + +imploding_kwargs = my_function( + foo="test, this is a sample value", + bar=a if foo else b, + baz="hello, this is a another value", +) + +imploding_line = 1 if 1 + 1 == 2 else 0 + +exploding_line = ( + "hello this is a slightly long string" + if some_long_value_name_foo_bar_baz + else "this one is a little shorter" +) + +positional_argument_test( + some_long_value_name_foo_bar_baz + if some_boolean_variable + else some_fallback_value_foo_bar_baz +) + + +def weird_default_argument( + x=( + some_long_value_name_foo_bar_baz + if SOME_CONSTANT + else some_fallback_value_foo_bar_baz + ), +): + pass + + +nested = ( + "hello this is a slightly long string" + if ( + some_long_value_name_foo_bar_baz + if nesting_test_expressions + else some_fallback_value_foo_bar_baz + ) + else "this one is a little shorter" +) + +generator_expression = ( + ( + some_long_value_name_foo_bar_baz + if some_boolean_variable + else some_fallback_value_foo_bar_baz + ) + for some_boolean_variable in some_iterable +) + + +def limit_offset_sql(self, low_mark, high_mark): + """Return LIMIT/OFFSET SQL clause.""" + limit, offset = self._get_limit_offset_params(low_mark, high_mark) + return " ".join( + sql + for sql in ( + "LIMIT %d" % limit if limit else None, + ("OFFSET %d" % offset) if offset else None, + ) + if sql + ) + + +def something(): + clone._iterable_class = ( + NamedValuesListIterable + if named + else FlatValuesListIterable if flat else ValuesListIterable + ) diff --git a/tests/data/fast/pep_572_do_not_remove_parens.py b/tests/data/fast/pep_572_do_not_remove_parens.py index 20e80a69377..05619ddcc2b 100644 --- a/tests/data/fast/pep_572_do_not_remove_parens.py +++ b/tests/data/fast/pep_572_do_not_remove_parens.py @@ -19,3 +19,7 @@ @(please := stop) def sigh(): pass + + +for (x := 3, y := 4) in y: + pass diff --git a/tests/data/miscellaneous/linelength6.py b/tests/data/miscellaneous/linelength6.py new file mode 100644 index 00000000000..4fb342726f5 --- /dev/null +++ b/tests/data/miscellaneous/linelength6.py @@ -0,0 +1,5 @@ +# Regression test for #3427, which reproes only with line length <= 6 +def f(): + """ + x + """ diff --git a/tests/data/preview/format_unicode_escape_seq.py b/tests/data/preview/format_unicode_escape_seq.py new file mode 100644 index 00000000000..3440696c303 --- /dev/null +++ b/tests/data/preview/format_unicode_escape_seq.py @@ -0,0 +1,33 @@ +x = "\x1F" +x = "\\x1B" +x = "\\\x1B" +x = "\U0001F60E" +x = "\u0001F60E" +x = r"\u0001F60E" +x = "don't format me" +x = "\xA3" +x = "\u2717" +x = "\uFaCe" +x = "\N{ox}\N{OX}" +x = "\N{lAtIn smaLL letteR x}" +x = "\N{CYRILLIC small LETTER BYELORUSSIAN-UKRAINIAN I}" +x = b"\x1Fdon't byte" +x = rb"\x1Fdon't format" + +# output + +x = "\x1f" +x = "\\x1B" +x = "\\\x1b" +x = "\U0001f60e" +x = "\u0001F60E" +x = r"\u0001F60E" +x = "don't format me" +x = "\xa3" +x = "\u2717" +x = "\uface" +x = "\N{OX}\N{OX}" +x = "\N{LATIN SMALL LETTER X}" +x = "\N{CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I}" +x = b"\x1fdon't byte" +x = rb"\x1Fdon't format" diff --git a/tests/data/preview/long_strings__regression.py b/tests/data/preview/long_strings__regression.py index ef9007f4ce1..eead8c204a9 100644 --- a/tests/data/preview/long_strings__regression.py +++ 
b/tests/data/preview/long_strings__regression.py @@ -550,6 +550,16 @@ async def foo(self): ("item1", "item2", "item3"), } +# Regression test for https://github.com/psf/black/issues/3506. +s = ( + "With single quote: ' " + f" {my_dict['foo']}" + ' With double quote: " ' + f' {my_dict["bar"]}' +) + +s = f'Lorem Ipsum is simply dummy text of the printing and typesetting industry:\'{my_dict["foo"]}\'' + # output @@ -1235,3 +1245,11 @@ async def foo(self): # And there is a comment before the value ("item1", "item2", "item3"), } + +# Regression test for https://github.com/psf/black/issues/3506. +s = f"With single quote: ' {my_dict['foo']} With double quote: \" {my_dict['bar']}" + +s = ( + "Lorem Ipsum is simply dummy text of the printing and typesetting" + f" industry:'{my_dict['foo']}'" +) diff --git a/tests/data/preview/prefer_rhs_split.py b/tests/data/preview/prefer_rhs_split.py index 5b89113e618..2f3cf33db41 100644 --- a/tests/data/preview/prefer_rhs_split.py +++ b/tests/data/preview/prefer_rhs_split.py @@ -50,7 +50,7 @@ forth_item, fifth_item, last_item_very_loooooong, -) = everyting = some_loooooog_function_name( +) = everything = some_looooong_function_name( first_argument, second_argument, third_argument ) diff --git a/tests/data/preview/remove_await_parens.py b/tests/data/preview/remove_await_parens.py index 571210a2d80..8c7223d2f39 100644 --- a/tests/data/preview/remove_await_parens.py +++ b/tests/data/preview/remove_await_parens.py @@ -77,6 +77,9 @@ async def main(): async def main(): await (await (await (await (await (asyncio.sleep(1)))))) +async def main(): + await (yield) + # output import asyncio @@ -167,3 +170,7 @@ async def main(): async def main(): await (await (await (await (await asyncio.sleep(1))))) + + +async def main(): + await (yield) diff --git a/tests/data/preview_context_managers/auto_detect/features_3_10.py b/tests/data/preview_context_managers/auto_detect/features_3_10.py new file mode 100644 index 00000000000..1458df1cb41 --- /dev/null +++ b/tests/data/preview_context_managers/auto_detect/features_3_10.py @@ -0,0 +1,35 @@ +# This file uses pattern matching introduced in Python 3.10. + + +match http_code: + case 404: + print("Not found") + + +with \ + make_context_manager1() as cm1, \ + make_context_manager2() as cm2, \ + make_context_manager3() as cm3, \ + make_context_manager4() as cm4 \ +: + pass + + +# output + + +# This file uses pattern matching introduced in Python 3.10. + + +match http_code: + case 404: + print("Not found") + + +with ( + make_context_manager1() as cm1, + make_context_manager2() as cm2, + make_context_manager3() as cm3, + make_context_manager4() as cm4, +): + pass diff --git a/tests/data/preview_context_managers/auto_detect/features_3_11.py b/tests/data/preview_context_managers/auto_detect/features_3_11.py new file mode 100644 index 00000000000..f83c5330ab3 --- /dev/null +++ b/tests/data/preview_context_managers/auto_detect/features_3_11.py @@ -0,0 +1,37 @@ +# This file uses except* clause in Python 3.11. + + +try: + some_call() +except* Error as e: + pass + + +with \ + make_context_manager1() as cm1, \ + make_context_manager2() as cm2, \ + make_context_manager3() as cm3, \ + make_context_manager4() as cm4 \ +: + pass + + +# output + + +# This file uses except* clause in Python 3.11. 
+ + +try: + some_call() +except* Error as e: + pass + + +with ( + make_context_manager1() as cm1, + make_context_manager2() as cm2, + make_context_manager3() as cm3, + make_context_manager4() as cm4, +): + pass diff --git a/tests/data/preview_context_managers/auto_detect/features_3_8.py b/tests/data/preview_context_managers/auto_detect/features_3_8.py new file mode 100644 index 00000000000..e05094e1421 --- /dev/null +++ b/tests/data/preview_context_managers/auto_detect/features_3_8.py @@ -0,0 +1,30 @@ +# This file doesn't use any Python 3.9+ only grammars. + + +# Make sure parens around a single context manager don't get autodetected as +# Python 3.9+. +with (a): + pass + + +with \ + make_context_manager1() as cm1, \ + make_context_manager2() as cm2, \ + make_context_manager3() as cm3, \ + make_context_manager4() as cm4 \ +: + pass + + +# output +# This file doesn't use any Python 3.9+ only grammars. + + +# Make sure parens around a single context manager don't get autodetected as +# Python 3.9+. +with a: + pass + + +with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4: + pass diff --git a/tests/data/preview_context_managers/auto_detect/features_3_9.py b/tests/data/preview_context_managers/auto_detect/features_3_9.py new file mode 100644 index 00000000000..0d28f993108 --- /dev/null +++ b/tests/data/preview_context_managers/auto_detect/features_3_9.py @@ -0,0 +1,34 @@ +# This file uses parenthesized context managers introduced in Python 3.9. + + +with \ + make_context_manager1() as cm1, \ + make_context_manager2() as cm2, \ + make_context_manager3() as cm3, \ + make_context_manager4() as cm4 \ +: + pass + + +with ( + new_new_new1() as cm1, + new_new_new2() +): + pass + + +# output +# This file uses parenthesized context managers introduced in Python 3.9. 
+ + +with ( + make_context_manager1() as cm1, + make_context_manager2() as cm2, + make_context_manager3() as cm3, + make_context_manager4() as cm4, +): + pass + + +with new_new_new1() as cm1, new_new_new2(): + pass diff --git a/tests/data/preview_context_managers/targeting_py38.py b/tests/data/preview_context_managers/targeting_py38.py new file mode 100644 index 00000000000..6ec4684e441 --- /dev/null +++ b/tests/data/preview_context_managers/targeting_py38.py @@ -0,0 +1,38 @@ +with \ + make_context_manager1() as cm1, \ + make_context_manager2() as cm2, \ + make_context_manager3() as cm3, \ + make_context_manager4() as cm4 \ +: + pass + + +with \ + make_context_manager1() as cm1, \ + make_context_manager2(), \ + make_context_manager3() as cm3, \ + make_context_manager4() \ +: + pass + + +with \ + new_new_new1() as cm1, \ + new_new_new2() \ +: + pass + + +# output + + +with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4: + pass + + +with make_context_manager1() as cm1, make_context_manager2(), make_context_manager3() as cm3, make_context_manager4(): + pass + + +with new_new_new1() as cm1, new_new_new2(): + pass diff --git a/tests/data/preview_context_managers/targeting_py39.py b/tests/data/preview_context_managers/targeting_py39.py new file mode 100644 index 00000000000..5cb8763040a --- /dev/null +++ b/tests/data/preview_context_managers/targeting_py39.py @@ -0,0 +1,104 @@ +with \ + make_context_manager1() as cm1, \ + make_context_manager2() as cm2, \ + make_context_manager3() as cm3, \ + make_context_manager4() as cm4 \ +: + pass + + +# Leading comment +with \ + make_context_manager1() as cm1, \ + make_context_manager2(), \ + make_context_manager3() as cm3, \ + make_context_manager4() \ +: + pass + + +with \ + new_new_new1() as cm1, \ + new_new_new2() \ +: + pass + + +with ( + new_new_new1() as cm1, + new_new_new2() +): + pass + + +# Leading comment. +with ( + # First comment. + new_new_new1() as cm1, + # Second comment. + new_new_new2() + # Last comment. +): + pass + + +with \ + this_is_a_very_long_call(looong_arg1=looong_value1, looong_arg2=looong_value2) as cm1, \ + this_is_a_very_long_call(looong_arg1=looong_value1, looong_arg2=looong_value2, looong_arg3=looong_value3, looong_arg4=looong_value4) as cm2 \ +: + pass + + +# output + + +with ( + make_context_manager1() as cm1, + make_context_manager2() as cm2, + make_context_manager3() as cm3, + make_context_manager4() as cm4, +): + pass + + +# Leading comment +with ( + make_context_manager1() as cm1, + make_context_manager2(), + make_context_manager3() as cm3, + make_context_manager4(), +): + pass + + +with new_new_new1() as cm1, new_new_new2(): + pass + + +with new_new_new1() as cm1, new_new_new2(): + pass + + +# Leading comment. +with ( + # First comment. + new_new_new1() as cm1, + # Second comment. + new_new_new2() + # Last comment. 
+): + pass + + +with ( + this_is_a_very_long_call( + looong_arg1=looong_value1, looong_arg2=looong_value2 + ) as cm1, + this_is_a_very_long_call( + looong_arg1=looong_value1, + looong_arg2=looong_value2, + looong_arg3=looong_value3, + looong_arg4=looong_value4, + ) as cm2, +): + pass diff --git a/tests/data/py_38/pep_572_remove_parens.py b/tests/data/py_38/pep_572_remove_parens.py index 9718d95b499..4e95fb07f3a 100644 --- a/tests/data/py_38/pep_572_remove_parens.py +++ b/tests/data/py_38/pep_572_remove_parens.py @@ -49,6 +49,26 @@ def a(): def this_is_so_dumb() -> (please := no): pass +async def await_the_walrus(): + with (x := y): + pass + + with (x := y) as z, (a := b) as c: + pass + + with (x := await y): + pass + + with (x := await a, y := await b): + pass + + # Ideally we should remove one set of parentheses + with ((x := await a, y := await b)): + pass + + with (x := await a), (y := await b): + pass + # output if foo := 0: @@ -103,3 +123,23 @@ def a(): def this_is_so_dumb() -> (please := no): pass + +async def await_the_walrus(): + with (x := y): + pass + + with (x := y) as z, (a := b) as c: + pass + + with (x := await y): + pass + + with (x := await a, y := await b): + pass + + # Ideally we should remove one set of parentheses + with ((x := await a, y := await b)): + pass + + with (x := await a), (y := await b): + pass diff --git a/tests/data/simple_cases/docstring.py b/tests/data/simple_cases/docstring.py index f08bba575fe..c31d6a68783 100644 --- a/tests/data/simple_cases/docstring.py +++ b/tests/data/simple_cases/docstring.py @@ -173,6 +173,11 @@ def multiline_backslash_2(): ''' hey there \ ''' +# Regression test for #3425 +def multiline_backslash_really_long_dont_crash(): + """ + hey there hello guten tag hi hoow are you ola zdravstvuyte ciao como estas ca va \ """ + def multiline_backslash_3(): ''' @@ -391,6 +396,12 @@ def multiline_backslash_2(): hey there \ """ +# Regression test for #3425 +def multiline_backslash_really_long_dont_crash(): + """ + hey there hello guten tag hi hoow are you ola zdravstvuyte ciao como estas ca va \ """ + + def multiline_backslash_3(): """ already escaped \\""" diff --git a/tests/test_black.py b/tests/test_black.py index 4b2d485b392..cc1635e9c02 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -475,6 +475,53 @@ def test_tab_comment_indentation(self) -> None: self.assertFormatEqual(contents_spc, fs(contents_spc)) self.assertFormatEqual(contents_spc, fs(contents_tab)) + def test_false_positive_symlink_output_issue_3384(self) -> None: + # Emulate the behavior when using the CLI (`black ./child --verbose`), which + # involves patching some `pathlib.Path` methods. In particular, `is_dir` is + # patched only on its first call: when checking if "./child" is a directory it + # should return True. 
The "./child" folder exists relative to the cwd when + # running from CLI, but fails when running the tests because cwd is different + project_root = Path(THIS_DIR / "data" / "nested_gitignore_tests") + working_directory = project_root / "root" + target_abspath = working_directory / "child" + target_contents = ( + src.relative_to(working_directory) for src in target_abspath.iterdir() + ) + + def mock_n_calls(responses: List[bool]) -> Callable[[], bool]: + def _mocked_calls() -> bool: + if responses: + return responses.pop(0) + return False + + return _mocked_calls + + with patch("pathlib.Path.iterdir", return_value=target_contents), patch( + "pathlib.Path.cwd", return_value=working_directory + ), patch("pathlib.Path.is_dir", side_effect=mock_n_calls([True])): + ctx = FakeContext() + ctx.obj["root"] = project_root + report = MagicMock(verbose=True) + pyink.get_sources( + ctx=ctx, + src=("./child",), + quiet=False, + verbose=True, + include=DEFAULT_INCLUDE, + exclude=None, + report=report, + extend_exclude=None, + force_exclude=None, + stdin_filename=None, + ) + assert not any( + mock_args[1].startswith("is a symbolic link that points outside") + for _, mock_args, _ in report.path_ignored.mock_calls + ), "A symbolic link was reported." + report.path_ignored.assert_called_once_with( + Path("child", "b.py"), "matches a .gitignore file content" + ) + def test_report_verbose(self) -> None: report = Report(verbose=True) out_lines = [] diff --git a/tests/test_format.py b/tests/test_format.py index 95a5f5b1129..b6f4b6a585d 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -1,3 +1,4 @@ +import re from dataclasses import replace from typing import Any, Iterator from unittest.mock import patch @@ -67,6 +68,29 @@ def test_pyink_format(filename: str) -> None: ) +def test_preview_context_managers_targeting_py38() -> None: + source, expected = read_data("preview_context_managers", "targeting_py38.py") + mode = pyink.Mode(preview=True, target_versions={pyink.TargetVersion.PY38}) + assert_format(source, expected, mode, minimum_version=(3, 8)) + + +def test_preview_context_managers_targeting_py39() -> None: + source, expected = read_data("preview_context_managers", "targeting_py39.py") + mode = pyink.Mode(preview=True, target_versions={pyink.TargetVersion.PY39}) + assert_format(source, expected, mode, minimum_version=(3, 9)) + + +@pytest.mark.parametrize( + "filename", all_data_cases("preview_context_managers/auto_detect") +) +def test_preview_context_managers_auto_detect(filename: str) -> None: + match = re.match(r"features_3_(\d+)", filename) + assert match is not None, "Unexpected filename format: %s" % filename + source, expected = read_data("preview_context_managers/auto_detect", filename) + mode = pyink.Mode(preview=True) + assert_format(source, expected, mode, minimum_version=(3, int(match.group(1)))) + + # =============== # # Complex cases # ============= # @@ -155,6 +179,13 @@ def test_docstring_no_string_normalization() -> None: assert_format(source, expected, mode) +def test_docstring_line_length_6() -> None: + """Like test_docstring but with line length set to 6.""" + source, expected = read_data("miscellaneous", "linelength6") + mode = pyink.Mode(line_length=6) + assert_format(source, expected, mode) + + def test_preview_docstring_no_string_normalization() -> None: """ Like test_docstring but with string normalization off *and* the preview style