diff --git a/ChangeLog b/ChangeLog
index 2dc3aba713..33128066dd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -26,6 +26,10 @@ Release date: TBA
 * Add an faq detailing which messages to disable to avoid duplicates w/ other popular linters
 
+* Fix superfluous-parens false-positive for the walrus operator
+
+  Close #3383
+
 * Fix a bug with `ignore-docstrings` ignoring all lines in a module
 
 * Fix `pre-commit` config that could lead to undetected duplicate lines of code
diff --git a/doc/whatsnew/2.6.rst b/doc/whatsnew/2.6.rst
index 5ab2028c16..dc659d6562 100644
--- a/doc/whatsnew/2.6.rst
+++ b/doc/whatsnew/2.6.rst
@@ -23,3 +23,5 @@ Other Changes
 * The `no-space-check` option has been removed, it's no longer possible to consider empty line like a `trailing-whitespace` by using clever options.
 
 * `mixed-indentation` has been removed, it is no longer useful since TabError is included directly in python3
+
+* Fix superfluous-parens false-positive for the walrus operator
diff --git a/pylint/checkers/format.py b/pylint/checkers/format.py
index 130ddd0c5f..52d852a258 100644
--- a/pylint/checkers/format.py
+++ b/pylint/checkers/format.py
@@ -52,8 +52,8 @@ import tokenize
 from functools import reduce  # pylint: disable=redefined-builtin
-from typing import List
 from tokenize import TokenInfo
+from typing import List
 
 from astroid import nodes
 
@@ -371,14 +371,25 @@ def _check_keyword_parentheses(self, tokens: List[TokenInfo], start: int) -> Non
         if tokens[start + 1].string != "(":
             return
         found_and_or = False
+        contains_walrus_operator = False
+        walrus_operator_depth = 0
         depth = 0
         keyword_token = str(tokens[start].string)
         line_num = tokens[start].start[0]
 
         for i in range(start, len(tokens) - 1):
             token = tokens[i]
+            # If we hit a newline, then assume any parens were for continuation.
             if token.type == tokenize.NL:
                 return
+            # Since the walrus operator doesn't exist below python3.8, the tokenizer
+            # generates independent tokens
+            if (
+                token.string == ":="  # <-- python3.8+ path
+                or token.string + tokens[i + 1].string == ":="
+            ):
+                contains_walrus_operator = True
+                walrus_operator_depth = depth
             if token.string == "(":
                 depth += 1
             elif token.string == ")":
@@ -386,10 +397,14 @@ def _check_keyword_parentheses(self, tokens: List[TokenInfo], start: int) -> Non
             if depth:
                 continue
             # ')' can't happen after if (foo), since it would be a syntax error.
-            if (tokens[i + 1].string in (":", ")", "]", "}", "in") or
-                tokens[i + 1].type in
-                (tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT)):
+            if tokens[i + 1].string in (":", ")", "]", "}", "in") or tokens[
+                i + 1
+            ].type in (tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT):
                 # The empty tuple () is always accepted.
+                if contains_walrus_operator and walrus_operator_depth - 1 == depth:
+                    # Reset variable for possible following expressions
+                    contains_walrus_operator = False
+                    continue
                 if i == start + 2:
                     return
                 if keyword_token == "not":
diff --git a/tests/checkers/unittest_format.py b/tests/checkers/unittest_format.py
index 7c07ac9ed7..562649eb93 100644
--- a/tests/checkers/unittest_format.py
+++ b/tests/checkers/unittest_format.py
@@ -184,6 +184,27 @@ def testCheckKeywordParensHandlesUnnecessaryParens(self):
         with self.assertAddsMessages(msg):
             self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
 
+    def testNoSuperfluousParensWalrusOperatorIf(self):
+        """Parenthesis change the meaning of assignment in the walrus operator
+        and so are not superfluous:"""
+        code = "if (odd := is_odd(i))"
+        offset = 0
+        with self.assertNoMessages():
+            self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
+
+    def testPositiveSuperfluousParensWalrusOperatorIf(self):
+        """Test positive superfluous parens with the walrus operator"""
+        code = "if ((odd := is_odd(i))):"
+        msg = Message("superfluous-parens", line=1, args="if")
+        with self.assertAddsMessages(msg):
+            self.checker._check_keyword_parentheses(_tokenize_str(code), 0)
+
+    def testNoSuperfluousParensWalrusOperatorNot(self):
+        """Test superfluous-parens with the not operator"""
+        code = "not (foo := 5)"
+        with self.assertNoMessages():
+            self.checker._check_keyword_parentheses(_tokenize_str(code), 0)
+
     def testCheckIfArgsAreNotUnicode(self):
         cases = [("if (foo):", 0), ("assert (1 == 1)", 0)]