diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py
index 42200e940a..329120e5a2 100644
--- a/pygments/lexers/javascript.py
+++ b/pygments/lexers/javascript.py
@@ -509,7 +509,7 @@ class TypeScriptLexer(RegexLexer):
             (r'(super)(\s*)(\([\w,?.$\s]+\s*\))',
              bygroups(Keyword.Reserved, Text), 'slashstartsregex'),
             # Match stuff like: function() {...}
-            (r'([a-zA-Z_?.$][\w?.$]*)\(\) \{', Name.Other, 'slashstartsregex'),
+            (r'([a-zA-Z_?.$][\w?.$]*)(?=\(\) \{)', Name.Other, 'slashstartsregex'),
             # Match stuff like: (function: return type)
             (r'([\w?.$][\w?.$]*)(\s*:\s*)([\w?.$][\w?.$]*)',
              bygroups(Name.Other, Text, Keyword.Type)),
diff --git a/tests/test_javascript.py b/tests/test_javascript.py
index cdccfde5b6..ea0691d62e 100644
--- a/tests/test_javascript.py
+++ b/tests/test_javascript.py
@@ -9,8 +9,8 @@
 
 import pytest
 
-from pygments.lexers.javascript import JavascriptLexer
-from pygments.token import Number
+from pygments.lexers.javascript import JavascriptLexer, TypeScriptLexer
+from pygments.token import Number, Token
 
 
 @pytest.fixture(scope='module')
@@ -82,3 +82,25 @@ def test_hexadecimal_literal_positive_matches(lexer, text):
 def test_hexadecimal_literals_negative_matches(lexer, text):
     """Test text that should **not** be tokenized as hexadecimal literals."""
     assert list(lexer.get_tokens(text))[0] != (Number.Hex, text)
+
+@pytest.fixture(scope='module')
+def ts_lexer():
+    yield TypeScriptLexer()
+
+def test_function_definition(ts_lexer):
+    fragment = u'async function main() {\n}'
+    tokens = [
+        (Token.Keyword, u'async'),
+        (Token.Text, u' '),
+        (Token.Keyword.Declaration, u'function'),
+        (Token.Text, u' '),
+        (Token.Name.Other, u'main'),
+        (Token.Punctuation, u'('),
+        (Token.Punctuation, u')'),
+        (Token.Text, u' '),
+        (Token.Punctuation, u'{'),
+        (Token.Text, u'\n'),
+        (Token.Punctuation, u'}'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(ts_lexer.get_tokens(fragment)) == tokens
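
A minimal sketch of how the new lookahead behaviour can be spot-checked interactively, assuming a Pygments checkout with this patch applied; it uses the same public TypeScriptLexer.get_tokens API as the test above:

    from pygments.lexers.javascript import TypeScriptLexer
    from pygments.token import Name, Punctuation

    # With the old pattern, 'main() {' came back as a single Name.Other token;
    # the lookahead leaves the parentheses and brace to be tokenized separately.
    tokens = list(TypeScriptLexer().get_tokens('function main() {\n}'))
    assert (Name.Other, 'main') in tokens
    assert (Punctuation, '(') in tokens
    assert (Punctuation, '{') in tokens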