Commit
Unclosed script/style tag handling. Fixes #1614
Explicitly handle unclosed <script> and <style> tags, which previously caused O(n^2) work: each character up to the end of the line or end of file (whichever comes first) was lexed as a separate Error token.

Now we try lexing the rest of the line as Javascript/CSS when there is no closing script/style tag. If there is a newline, we recover in the root state on the next line; otherwise we just keep parsing as Javascript/CSS. This is similar to how the error handling in lexer.py works, except we get Javascript or CSS tokens instead of Error tokens, and we reach the end of the line much faster since we don't apply an O(n) regex at every character.

I added a new test suite for the HTML lexer (there wasn't one beyond the coverage in test_examplefiles.py), including a trivial happy-path case and several cases around <script> and <style> fragments, with regression coverage that fails on the old logic.
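The lexer change itself is not captured in this view. As a rough illustration of the recovery described above, a script-content state in Pygments' RegexLexer style might look like the sketch below; the state name and exact patterns are assumptions for illustration, not the committed diff (`using()` delegates matched text to another lexer, `#pop` returns to the enclosing state):

from pygments.lexer import bygroups, using
from pygments.lexers.javascript import JavascriptLexer
from pygments.token import Name, Punctuation, Text

# Hypothetical "script-content" state illustrating the described recovery.
script_content = [
    # closing tag found: emit it and leave script mode
    (r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
     bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text, Punctuation),
     '#pop'),
    # happy path: lex everything before the closing tag as JavaScript
    (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
    # no closing tag on this line: lex the rest of the line as JavaScript
    # in one match, then pop so the next line recovers in the root state
    (r'.+?\n', using(JavascriptLexer), '#pop'),
    # no closing tag and no newline: keep lexing as JavaScript to EOF
    (r'.+', using(JavascriptLexer), '#pop'),
]

The key property is that each fallback rule consumes the remainder of the line in a single match, so lexing an unclosed tag is linear instead of retrying an O(n) regex at every character.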
Showing 2 changed files with 92 additions and 0 deletions.
@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
"""
    HTML Lexer Tests
    ~~~~~~~~~~~~~~~~
    :copyright: Copyright 2020-2020 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import time

import pytest

from pygments.lexers.html import HtmlLexer
from pygments.token import Token

@pytest.fixture(scope='module')
def lexer_html():
    yield HtmlLexer()

def test_simple_html(lexer_html):
    """ extremely basic happy-path case
    more tests are in test_examplefiles """

    fragment = "<html>\n\t<body>\n\t\thello world\n\t</body>\n</html>"
    tokens = list(lexer_html.get_tokens(fragment))
    # get_tokens yields (tokentype, value) pairs, so the type is x[0]
    assert all(x[0] != Token.Error for x in tokens)

def test_happy_javascript_fragment(lexer_html):
    """ valid, even long Javascript fragments should still get parsed ok """

    fragment = "<script type=\"text/javascript\">" + "alert(\"hi\");" * 2000 + "</script>"
    start_time = time.time()
    tokens = list(lexer_html.get_tokens(fragment))
    assert all(x[0] != Token.Error for x in tokens)
    assert time.time() - start_time < 1, 'The HTML lexer might have an expensive happy-path script case'

def test_happy_css_fragment(lexer_html):
    """ valid, even long CSS fragments should still get parsed ok """

    fragment = "<style>" + ".ui-helper-hidden{display:none}" * 2000 + "</style>"
    start_time = time.time()
    tokens = list(lexer_html.get_tokens(fragment))
    assert all(x[0] != Token.Error for x in tokens)
    assert time.time() - start_time < 1, 'The HTML lexer might have an expensive happy-path style case'

def test_long_unclosed_javascript_fragment(lexer_html):
    """ unclosed, long Javascript fragments should parse quickly """

    fragment = "<script type=\"text/javascript\">" + "alert(\"hi\");" * 2000
    start_time = time.time()
    tokens = list(lexer_html.get_tokens(fragment))
    assert time.time() - start_time < 1, 'The HTML lexer might have an expensive error script case'

def test_long_unclosed_css_fragment(lexer_html):
    """ unclosed, long CSS fragments should parse quickly """

    fragment = "<style>" + ".ui-helper-hidden{display:none}" * 2000
    start_time = time.time()
    tokens = list(lexer_html.get_tokens(fragment))
    assert time.time() - start_time < 1, 'The HTML lexer might have an expensive error style case'

def test_unclosed_fragment_with_newline_recovery(lexer_html):
    """ unclosed Javascript fragments should recover on the next line """

    fragment = "<script type=\"text/javascript\">" + "alert(\"hi\");" * 20 + "\n<div>hi</div>"
    tokens = list(lexer_html.get_tokens(fragment))
    recovery_tokens = [
        (Token.Punctuation, '<'),
        (Token.Name.Tag, 'div'),
        (Token.Punctuation, '>'),
        (Token.Text, 'hi'),
        (Token.Punctuation, '<'),
        (Token.Punctuation, '/'),
        (Token.Name.Tag, 'div'),
        (Token.Punctuation, '>'),
        (Token.Text, '\n')]
    assert tokens[-1 * len(recovery_tokens):] == recovery_tokens
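As a quick manual check (not part of the commit), lexing an unclosed fragment should now yield JavaScript tokens for the tail of the line rather than one Error token per character:

from pygments.lexers.html import HtmlLexer

# With the fix, the unclosed <script> body is lexed as JavaScript;
# previously each character after "<script>" became a Token.Error.
for ttype, value in HtmlLexer().get_tokens('<script>alert("hi");'):
    print(ttype, repr(value))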