Drop support for EOL Python 3.6 #629

Merged
merged 2 commits on Feb 9, 2022
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -9,7 +9,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "pypy3"]
+        python-version: ["3.7", "3.8", "3.9", "3.10", "pypy-3.8"]
         os: [ubuntu-latest, macos-latest, windows-latest]
 
     steps:
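A note on the hunk above: a GitHub Actions matrix runs one job per combination of its axes, so this change trims the job count from 6 × 3 = 18 to 5 × 3 = 15. A quick sketch of the expansion:

from itertools import product

# Each (python-version, os) pair becomes one CI job; with fail-fast
# disabled, all combinations run to completion.
python_versions = ["3.7", "3.8", "3.9", "3.10", "pypy-3.8"]
oses = ["ubuntu-latest", "macos-latest", "windows-latest"]

print(len(list(product(python_versions, oses))))  # 15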
2 changes: 0 additions & 2 deletions bleach/__init__.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 import packaging.version
 
 from bleach.linkifier import (
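The deleted coding cookie is redundant on Python 3: PEP 3120 made UTF-8 the default source encoding, so non-ASCII literals work without any declaration. A minimal illustration:

# No coding cookie needed on Python 3; source files are read as
# UTF-8 by default (PEP 3120).
greeting = "héllo, wörld"
print(greeting)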
19 changes: 8 additions & 11 deletions bleach/html5lib_shim.py
@@ -257,7 +257,7 @@ class BleachHTMLTokenizer(HTMLTokenizer):
     """Tokenizer that doesn't consume character entities"""
 
     def __init__(self, consume_entities=False, **kwargs):
-        super(BleachHTMLTokenizer, self).__init__(**kwargs)
+        super().__init__(**kwargs)
 
         self.consume_entities = consume_entities
 
@@ -267,7 +267,7 @@ def __init__(self, consume_entities=False, **kwargs):
     def __iter__(self):
         last_error_token = None
 
-        for token in super(BleachHTMLTokenizer, self).__iter__():
+        for token in super().__iter__():
             if last_error_token is not None:
                 if (
                     last_error_token["data"] == "invalid-character-in-attribute-name"
@@ -342,9 +342,7 @@ def consumeEntity(self, allowedChar=None, fromAttribute=False):
         # If this tokenizer is set to consume entities, then we can let the
         # superclass do its thing.
         if self.consume_entities:
-            return super(BleachHTMLTokenizer, self).consumeEntity(
-                allowedChar, fromAttribute
-            )
+            return super().consumeEntity(allowedChar, fromAttribute)
 
         # If this tokenizer is set to not consume entities, then we don't want
         # to consume and convert them, so this overrides the html5lib tokenizer's
@@ -364,7 +362,7 @@ def tagOpenState(self):
         # we've collected so far and we do that by calling start_tag() on
         # the input stream wrapper.
         self.stream.start_tag()
-        return super(BleachHTMLTokenizer, self).tagOpenState()
+        return super().tagOpenState()
 
     def emitCurrentToken(self):
         token = self.currentToken
@@ -397,7 +395,7 @@ def emitCurrentToken(self):
             self.state = self.dataState
             return
 
-        super(BleachHTMLTokenizer, self).emitCurrentToken()
+        super().emitCurrentToken()
 
 
 class BleachHTMLParser(HTMLParser):
@@ -416,7 +414,7 @@ def __init__(self, tags, strip, consume_entities, **kwargs):
         self.tags = [tag.lower() for tag in tags] if tags is not None else None
         self.strip = strip
         self.consume_entities = consume_entities
-        super(BleachHTMLParser, self).__init__(**kwargs)
+        super().__init__(**kwargs)
 
     def _parse(
         self, stream, innerHTML=False, container="div", scripting=True, **kwargs
@@ -642,15 +640,14 @@ def serialize(self, treewalker, encoding=None):
         in_tag = False
         after_equals = False
 
-        for stoken in super(BleachHTMLSerializer, self).serialize(treewalker, encoding):
+        for stoken in super().serialize(treewalker, encoding):
             if in_tag:
                 if stoken == ">":
                     in_tag = False
 
                 elif after_equals:
                     if stoken != '"':
-                        for part in self.escape_base_amp(stoken):
-                            yield part
+                        yield from self.escape_base_amp(stoken)
 
                         after_equals = False
                         continue
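The recurring edit in this file swaps the two-argument super(Class, self) spelling, which Python 2 required, for the zero-argument form from PEP 3135; the two are equivalent inside methods. A minimal sketch with hypothetical class names:

class Base:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class OldStyle(Base):
    def __init__(self, **kwargs):
        # Python 2 compatible spelling: names the class and instance explicitly.
        super(OldStyle, self).__init__(**kwargs)

class NewStyle(Base):
    def __init__(self, **kwargs):
        # Python 3 only: the compiler fills in __class__ and self.
        super().__init__(**kwargs)

assert OldStyle(a=1).kwargs == NewStyle(a=1).kwargs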
16 changes: 6 additions & 10 deletions bleach/linkifier.py
@@ -228,7 +228,7 @@ def __init__(
         :arg re email_re: email matching regex
 
         """
-        super(LinkifyFilter, self).__init__(source)
+        super().__init__(source)
 
         self.callbacks = callbacks or []
         self.skip_tags = skip_tags or []
@@ -332,8 +332,7 @@ def handle_email_addresses(self, src_iter):
                     if end < len(text):
                         new_tokens.append({"type": "Characters", "data": text[end:]})
 
-                    for new_token in new_tokens:
-                        yield new_token
+                    yield from new_tokens
 
                     continue
 
@@ -460,8 +459,7 @@ def handle_links(self, src_iter):
                     if end < len(text):
                         new_tokens.append({"type": "Characters", "data": text[end:]})
 
-                    for new_token in new_tokens:
-                        yield new_token
+                    yield from new_tokens
 
                     continue
 
@@ -499,8 +497,7 @@ def handle_a_tag(self, token_buffer):
                 # The callbacks didn't change the text, so we yield the new "a"
                 # token, then whatever else was there, then the end "a" token
                 yield a_token
-                for mem in token_buffer[1:]:
-                    yield mem
+                yield from token_buffer[1:]
 
             else:
                 # If the callbacks changed the text, then we're going to drop
@@ -516,16 +513,15 @@ def __iter__(self):
 
         token_buffer = []
 
-        for token in super(LinkifyFilter, self).__iter__():
+        for token in super().__iter__():
             if in_a:
                 # Handle the case where we're in an "a" tag--we want to buffer tokens
                 # until we hit an end "a" tag.
                 if token["type"] == "EndTag" and token["name"] == "a":
                     # Add the end tag to the token buffer and then handle them
                     # and yield anything returned
                     token_buffer.append(token)
-                    for new_token in self.handle_a_tag(token_buffer):
-                        yield new_token
+                    yield from self.handle_a_tag(token_buffer)
 
                     # Clear "a" related state and continue since we've yielded all
                     # the tokens we're going to yield
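The other recurring edit replaces manual re-yield loops with yield from (PEP 380), which delegates to a sub-iterable and, for generators, also forwards send() and throw(). A small equivalence sketch with hypothetical token dicts:

def old_way(tokens):
    # Pre-PEP 380: loop and re-yield each item.
    for token in tokens:
        yield token

def new_way(tokens):
    # Same output, less ceremony.
    yield from tokens

tokens = [{"type": "Characters", "data": "x"}, {"type": "EndTag", "name": "a"}]
assert list(old_way(tokens)) == list(new_way(tokens))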
9 changes: 4 additions & 5 deletions bleach/sanitizer.py
@@ -280,7 +280,7 @@ def __init__(
             category=DeprecationWarning,
             module="bleach._vendor.html5lib",
         )
-        return super(BleachSanitizerFilter, self).__init__(source, **kwargs)
+        return super().__init__(source, **kwargs)
 
     def sanitize_stream(self, token_iterator):
         for token in token_iterator:
@@ -290,8 +290,7 @@ def sanitize_stream(self, token_iterator):
                 continue
 
             if isinstance(ret, list):
-                for subtoken in ret:
-                    yield subtoken
+                yield from ret
             else:
                 yield ret
 
@@ -575,7 +574,7 @@ def disallowed_token(self, token):
                 if ns is None or ns not in html5lib_shim.prefixes:
                     namespaced_name = name
                 else:
-                    namespaced_name = "%s:%s" % (html5lib_shim.prefixes[ns], name)
+                    namespaced_name = "{}:{}".format(html5lib_shim.prefixes[ns], name)
 
                 attrs.append(
                     ' %s="%s"'
@@ -587,7 +586,7 @@ def disallowed_token(self, token):
                         v,
                     )
                 )
-            token["data"] = "<%s%s>" % (token["name"], "".join(attrs))
+            token["data"] = "<{}{}>".format(token["name"], "".join(attrs))
 
         else:
             token["data"] = "<%s>" % token["name"]
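The %-interpolation here becomes str.format; the two render identically for these cases, e.g.:

# Same output from old-style interpolation and str.format.
prefix, name = "svg", "rect"
assert "%s:%s" % (prefix, name) == "{}:{}".format(prefix, name) == "svg:rect"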
17 changes: 8 additions & 9 deletions docs/conf.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # Bleach documentation build configuration file, created by
 # sphinx-quickstart on Fri May 11 21:11:39 2012.
@@ -40,8 +39,8 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'Bleach'
-copyright = u'2012-2015, James Socol; 2015-2017, Mozilla Foundation'
+project = 'Bleach'
+copyright = '2012-2015, James Socol; 2015-2017, Mozilla Foundation'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -196,8 +195,8 @@
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-    ('index', 'Bleach.tex', u'Bleach Documentation',
-     u'Will Kahn-Greene', 'manual'),
+    ('index', 'Bleach.tex', 'Bleach Documentation',
+     'Will Kahn-Greene', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
@@ -226,8 +225,8 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'bleach', u'Bleach Documentation',
-     [u'Will Kahn-Greene'], 1)
+    ('index', 'bleach', 'Bleach Documentation',
+     ['Will Kahn-Greene'], 1)
 ]
 
 # If true, show URL addresses after external links.
@@ -240,8 +239,8 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    ('index', 'Bleach', u'Bleach Documentation',
-     u'Will Kahn-Greene', 'Bleach', 'One line description of project.',
+    ('index', 'Bleach', 'Bleach Documentation',
+     'Will Kahn-Greene', 'Bleach', 'One line description of project.',
      'Miscellaneous'),
 ]
 
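The dropped u'' prefixes are no-ops on Python 3, where every str literal is already Unicode; the prefix survives only for code that once straddled Python 2:

# u'' is accepted but meaningless on Python 3.
assert u'Bleach' == 'Bleach'
assert type(u'Bleach') is str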
4 changes: 2 additions & 2 deletions scripts/run_tests.sh
@@ -23,10 +23,10 @@ case "${MODE}" in
         tox -e docs
         ;;
     format)
-        black --target-version=py36 bleach/*.py tests/ tests_website/
+        black --target-version=py37 bleach/*.py tests/ tests_website/
         ;;
     format-check)
-        black --target-version=py36 --check --diff bleach/*.py tests/ tests_website/
+        black --target-version=py37 --check --diff bleach/*.py tests/ tests_website/
         ;;
     check-reqs)
         mv requirements-dev.txt requirements-dev.txt.orig
3 changes: 0 additions & 3 deletions setup.cfg
@@ -14,6 +14,3 @@ max-line-length = 100
 
 [tool:pytest]
 addopts = -W error:html5lib:DeprecationWarning
-
-[wheel]
-universal=1
10 changes: 5 additions & 5 deletions setup.py
@@ -16,18 +16,18 @@
 
 
 def get_long_desc():
-    with io.open('README.rst', encoding='utf-8') as fp:
+    with open('README.rst', encoding='utf-8') as fp:
         desc = fp.read()
     desc += '\n\n'
-    with io.open('CHANGES', encoding='utf-8') as fp:
+    with open('CHANGES', encoding='utf-8') as fp:
         desc += fp.read()
     return desc
 
 
 def get_version():
     fn = os.path.join('bleach', '__init__.py')
     vsre = r"""^__version__ = ['"]([^'"]*)['"]"""
-    with io.open(fn, encoding='utf-8') as fp:
+    with open(fn, encoding='utf-8') as fp:
         version_file = fp.read()
     return re.search(vsre, version_file, re.M).group(1)
 
@@ -45,7 +45,7 @@ def get_version():
     include_package_data=True,
     package_data={'': ['README.rst']},
     zip_safe=False,
-    python_requires='>=3.6',
+    python_requires='>=3.7',
     install_requires=install_requires,
     classifiers=[
         'Development Status :: 5 - Production/Stable',
@@ -55,7 +55,7 @@ def get_version():
         'Operating System :: OS Independent',
         'Programming Language :: Python',
         'Programming Language :: Python :: 3 :: Only',
-        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3',
         'Programming Language :: Python :: 3.7',
         'Programming Language :: Python :: 3.8',
         'Programming Language :: Python :: 3.9',
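Two notes on this file: on Python 3 the builtin open is literally io.open, so the io. prefix (once needed on Python 2 for the encoding parameter) can go; and python_requires='>=3.7' makes pip refuse to install new releases on 3.6 interpreters. The first point is checkable directly:

import io

# The builtin open and io.open are the same object on Python 3,
# encoding parameter and all.
assert open is io.open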
2 changes: 1 addition & 1 deletion tests/test_linkify.py
@@ -48,7 +48,7 @@ def test_mangle_link():
     def filter_url(attrs, new=False):
         if not attrs.get((None, "href"), "").startswith("http://bouncer"):
             quoted = quote_plus(attrs[(None, "href")])
-            attrs[(None, "href")] = "http://bouncer/?u={0!s}".format(quoted)
+            attrs[(None, "href")] = "http://bouncer/?u={!s}".format(quoted)
         return attrs
 
     assert (
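{0!s} and {!s} are interchangeable here: since Python 3.1, format fields may omit positional indexes and are auto-numbered left to right:

quoted = "http%3A%2F%2Fexample.com%2F"

# Explicit index and auto-numbering render the same string.
assert "u={0!s}".format(quoted) == "u={!s}".format(quoted)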
1 change: 0 additions & 1 deletion tests/test_unicode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 import pytest
 
 from bleach import clean, linkify
2 changes: 1 addition & 1 deletion tests_website/data_to_json.py
@@ -39,7 +39,7 @@ def main():
     for infn in ins:
         case_name = infn.rsplit(".test", 1)[0]
 
-        with open(infn, "r") as fin:
+        with open(infn) as fin:
             data, expected = fin.read().split("\n--\n")
             data = data.strip()
             expected = expected.strip()
2 changes: 1 addition & 1 deletion tests_website/open_test_page.py
@@ -36,4 +36,4 @@
         browser = webbrowser.get(browser_name)
         browser.open_new_tab("http://localhost:8080")
     except Exception as error:
-        print("error getting test browser %s: %s" % (browser_name, error))
+        print("error getting test browser {}: {}".format(browser_name, error))
2 changes: 1 addition & 1 deletion tests_website/server.py
@@ -27,7 +27,7 @@ class BleachCleanHandler(http.server.SimpleHTTPRequestHandler):
     def do_POST(self):
         content_len = int(self.headers.get("content-length", 0))
         body = self.rfile.read(content_len)
-        print("read %s bytes: %s" % (content_len, body))
+        print("read {} bytes: {}".format(content_len, body))
 
         body = body.decode("utf-8")
         print("input: %r" % body)
10 changes: 2 additions & 8 deletions tox.ini
@@ -2,8 +2,8 @@
 
 [tox]
 envlist =
-    py{36,37,38,39,310,py3}
-    py{36,37,38,39,310}-build-no-lang
+    py{37,38,39,310,py3}
+    py{37,38,39,310}-build-no-lang
     docs
     format-check
     lint
@@ -16,12 +16,6 @@ commands =
     pytest {posargs:-v}
     python setup.py build
 
-[testenv:py36-build-no-lang]
-setenv =
-    LANG=
-commands =
-    python setup.py build
-
 [testenv:py37-build-no-lang]
 setenv =
     LANG=