Bump black from 22.12.0 to 23.1.0 #2159

Closed
wants to merge 2 commits
2 changes: 1 addition & 1 deletion requirements-dev.txt
@@ -1,7 +1,7 @@
mock==5.0.1
nose2[coverage_plugin]==0.12.0
cov-core==1.15.0
-black==22.12.0
+black==23.1.0
pylint==2.15.10
splinter==0.19.0
selenium==4.8.0
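
Apart from the version bump above, every remaining hunk in this PR is mechanical reformatting produced by the new release: black 23.1.0's 2023 stable style removes the blank line that previously sat directly after a block opener (a line ending in a colon, such as a def, for, or else statement) and requires two blank lines before a function definition that is preceded only by a standalone comment. A minimal before/after sketch of both rules, using a hypothetical snippet rather than code from this repository:

    # Accepted by black 22.12.0: a blank line directly after the block opener.
    def request_old_style(query, params):

        params['url'] = 'https://example.org/search?q=' + query
        return params


    # After reformatting with black 23.1.0 the leading blank line inside the
    # block is gone, and a function that follows a standalone comment (as in
    # the deezer.py, freesound.py and duckduckgo.py hunks below) must be
    # separated from the preceding code by two blank lines.
    def request_new_style(query, params):
        params['url'] = 'https://example.org/search?q=' + query
        return params
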
1 change: 0 additions & 1 deletion searx/compat.py
@@ -14,7 +14,6 @@
from functools import cached_property # type: ignore

except ImportError:

# cache_property has been added in py3.8 [1]
#
# To support cache_property in py3.7 the implementation from 3.8 has been
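
For context on the searx/compat.py hunk above: the module wraps the cached_property import in a try/except so that Python 3.7 can fall back to a copy of the 3.8 implementation. A simplified, non-thread-safe sketch of that pattern (a hypothetical stand-in, not the vendored code itself):

    try:
        from functools import cached_property  # available since Python 3.8
    except ImportError:
        # minimal fallback: compute once, then cache the value on the instance
        # so later attribute lookups bypass the descriptor entirely
        class cached_property:  # pylint: disable=invalid-name
            def __init__(self, func):
                self.func = func
                self.__doc__ = func.__doc__

            def __get__(self, obj, objtype=None):
                if obj is None:
                    return self
                value = self.func(obj)
                obj.__dict__[self.func.__name__] = value
                return value
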
1 change: 0 additions & 1 deletion searx/engines/apkmirror.py
@@ -49,7 +49,6 @@ def response(resp):

# parse results
for result in eval_xpath_list(dom, "//div[@id='content']//div[@class='listWidget']/div/div[@class='appRow']"):

link = eval_xpath_getindex(result, './/h5/a', 0)

url = base_url + link.attrib.get('href') + '#downloads'
3 changes: 0 additions & 3 deletions searx/engines/artic.py
@@ -29,7 +29,6 @@


def request(query, params):

args = urlencode(
{
'q': query,
@@ -45,12 +44,10 @@ def request(query, params):


def response(resp):

results = []
json_data = loads(resp.text)

for result in json_data['data']:

if not result['image_id']:
continue

1 change: 0 additions & 1 deletion searx/engines/bandcamp.py
@@ -63,7 +63,6 @@ def response(resp):
dom = html.fromstring(resp.text)

for result in eval_xpath_list(dom, '//li[contains(@class, "searchresult")]'):

link = eval_xpath_getindex(result, './/div[@class="itemurl"]/a', 0, default=None)
if link is None:
continue
5 changes: 0 additions & 5 deletions searx/engines/bing.py
@@ -45,7 +45,6 @@ def _get_offset_from_pageno(pageno):


def request(query, params):

offset = _get_offset_from_pageno(params.get('pageno', 1))

# logger.debug("params['pageno'] --> %s", params.get('pageno'))
@@ -86,7 +85,6 @@ def response(resp):
url_to_resolve_index = []
i = 0
for result in eval_xpath_list(dom, '//ol[@id="b_results"]/li[contains(@class, "b_algo")]'):

link = eval_xpath_getindex(result, './/h2/a', 0, None)
if link is None:
continue
@@ -138,7 +136,6 @@ def response(resp):
try:
result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
if "-" in result_len_container:

# Remove the part "from-to" for paginated request ...
result_len_container = result_len_container[result_len_container.find("-") * 2 + 2 :]

@@ -159,14 +156,12 @@

# get supported languages from their site
def _fetch_supported_languages(resp):

lang_tags = set()

dom = html.fromstring(resp.text)
lang_links = eval_xpath(dom, '//div[@id="language-section"]//li')

for _li in lang_links:

href = eval_xpath(_li, './/@href')[0]
(_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)
query = parse_qs(query, keep_blank_values=True)
2 changes: 0 additions & 2 deletions searx/engines/bing_news.py
@@ -90,7 +90,6 @@ def _get_url(query, language, offset, time_range):


def request(query, params):

if params['time_range'] and params['time_range'] not in time_range_dict:
return params

@@ -105,7 +104,6 @@


def response(resp):

results = []
rss = etree.fromstring(resp.content)
namespaces = rss.nsmap
1 change: 0 additions & 1 deletion searx/engines/core.py
@@ -29,7 +29,6 @@


def request(query, params):

if api_key == 'unset':
raise SearxEngineAPIException('missing CORE API key')

2 changes: 0 additions & 2 deletions searx/engines/dailymotion.py
@@ -77,7 +77,6 @@ def init(_engine_settings):


def request(query, params):

if not query:
return False

@@ -127,7 +126,6 @@ def response(resp):

# parse results
for res in search_res.get('list', []):

title = res['title']
url = res['url']

1 change: 1 addition & 0 deletions searx/engines/deezer.py
@@ -25,6 +25,7 @@
search_url = url + 'search?{query}&index={offset}'
iframe_src = "https://www.deezer.com/plugins/player?type=tracks&id={audioid}"


# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 25
1 change: 0 additions & 1 deletion searx/engines/demo_online.py
@@ -81,7 +81,6 @@ def response(resp):
json_data = loads(resp.text)

for result in json_data['data']:

if not result['image_id']:
continue

3 changes: 0 additions & 3 deletions searx/engines/deviantart.py
@@ -34,7 +34,6 @@


def request(query, params):

# https://www.deviantart.com/search/deviations?page=5&q=foo

query = {
@@ -50,14 +49,12 @@ def request(query, params):


def response(resp):

results = []

dom = html.fromstring(resp.text)

for row in dom.xpath('//div[contains(@data-hook, "content_row")]'):
for result in row.xpath('./div'):

a_tag = result.xpath('.//a[@data-hook="deviation_link"]')[0]
noscript_tag = a_tag.xpath('.//noscript')

1 change: 0 additions & 1 deletion searx/engines/docker_hub.py
@@ -25,7 +25,6 @@


def request(query, params):

params['url'] = search_url.format(query=urlencode(dict(q=query, page=params["pageno"])))
params["headers"]["Search-Version"] = "v3"

1 change: 0 additions & 1 deletion searx/engines/doku.py
@@ -37,7 +37,6 @@

# do search-request
def request(query, params):

params['url'] = base_url + search_url.format(query=urlencode({'id': query}))

return params
5 changes: 1 addition & 4 deletions searx/engines/duckduckgo.py
@@ -49,6 +49,7 @@
url = 'https://lite.duckduckgo.com/lite/'
url_ping = 'https://duckduckgo.com/t/sl_l'


# match query's language to a region code that duckduckgo will accept
def get_region_code(lang, lang_list=None):
if lang == 'all':
@@ -62,7 +63,6 @@ def get_region_code(lang, lang_list=None):


def request(query, params):

params['url'] = url
params['method'] = 'POST'

@@ -118,7 +118,6 @@ def request(query, params):

# get response from search-request
def response(resp):

headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
get(url_ping, headers=headers_ping)

@@ -143,7 +142,6 @@ def response(resp):
offset = 0

while len_tr_rows >= offset + 4:

# assemble table rows we need to scrap
tr_title = tr_rows[offset]
tr_content = tr_rows[offset + 1]
@@ -174,7 +172,6 @@

# get supported languages from their site
def _fetch_supported_languages(resp):

# response is a js file with regions as an embedded object
response_page = resp.text
response_page = response_page[response_page.find('regions:{') + 8 :]
1 change: 0 additions & 1 deletion searx/engines/emojipedia.py
@@ -48,7 +48,6 @@ def response(resp):
dom = html.fromstring(resp.text)

for result in eval_xpath_list(dom, "//ol[@class='search-results']/li"):

extracted_desc = extract_text(eval_xpath_getindex(result, './/p', 0))

if 'No results found.' in extracted_desc:
1 change: 1 addition & 0 deletions searx/engines/freesound.py
@@ -29,6 +29,7 @@
url + "search/text/?query={query}&page={page}&fields=name,url,download,created,description,type&token={api_key}"
)


# search request
def request(query, params):
params["url"] = search_url.format(
1 change: 0 additions & 1 deletion searx/engines/gigablast.py
@@ -41,7 +41,6 @@


def fetch_extra_param(query_args, headers):

# example:
#
# var uxrl='/search?c=main&qlangcountry=en-us&q=south&s=10&rand=1590740241635&n';
3 changes: 0 additions & 3 deletions searx/engines/google.py
@@ -220,7 +220,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
# https://developers.google.com/custom-search/docs/xml_results_appendices#languageCollections

if _any_language and supported_any_language:

# interpretation is left up to Google (based on whoogle)
#
# - add parameter ``source=lnt``
@@ -230,7 +229,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
ret_val['params']['source'] = 'lnt'

else:

# restricts search results to documents written in a particular
# language.
ret_val['params']['lr'] = "lang_" + lang_list.get(lang_country, language)
@@ -323,7 +321,6 @@ def response(resp):
# parse results

for result in eval_xpath_list(dom, results_xpath):

# google *sections*
if extract_text(eval_xpath(result, g_section_with_header)):
logger.debug("ignoring <g-section-with-header>")
1 change: 0 additions & 1 deletion searx/engines/google_images.py
@@ -93,7 +93,6 @@ def response(resp):
json_data = loads(resp.text[json_start:])

for item in json_data["ischj"]["metadata"]:

result_item = {
'url': item["result"]["referrer_url"],
'title': item["result"]["page_title"],
1 change: 0 additions & 1 deletion searx/engines/google_news.py
@@ -119,7 +119,6 @@ def response(resp):
dom = html.fromstring(resp.text)

for result in eval_xpath_list(dom, '//div[@class="xrnccd"]'):

# The first <a> tag in the <article> contains the link to the
# article The href attribute of the <a> is a google internal link,
# we can't use. The real link is hidden in the jslog attribute:
1 change: 0 additions & 1 deletion searx/engines/google_scholar.py
@@ -151,7 +151,6 @@ def response(resp): # pylint: disable=too-many-locals

# parse results
for result in eval_xpath_list(dom, '//div[@data-cid]'):

title = extract_text(eval_xpath(result, './/h3[1]//a'))

if not title:
1 change: 0 additions & 1 deletion searx/engines/google_videos.py
@@ -147,7 +147,6 @@ def response(resp):

# parse results
for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):

# ignore google *sections*
if extract_text(eval_xpath(result, g_section_with_header)):
logger.debug("ignoring <g-section-with-header>")
3 changes: 0 additions & 3 deletions searx/engines/imdb.py
@@ -39,20 +39,17 @@


def request(query, params):

query = query.replace(" ", "_").lower()
params['url'] = suggestion_url.format(letter=query[0], query=query)

return params


def response(resp):

suggestions = json.loads(resp.text)
results = []

for entry in suggestions.get('d', []):

# https://developer.imdb.com/documentation/key-concepts#imdb-ids
entry_id = entry['id']
categ = search_categories.get(entry_id[:2])
1 change: 0 additions & 1 deletion searx/engines/json_engine.py
@@ -64,7 +64,6 @@ def do_query(data, q):
qkey = q[0]

for key, value in iterate(data):

if len(q) == 1:
if key == qkey:
ret.append(value)
1 change: 0 additions & 1 deletion searx/engines/loc.py
@@ -33,7 +33,6 @@


def request(query, params):

search_path = search_string.format(query=urlencode({'q': query}), page=params['pageno'])

params['url'] = base_url + search_path
3 changes: 0 additions & 3 deletions searx/engines/mediathekviewweb.py
@@ -24,7 +24,6 @@


def request(query, params):

params['url'] = 'https://mediathekviewweb.de/api/query'
params['method'] = 'POST'
params['headers']['Content-type'] = 'text/plain'
@@ -50,7 +49,6 @@ def request(query, params):


def response(resp):

resp = loads(resp.text)

mwv_result = resp['result']
@@ -59,7 +57,6 @@ def response(resp):
results = []

for item in mwv_result_list:

item['hms'] = str(datetime.timedelta(seconds=item['duration']))

results.append(
1 change: 0 additions & 1 deletion searx/engines/mixcloud.py
@@ -38,7 +38,6 @@ def response(resp):
search_res = resp.json()

for result in search_res.get('data', []):

r_url = result['url']
publishedDate = parser.parse(result['created_time'])
res = {
1 change: 0 additions & 1 deletion searx/engines/openverse.py
@@ -28,7 +28,6 @@


def request(query, params):

search_path = search_string.format(query=urlencode({'q': query}), nb_per_page=nb_per_page, page=params['pageno'])

params['url'] = base_url + search_path