From b4c80d9ebba1a527f0e212e3864ff3ec5aeb4d0f Mon Sep 17 00:00:00 2001
From: Markus Heiser <markus.heiser@darmarit.de>
Date: Sat, 4 Feb 2023 17:58:53 +0100
Subject: [PATCH 1/2] [mod] make format.python : prepare python code for black 23.1.0

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
---
 searx/compat.py | 1 -
 searx/engines/apkmirror.py | 1 -
 searx/engines/artic.py | 3 ---
 searx/engines/bandcamp.py | 1 -
 searx/engines/bing.py | 5 -----
 searx/engines/bing_news.py | 2 --
 searx/engines/core.py | 1 -
 searx/engines/dailymotion.py | 2 --
 searx/engines/deezer.py | 1 +
 searx/engines/demo_online.py | 1 -
 searx/engines/deviantart.py | 3 ---
 searx/engines/docker_hub.py | 1 -
 searx/engines/doku.py | 1 -
 searx/engines/duckduckgo.py | 5 +----
 searx/engines/emojipedia.py | 1 -
 searx/engines/freesound.py | 1 +
 searx/engines/gigablast.py | 1 -
 searx/engines/google.py | 3 ---
 searx/engines/google_images.py | 1 -
 searx/engines/google_news.py | 1 -
 searx/engines/google_scholar.py | 1 -
 searx/engines/google_videos.py | 1 -
 searx/engines/imdb.py | 3 ---
 searx/engines/json_engine.py | 1 -
 searx/engines/loc.py | 1 -
 searx/engines/mediathekviewweb.py | 3 ---
 searx/engines/mixcloud.py | 1 -
 searx/engines/openverse.py | 1 -
 searx/engines/pdbe.py | 6 ++----
 searx/engines/petal_images.py | 1 -
 searx/engines/photon.py | 1 -
 searx/engines/qwant.py | 4 ----
 searx/engines/reddit.py | 2 --
 searx/engines/scanr_structures.py | 1 -
 searx/engines/searx_engine.py | 1 -
 searx/engines/sjp.py | 2 +-
 searx/engines/soundcloud.py | 1 -
 searx/engines/spotify.py | 1 +
 searx/engines/sqlite.py | 1 -
 searx/engines/stackexchange.py | 3 ---
 searx/engines/startpage.py | 2 --
 searx/engines/tineye.py | 3 ---
 searx/engines/torznab.py | 1 -
 searx/engines/wikidata.py | 3 ---
 searx/engines/wolframalpha_api.py | 1 -
 searx/engines/xpath.py | 1 -
 searx/engines/yacy.py | 1 -
 searx/engines/yahoo.py | 1 -
 searx/engines/yahoo_news.py | 1 -
 searx/flaskfix.py | 2 --
 searx/locales.py | 1 -
 searx/metrics/__init__.py | 3 ---
 searx/metrics/error_recorder.py | 1 -
 searx/metrics/models.py | 3 ---
 searx/network/network.py | 2 --
 searx/plugins/hostname_replace.py | 4 +---
 searx/plugins/tor_check.py | 2 --
 searx/query.py | 1 -
 searx/search/checker/impl.py | 4 ----
 searx/webapp.py | 4 ----
 searx/webutils.py | 1 -
 searxng_extra/update/update_languages.py | 2 --
 searxng_extra/update/update_osm_keys_tags.py | 1 -
 searxng_extra/update/update_pygments.py | 1 -
 tests/unit/network/test_network.py | 2 --
 tests/unit/test_external_bangs.py | 1 -
 tests/unit/test_query.py | 1 -
 tests/unit/test_utils.py | 1 -
 68 files changed, 8 insertions(+), 115 deletions(-)

diff --git a/searx/compat.py b/searx/compat.py index 15e27d45db0..5c7e6d7cbcb 100644 --- a/searx/compat.py +++ b/searx/compat.py @@ -14,7 +14,6 @@ from functools import cached_property # type: ignore except ImportError: - # cache_property has been added in py3.8 [1] # # To support cache_property in py3.7 the implementation from 3.8 has been
diff --git a/searx/engines/apkmirror.py b/searx/engines/apkmirror.py index ac7cd7431a5..e077c8c1054 100644 --- a/searx/engines/apkmirror.py +++ b/searx/engines/apkmirror.py @@ -49,7 +49,6 @@ def response(resp): # parse results for result in eval_xpath_list(dom, "//div[@id='content']//div[@class='listWidget']/div/div[@class='appRow']"): - link = eval_xpath_getindex(result, './/h5/a', 0) url = base_url + link.attrib.get('href') + '#downloads'
diff --git a/searx/engines/artic.py b/searx/engines/artic.py index c0ae0a5e7a9..efec3307cc5 100644 --- a/searx/engines/artic.py +++ b/searx/engines/artic.py @@ -29,7 +29,6 @@ def
request(query, params): - args = urlencode( { 'q': query, @@ -45,12 +44,10 @@ def request(query, params): def response(resp): - results = [] json_data = loads(resp.text) for result in json_data['data']: - if not result['image_id']: continue diff --git a/searx/engines/bandcamp.py b/searx/engines/bandcamp.py index 8feff1fe0dd..2895c682dbd 100644 --- a/searx/engines/bandcamp.py +++ b/searx/engines/bandcamp.py @@ -63,7 +63,6 @@ def response(resp): dom = html.fromstring(resp.text) for result in eval_xpath_list(dom, '//li[contains(@class, "searchresult")]'): - link = eval_xpath_getindex(result, './/div[@class="itemurl"]/a', 0, default=None) if link is None: continue diff --git a/searx/engines/bing.py b/searx/engines/bing.py index 783c0056acf..b9541677e63 100644 --- a/searx/engines/bing.py +++ b/searx/engines/bing.py @@ -45,7 +45,6 @@ def _get_offset_from_pageno(pageno): def request(query, params): - offset = _get_offset_from_pageno(params.get('pageno', 1)) # logger.debug("params['pageno'] --> %s", params.get('pageno')) @@ -86,7 +85,6 @@ def response(resp): url_to_resolve_index = [] i = 0 for result in eval_xpath_list(dom, '//ol[@id="b_results"]/li[contains(@class, "b_algo")]'): - link = eval_xpath_getindex(result, './/h2/a', 0, None) if link is None: continue @@ -138,7 +136,6 @@ def response(resp): try: result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()')) if "-" in result_len_container: - # Remove the part "from-to" for paginated request ... result_len_container = result_len_container[result_len_container.find("-") * 2 + 2 :] @@ -159,14 +156,12 @@ def response(resp): # get supported languages from their site def _fetch_supported_languages(resp): - lang_tags = set() dom = html.fromstring(resp.text) lang_links = eval_xpath(dom, '//div[@id="language-section"]//li') for _li in lang_links: - href = eval_xpath(_li, './/@href')[0] (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href) query = parse_qs(query, keep_blank_values=True) diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py index 7eea17bb45c..4f5e2ef34e2 100644 --- a/searx/engines/bing_news.py +++ b/searx/engines/bing_news.py @@ -90,7 +90,6 @@ def _get_url(query, language, offset, time_range): def request(query, params): - if params['time_range'] and params['time_range'] not in time_range_dict: return params @@ -105,7 +104,6 @@ def request(query, params): def response(resp): - results = [] rss = etree.fromstring(resp.content) namespaces = rss.nsmap diff --git a/searx/engines/core.py b/searx/engines/core.py index 2fa66e226af..a56f258be8f 100644 --- a/searx/engines/core.py +++ b/searx/engines/core.py @@ -29,7 +29,6 @@ def request(query, params): - if api_key == 'unset': raise SearxEngineAPIException('missing CORE API key') diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py index 7dd84dd2738..f1721a9ba07 100644 --- a/searx/engines/dailymotion.py +++ b/searx/engines/dailymotion.py @@ -77,7 +77,6 @@ def init(_engine_settings): def request(query, params): - if not query: return False @@ -127,7 +126,6 @@ def response(resp): # parse results for res in search_res.get('list', []): - title = res['title'] url = res['url'] diff --git a/searx/engines/deezer.py b/searx/engines/deezer.py index 63c71e3ccfa..3dd787c4856 100644 --- a/searx/engines/deezer.py +++ b/searx/engines/deezer.py @@ -25,6 +25,7 @@ search_url = url + 'search?{query}&index={offset}' iframe_src = "https://www.deezer.com/plugins/player?type=tracks&id={audioid}" + # do search-request def request(query, 
params): offset = (params['pageno'] - 1) * 25 diff --git a/searx/engines/demo_online.py b/searx/engines/demo_online.py index 08add5371d1..858839865b1 100644 --- a/searx/engines/demo_online.py +++ b/searx/engines/demo_online.py @@ -81,7 +81,6 @@ def response(resp): json_data = loads(resp.text) for result in json_data['data']: - if not result['image_id']: continue diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py index e44ac28e5a3..8194dd09f9f 100644 --- a/searx/engines/deviantart.py +++ b/searx/engines/deviantart.py @@ -34,7 +34,6 @@ def request(query, params): - # https://www.deviantart.com/search/deviations?page=5&q=foo query = { @@ -50,14 +49,12 @@ def request(query, params): def response(resp): - results = [] dom = html.fromstring(resp.text) for row in dom.xpath('//div[contains(@data-hook, "content_row")]'): for result in row.xpath('./div'): - a_tag = result.xpath('.//a[@data-hook="deviation_link"]')[0] noscript_tag = a_tag.xpath('.//noscript') diff --git a/searx/engines/docker_hub.py b/searx/engines/docker_hub.py index 1e492b196bc..9a57c4236c7 100644 --- a/searx/engines/docker_hub.py +++ b/searx/engines/docker_hub.py @@ -25,7 +25,6 @@ def request(query, params): - params['url'] = search_url.format(query=urlencode(dict(q=query, page=params["pageno"]))) params["headers"]["Search-Version"] = "v3" diff --git a/searx/engines/doku.py b/searx/engines/doku.py index 08f56bbe75d..29c113b46b5 100644 --- a/searx/engines/doku.py +++ b/searx/engines/doku.py @@ -37,7 +37,6 @@ # do search-request def request(query, params): - params['url'] = base_url + search_url.format(query=urlencode({'id': query})) return params diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py index 2a7956ca8f7..f294d15a16d 100644 --- a/searx/engines/duckduckgo.py +++ b/searx/engines/duckduckgo.py @@ -49,6 +49,7 @@ url = 'https://lite.duckduckgo.com/lite/' url_ping = 'https://duckduckgo.com/t/sl_l' + # match query's language to a region code that duckduckgo will accept def get_region_code(lang, lang_list=None): if lang == 'all': @@ -62,7 +63,6 @@ def get_region_code(lang, lang_list=None): def request(query, params): - params['url'] = url params['method'] = 'POST' @@ -118,7 +118,6 @@ def request(query, params): # get response from search-request def response(resp): - headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie']) get(url_ping, headers=headers_ping) @@ -143,7 +142,6 @@ def response(resp): offset = 0 while len_tr_rows >= offset + 4: - # assemble table rows we need to scrap tr_title = tr_rows[offset] tr_content = tr_rows[offset + 1] @@ -174,7 +172,6 @@ def response(resp): # get supported languages from their site def _fetch_supported_languages(resp): - # response is a js file with regions as an embedded object response_page = resp.text response_page = response_page[response_page.find('regions:{') + 8 :] diff --git a/searx/engines/emojipedia.py b/searx/engines/emojipedia.py index 020bf689b68..b6c04f48a1f 100644 --- a/searx/engines/emojipedia.py +++ b/searx/engines/emojipedia.py @@ -48,7 +48,6 @@ def response(resp): dom = html.fromstring(resp.text) for result in eval_xpath_list(dom, "//ol[@class='search-results']/li"): - extracted_desc = extract_text(eval_xpath_getindex(result, './/p', 0)) if 'No results found.' 
in extracted_desc:
diff --git a/searx/engines/freesound.py b/searx/engines/freesound.py index ea666662132..b56b5a414bf 100644 --- a/searx/engines/freesound.py +++ b/searx/engines/freesound.py @@ -29,6 +29,7 @@ url + "search/text/?query={query}&page={page}&fields=name,url,download,created,description,type&token={api_key}" ) + # search request def request(query, params): params["url"] = search_url.format(
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py index 1c40ff331b2..c53dd26a42e 100644 --- a/searx/engines/gigablast.py +++ b/searx/engines/gigablast.py @@ -41,7 +41,6 @@ def fetch_extra_param(query_args, headers): - # example: # # var uxrl='/search?c=main&qlangcountry=en-us&q=south&s=10&rand=1590740241635&n';
diff --git a/searx/engines/google.py b/searx/engines/google.py index bdb35143234..4626be9e04a 100644 --- a/searx/engines/google.py +++ b/searx/engines/google.py @@ -220,7 +220,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language): # https://developers.google.com/custom-search/docs/xml_results_appendices#languageCollections if _any_language and supported_any_language: - # interpretation is left up to Google (based on whoogle) # # - add parameter ``source=lnt`` @@ -230,7 +229,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language): ret_val['params']['source'] = 'lnt' else: - # restricts search results to documents written in a particular # language. ret_val['params']['lr'] = "lang_" + lang_list.get(lang_country, language) @@ -323,7 +321,6 @@ def response(resp): # parse results for result in eval_xpath_list(dom, results_xpath): - # google *sections* if extract_text(eval_xpath(result, g_section_with_header)): logger.debug("ignoring <g-section-with-header>")
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py index 528f8d21dc3..46798f6c2ad 100644 --- a/searx/engines/google_images.py +++ b/searx/engines/google_images.py @@ -93,7 +93,6 @@ def response(resp): json_data = loads(resp.text[json_start:]) for item in json_data["ischj"]["metadata"]: - result_item = { 'url': item["result"]["referrer_url"], 'title': item["result"]["page_title"],
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py index 1ada2d64d04..2831fda093e 100644 --- a/searx/engines/google_news.py +++ b/searx/engines/google_news.py @@ -119,7 +119,6 @@ def response(resp): dom = html.fromstring(resp.text) for result in eval_xpath_list(dom, '//div[@class="xrnccd"]'): - # The first <a> tag in the <article /> contains the link to the # article. The href attribute of the <a /> is a google internal link, # we can't use. The real link is hidden in the jslog attribute:
diff --git a/searx/engines/google_scholar.py b/searx/engines/google_scholar.py index c07cd4cea5e..f62255e101f 100644 --- a/searx/engines/google_scholar.py +++ b/searx/engines/google_scholar.py @@ -151,7 +151,6 @@ def response(resp): # pylint: disable=too-many-locals # parse results for result in eval_xpath_list(dom, '//div[@data-cid]'): - title = extract_text(eval_xpath(result, './/h3[1]//a')) if not title:
diff --git a/searx/engines/google_videos.py b/searx/engines/google_videos.py index fc574bd4813..6d158b1164a 100644 --- a/searx/engines/google_videos.py +++ b/searx/engines/google_videos.py @@ -147,7 +147,6 @@ def response(resp): # parse results for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'): - # ignore google *sections* if extract_text(eval_xpath(result, g_section_with_header)): logger.debug("ignoring <g-section-with-header>")
diff --git a/searx/engines/imdb.py b/searx/engines/imdb.py index 0897b8dcaac..f2f4c1457dc 100644 --- a/searx/engines/imdb.py +++ b/searx/engines/imdb.py @@ -39,7 +39,6 @@ def request(query, params): - query = query.replace(" ", "_").lower() params['url'] = suggestion_url.format(letter=query[0], query=query) @@ -47,12 +46,10 @@ def response(resp): - suggestions = json.loads(resp.text) results = [] for entry in suggestions.get('d', []): - # https://developer.imdb.com/documentation/key-concepts#imdb-ids entry_id = entry['id'] categ = search_categories.get(entry_id[:2])
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py index 2dd3bc55e2a..9d3db4b6be5 100644 --- a/searx/engines/json_engine.py +++ b/searx/engines/json_engine.py @@ -64,7 +64,6 @@ def do_query(data, q): qkey = q[0] for key, value in iterate(data): - if len(q) == 1: if key == qkey: ret.append(value)
diff --git a/searx/engines/loc.py b/searx/engines/loc.py index 0b2f3a6896b..830b7e0aa2c 100644 --- a/searx/engines/loc.py +++ b/searx/engines/loc.py @@ -33,7 +33,6 @@ def request(query, params): - search_path = search_string.format(query=urlencode({'q': query}), page=params['pageno']) params['url'] = base_url + search_path
diff --git a/searx/engines/mediathekviewweb.py b/searx/engines/mediathekviewweb.py index 5570ebe2499..16abaa50342 100644 --- a/searx/engines/mediathekviewweb.py +++ b/searx/engines/mediathekviewweb.py @@ -24,7 +24,6 @@ def request(query, params): - params['url'] = 'https://mediathekviewweb.de/api/query' params['method'] = 'POST' params['headers']['Content-type'] = 'text/plain' @@ -50,7 +49,6 @@ def request(query, params): def response(resp): - resp = loads(resp.text) mwv_result = resp['result'] @@ -59,7 +57,6 @@ def response(resp): results = [] for item in mwv_result_list: - item['hms'] = str(datetime.timedelta(seconds=item['duration'])) results.append(
diff --git a/searx/engines/mixcloud.py b/searx/engines/mixcloud.py index 3f255697ea4..e10ce0b638e 100644 --- a/searx/engines/mixcloud.py +++ b/searx/engines/mixcloud.py @@ -38,7 +38,6 @@ def response(resp): search_res = resp.json() for result in search_res.get('data', []): - r_url = result['url'] publishedDate = parser.parse(result['created_time']) res = {
diff --git a/searx/engines/openverse.py b/searx/engines/openverse.py index 9f4636e4166..d47f196b743 100644 --- a/searx/engines/openverse.py +++ b/searx/engines/openverse.py @@ -28,7 +28,6 @@ def request(query, params): - search_path = search_string.format(query=urlencode({'q': query}),
nb_per_page=nb_per_page, page=params['pageno']) params['url'] = base_url + search_path diff --git a/searx/engines/pdbe.py b/searx/engines/pdbe.py index 34c8d322744..2a8c2d9b9d1 100644 --- a/searx/engines/pdbe.py +++ b/searx/engines/pdbe.py @@ -31,7 +31,6 @@ def request(query, params): - params['url'] = pdbe_solr_url params['method'] = 'POST' params['data'] = {'q': query, 'wt': "json"} # request response in parsable format @@ -66,21 +65,20 @@ def construct_body(result): year=result['release_year'], ) img_src = pdbe_preview_url.format(pdb_id=result['pdb_id']) - except (KeyError): + except KeyError: content = None img_src = None # construct url for preview image try: img_src = pdbe_preview_url.format(pdb_id=result['pdb_id']) - except (KeyError): + except KeyError: img_src = None return [title, content, img_src] def response(resp): - results = [] json = loads(resp.text)['response']['docs'] diff --git a/searx/engines/petal_images.py b/searx/engines/petal_images.py index 88853c1bd18..2a6826f8eb3 100644 --- a/searx/engines/petal_images.py +++ b/searx/engines/petal_images.py @@ -32,7 +32,6 @@ def request(query, params): - search_path = search_string.format( query=urlencode({'query': query}), page=params['pageno'], diff --git a/searx/engines/photon.py b/searx/engines/photon.py index 2ea393679d2..25bce90ab3e 100644 --- a/searx/engines/photon.py +++ b/searx/engines/photon.py @@ -53,7 +53,6 @@ def response(resp): # parse results for r in json.get('features', {}): - properties = r.get('properties') if not properties: diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py index 6de2176d049..58e0fed95e8 100644 --- a/searx/engines/qwant.py +++ b/searx/engines/qwant.py @@ -149,7 +149,6 @@ def response(resp): return [] for row in mainline: - mainline_type = row.get('type', 'web') if mainline_type != qwant_categ: continue @@ -160,7 +159,6 @@ def response(resp): mainline_items = row.get('items', []) for item in mainline_items: - title = item.get('title', None) res_url = item.get('url', None) @@ -175,7 +173,6 @@ def response(resp): ) elif mainline_type == 'news': - pub_date = item['date'] if pub_date is not None: pub_date = datetime.fromtimestamp(pub_date) @@ -244,7 +241,6 @@ def response(resp): def _fetch_supported_languages(resp): - text = resp.text text = text[text.find('INITIAL_PROPS') :] text = text[text.find('{') : text.find('')] diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py index 36d92339da3..73ed752492f 100644 --- a/searx/engines/reddit.py +++ b/searx/engines/reddit.py @@ -27,7 +27,6 @@ def request(query, params): - query = urlencode({'q': query, 'limit': page_size}) params['url'] = search_url.format(query=query) @@ -35,7 +34,6 @@ def request(query, params): def response(resp): - img_results = [] text_results = [] diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py index ad27079dd5b..7bdc30c435e 100644 --- a/searx/engines/scanr_structures.py +++ b/searx/engines/scanr_structures.py @@ -28,7 +28,6 @@ # do search-request def request(query, params): - params['url'] = search_url params['method'] = 'POST' params['headers']['Content-type'] = "application/json" diff --git a/searx/engines/searx_engine.py b/searx/engines/searx_engine.py index 84a8e644945..ddf55b72dc0 100644 --- a/searx/engines/searx_engine.py +++ b/searx/engines/searx_engine.py @@ -45,7 +45,6 @@ def request(query, params): # get response from search-request def response(resp): - response_json = loads(resp.text) results = response_json['results'] diff --git a/searx/engines/sjp.py 
b/searx/engines/sjp.py index 6daa46e7878..711fed73646 100644 --- a/searx/engines/sjp.py +++ b/searx/engines/sjp.py @@ -80,7 +80,7 @@ def response(resp): for src in definitions: infobox += f"
<div><small>{src[0]}</small>"
         infobox += "<ul>"
-        for (def_text, sub_def) in src[1]:
+        for def_text, sub_def in src[1]:
             infobox += f"<li>{def_text}</li>"
             if sub_def:
                 infobox += "<ol>
      " diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py index 78947c69c5a..edb934aebd2 100644 --- a/searx/engines/soundcloud.py +++ b/searx/engines/soundcloud.py @@ -85,7 +85,6 @@ def response(resp): # parse results for result in search_res.get('collection', []): - if result['kind'] in ('track', 'playlist'): uri = quote_plus(result['uri']) res = { diff --git a/searx/engines/spotify.py b/searx/engines/spotify.py index 87edb7f1b3b..491a7058e1d 100644 --- a/searx/engines/spotify.py +++ b/searx/engines/spotify.py @@ -29,6 +29,7 @@ url = 'https://api.spotify.com/' search_url = url + 'v1/search?{query}&type=track&offset={offset}' + # do search-request def request(query, params): offset = (params['pageno'] - 1) * 20 diff --git a/searx/engines/sqlite.py b/searx/engines/sqlite.py index 6de12f5fec5..0aa94a4c9ad 100644 --- a/searx/engines/sqlite.py +++ b/searx/engines/sqlite.py @@ -54,7 +54,6 @@ def search(query, params): query_to_run = query_str + ' LIMIT :limit OFFSET :offset' with sqlite_cursor() as cur: - cur.execute(query_to_run, query_params) col_names = [cn[0] for cn in cur.description] diff --git a/searx/engines/stackexchange.py b/searx/engines/stackexchange.py index 99615b1a7f7..b93324f7d9d 100644 --- a/searx/engines/stackexchange.py +++ b/searx/engines/stackexchange.py @@ -31,7 +31,6 @@ def request(query, params): - args = urlencode( { 'q': query, @@ -48,12 +47,10 @@ def request(query, params): def response(resp): - results = [] json_data = loads(resp.text) for result in json_data['items']: - content = "[%s]" % ", ".join(result['tags']) content += " %s" % result['owner']['display_name'] if result['is_answered']: diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py index f857f7b6d8e..d46595d402a 100644 --- a/searx/engines/startpage.py +++ b/searx/engines/startpage.py @@ -60,7 +60,6 @@ def raise_captcha(resp): - if str(resp.url).startswith('https://www.startpage.com/sp/captcha'): raise SearxEngineCaptchaException() @@ -104,7 +103,6 @@ def get_sc_code(headers): # do search-request def request(query, params): - # pylint: disable=line-too-long # The format string from Startpage's FFox add-on [1]:: # diff --git a/searx/engines/tineye.py b/searx/engines/tineye.py index 6c5ff134caa..353a85747ab 100644 --- a/searx/engines/tineye.py +++ b/searx/engines/tineye.py @@ -114,7 +114,6 @@ def parse_tineye_match(match_json): backlinks = [] if "backlinks" in match_json: - for backlink_json in match_json["backlinks"]: if not isinstance(backlink_json, dict): continue @@ -164,7 +163,6 @@ def response(resp): if resp.is_error: if resp.status_code in (400, 422): - message = 'HTTP status: %s' % resp.status_code error = json_data.get('error') s_key = json_data.get('suggestions', {}).get('key', '') @@ -195,7 +193,6 @@ def response(resp): # append results from matches for match_json in json_data['matches']: - tineye_match = parse_tineye_match(match_json) if not tineye_match['backlinks']: continue diff --git a/searx/engines/torznab.py b/searx/engines/torznab.py index a48017c133e..7ddb79178a8 100644 --- a/searx/engines/torznab.py +++ b/searx/engines/torznab.py @@ -42,7 +42,6 @@ def init(engine_settings=None): # pylint: disable=unused-argument def request(query, params): - search_url = base_url + '?t=search&q={search_query}' if len(api_key) > 0: search_url += '&apikey={api_key}' diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py index e0ad2e6c9e2..b270a8afcb3 100644 --- a/searx/engines/wikidata.py +++ b/searx/engines/wikidata.py @@ -522,7 +522,6 @@ def get_str(self, 
result, language): class WDArticle(WDAttribute): - __slots__ = 'language', 'kwargs' def __init__(self, language, kwargs=None): @@ -568,7 +567,6 @@ def get_str(self, result, language): class WDURLAttribute(WDAttribute): - HTTP_WIKIMEDIA_IMAGE = 'http://commons.wikimedia.org/wiki/Special:FilePath/' __slots__ = 'url_id', 'kwargs' @@ -623,7 +621,6 @@ def get_geo_url(self, result, osm_zoom=19): class WDImageAttribute(WDURLAttribute): - __slots__ = ('priority',) def __init__(self, name, url_id=None, priority=100): diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py index 6a2423b5174..f54ac6962e7 100644 --- a/searx/engines/wolframalpha_api.py +++ b/searx/engines/wolframalpha_api.py @@ -100,7 +100,6 @@ def response(resp): image = subpod.xpath(image_xpath) if content and pod_id not in image_pods: - if pod_is_result or not result_content: if pod_id != "Input": result_content = "%s: %s" % (pod_title, content) diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py index 2dc22028f25..3a18ec302e7 100644 --- a/searx/engines/xpath.py +++ b/searx/engines/xpath.py @@ -204,7 +204,6 @@ def response(resp): # pylint: disable=too-many-branches if results_xpath: for result in eval_xpath_list(dom, results_xpath): - url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url) title = extract_text(eval_xpath_list(result, title_xpath, min_len=1)) content = extract_text(eval_xpath_list(result, content_xpath)) diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py index 12e7305db2e..de2b85c8f22 100644 --- a/searx/engines/yacy.py +++ b/searx/engines/yacy.py @@ -79,7 +79,6 @@ def response(resp): for result in search_results[0].get('items', []): # parse image results if resp.search_params.get('category') == 'images': - result_url = '' if 'url' in result: result_url = result['url'] diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py index c13ce6d7893..efdcf8952d7 100644 --- a/searx/engines/yahoo.py +++ b/searx/engines/yahoo.py @@ -69,7 +69,6 @@ def _get_language(params): - lang = language_aliases.get(params['language']) if lang is None: lang = match_language(params['language'], supported_languages, language_aliases) diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py index 00f208b1761..8bf406c9774 100644 --- a/searx/engines/yahoo_news.py +++ b/searx/engines/yahoo_news.py @@ -71,7 +71,6 @@ def response(resp): # parse results for result in eval_xpath_list(dom, '//ol[contains(@class,"searchCenterMiddle")]//li'): - url = eval_xpath_getindex(result, './/h4/a/@href', 0, None) if url is None: continue diff --git a/searx/flaskfix.py b/searx/flaskfix.py index 326c4b98184..9d58a549130 100644 --- a/searx/flaskfix.py +++ b/searx/flaskfix.py @@ -33,14 +33,12 @@ class ReverseProxyPathFix: # pylint: disable=too-few-public-methods def __init__(self, wsgi_app): - self.wsgi_app = wsgi_app self.script_name = None self.scheme = None self.server = None if settings['server']['base_url']: - # If base_url is specified, then these values from are given # preference over any Flask's generics. diff --git a/searx/locales.py b/searx/locales.py index 9e06bf39d44..3b4edb29ff5 100644 --- a/searx/locales.py +++ b/searx/locales.py @@ -246,7 +246,6 @@ def get_engine_locale(searxng_locale, engine_locales, default=None): # engine does support the searxng_lang in this other territory. 
if locale.language: - searxng_lang = locale.language if locale.script: searxng_lang += '_' + locale.script diff --git a/searx/metrics/__init__.py b/searx/metrics/__init__.py index 18d2170df44..7cefe7e3726 100644 --- a/searx/metrics/__init__.py +++ b/searx/metrics/__init__.py @@ -179,7 +179,6 @@ def get_engines_stats(engine_name_list): max_time_total = max_result_count = None for engine_name in engine_name_list: - sent_count = counter('engine', engine_name, 'search', 'count', 'sent') if sent_count == 0: continue @@ -218,7 +217,6 @@ def get_engines_stats(engine_name_list): time_http_p80 = time_http_p95 = 0 if time_http is not None: - time_http_p80 = histogram('engine', engine_name, 'time', 'http').percentage(80) time_http_p95 = histogram('engine', engine_name, 'time', 'http').percentage(95) @@ -227,7 +225,6 @@ def get_engines_stats(engine_name_list): stats['http_p95'] = round(time_http_p95, 1) if time_total is not None: - time_total_p80 = histogram('engine', engine_name, 'time', 'total').percentage(80) time_total_p95 = histogram('engine', engine_name, 'time', 'total').percentage(95) diff --git a/searx/metrics/error_recorder.py b/searx/metrics/error_recorder.py index 1d0d6e7a3da..5e68592685e 100644 --- a/searx/metrics/error_recorder.py +++ b/searx/metrics/error_recorder.py @@ -17,7 +17,6 @@ class ErrorContext: - __slots__ = ( 'filename', 'function', diff --git a/searx/metrics/models.py b/searx/metrics/models.py index 900a7fa93f8..8ab17d605b4 100644 --- a/searx/metrics/models.py +++ b/searx/metrics/models.py @@ -12,7 +12,6 @@ class Histogram: - _slots__ = '_lock', '_size', '_sum', '_quartiles', '_count', '_width' def __init__(self, width=10, size=200): @@ -101,7 +100,6 @@ def __repr__(self): class HistogramStorage: - __slots__ = 'measures', 'histogram_class' def __init__(self, histogram_class=Histogram): @@ -127,7 +125,6 @@ def dump(self): class CounterStorage: - __slots__ = 'counters', 'lock' def __init__(self): diff --git a/searx/network/network.py b/searx/network/network.py index 6e1825dd953..a7f9b74131d 100644 --- a/searx/network/network.py +++ b/searx/network/network.py @@ -37,7 +37,6 @@ class Network: - __slots__ = ( 'enable_http', 'verify', @@ -76,7 +75,6 @@ def __init__( max_redirects=30, logger_name=None, ): - self.enable_http = enable_http self.verify = verify self.enable_http2 = enable_http2 diff --git a/searx/plugins/hostname_replace.py b/searx/plugins/hostname_replace.py index 039aadb9114..ff0f63596fc 100644 --- a/searx/plugins/hostname_replace.py +++ b/searx/plugins/hostname_replace.py @@ -21,9 +21,7 @@ def on_result(request, search, result): - - for (pattern, replacement) in replacements.items(): - + for pattern, replacement in replacements.items(): if parsed in result: if pattern.search(result[parsed].netloc): # to keep or remove this result from the result list depends diff --git a/searx/plugins/tor_check.py b/searx/plugins/tor_check.py index 7d50bbcb5d1..ae85ae2da4e 100644 --- a/searx/plugins/tor_check.py +++ b/searx/plugins/tor_check.py @@ -45,12 +45,10 @@ def post_search(request, search): - if search.search_query.pageno > 1: return True if search.search_query.query.lower() == "tor-check": - # Request the list of tor exit nodes. 
try: resp = get("https://check.torproject.org/exit-addresses") diff --git a/searx/query.py b/searx/query.py index b8e1c1275a3..6f5ec818cdc 100644 --- a/searx/query.py +++ b/searx/query.py @@ -12,7 +12,6 @@ class QueryPartParser(ABC): - __slots__ = "raw_text_query", "enable_autocomplete" @staticmethod diff --git a/searx/search/checker/impl.py b/searx/search/checker/impl.py index 37f145e1ed3..1136bc4545f 100644 --- a/searx/search/checker/impl.py +++ b/searx/search/checker/impl.py @@ -150,7 +150,6 @@ def _search_query_diff( class TestResults: - __slots__ = 'errors', 'logs', 'languages' def __init__(self): @@ -182,7 +181,6 @@ def __iter__(self): class ResultContainerTests: - __slots__ = 'test_name', 'search_query', 'result_container', 'languages', 'stop_test', 'test_results' def __init__( @@ -320,7 +318,6 @@ def one_title_contains(self, title: str): class CheckerTests: - __slots__ = 'test_results', 'test_name', 'result_container_tests_list' def __init__( @@ -352,7 +349,6 @@ def unique_results(self): class Checker: - __slots__ = 'processor', 'tests', 'test_results' def __init__(self, processor: EngineProcessor): diff --git a/searx/webapp.py b/searx/webapp.py index d9ca3941cda..a328ea6a89f 100755 --- a/searx/webapp.py +++ b/searx/webapp.py @@ -268,7 +268,6 @@ def code_highlighter(codelines, language=None): # new codeblock is detected if last_line is not None and last_line + 1 != line: - # highlight last codepart formatter = HtmlFormatter(linenos='inline', linenostart=line_code_start, cssclass="code-highlight") html_code = html_code + highlight(tmp_code, lexer, formatter) @@ -334,7 +333,6 @@ def morty_proxify(url: str): def image_proxify(url: str): - if url.startswith('//'): url = 'https:' + url @@ -405,7 +403,6 @@ def get_client_settings(): def render(template_name: str, **kwargs): - kwargs['client_settings'] = str( base64.b64encode( bytes( @@ -896,7 +893,6 @@ def autocompleter(): # normal autocompletion results only appear if no inner results returned # and there is a query part if len(raw_text_query.autocomplete_list) == 0 and len(sug_prefix) > 0: - # get language from cookie language = request.preferences.get_value('language') if not language or language == 'all': diff --git a/searx/webutils.py b/searx/webutils.py index 7b9a8045c67..e8e5b7d5cb2 100644 --- a/searx/webutils.py +++ b/searx/webutils.py @@ -157,7 +157,6 @@ def regex_highlight_cjk(word: str) -> str: def highlight_content(content, query): - if not content: return None diff --git a/searxng_extra/update/update_languages.py b/searxng_extra/update/update_languages.py index 87b13b27614..946aec74156 100755 --- a/searxng_extra/update/update_languages.py +++ b/searxng_extra/update/update_languages.py @@ -129,7 +129,6 @@ def join_language_lists(engines_languages): language_list = {} for engine_name in engines_languages: for lang_code in engines_languages[engine_name]: - # apply custom fixes if necessary if lang_code in getattr(engines[engine_name], 'language_aliases', {}).values(): lang_code = next( @@ -275,7 +274,6 @@ def write_languages_file(languages): language_codes = [] for code in sorted(languages): - name = languages[code]['name'] if name is None: print("ERROR: languages['%s'] --> %s" % (code, languages[code])) diff --git a/searxng_extra/update/update_osm_keys_tags.py b/searxng_extra/update/update_osm_keys_tags.py index 72197498dba..266421efae4 100755 --- a/searxng_extra/update/update_osm_keys_tags.py +++ b/searxng_extra/update/update_osm_keys_tags.py @@ -208,7 +208,6 @@ def get_osm_tags_filename(): if __name__ == '__main__': - 
set_timeout_for_thread(60) result = { 'keys': optimize_keys(get_keys()), diff --git a/searxng_extra/update/update_pygments.py b/searxng_extra/update/update_pygments.py index ca14868a279..966b6dbf469 100755 --- a/searxng_extra/update/update_pygments.py +++ b/searxng_extra/update/update_pygments.py @@ -58,7 +58,6 @@ def get_css(cssclass, style): def main(): - fname = 'static/themes/simple/src/generated/pygments.less' print("update: %s" % fname) with open(get_output_filename(fname), 'w') as f: diff --git a/tests/unit/network/test_network.py b/tests/unit/network/test_network.py index 905b981c1e4..0738fa7f082 100644 --- a/tests/unit/network/test_network.py +++ b/tests/unit/network/test_network.py @@ -122,7 +122,6 @@ async def test_request(self): class TestNetworkRequestRetries(SearxTestCase): - TEXT = 'Lorem Ipsum' @classmethod @@ -195,7 +194,6 @@ async def get_response(*args, **kwargs): class TestNetworkStreamRetries(SearxTestCase): - TEXT = 'Lorem Ipsum' @classmethod diff --git a/tests/unit/test_external_bangs.py b/tests/unit/test_external_bangs.py index 794edf159e3..258c178e823 100644 --- a/tests/unit/test_external_bangs.py +++ b/tests/unit/test_external_bangs.py @@ -32,7 +32,6 @@ class TestGetNode(SearxTestCase): - DB = { 'trie': { 'exam': { diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index db25da8f3b9..8acb72ed3a8 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -228,7 +228,6 @@ def test_external_bang_autocomplete_empty(self): class TestBang(SearxTestCase): - SPECIFIC_BANGS = ['!dummy_engine', '!du', '!general'] THE_QUERY = 'the query' diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 6f51f1ee33e..100a9bcbd48 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -154,7 +154,6 @@ def test_invalid_html(self): class TestXPathUtils(SearxTestCase): - TEST_DOC = """
<ul>
        <li>Text in <b>bold</b> and <i>italic</i></li>
        <li>Another text</li>
        </ul>"""

From 39a31b87aa3fdfd2a07fbafa59d3f20e7a21062f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sat, 4 Feb 2023 08:07:50 +0000
Subject: [PATCH 2/2] Bump black from 22.12.0 to 23.1.0

Bumps [black](https://github.com/psf/black) from 22.12.0 to 23.1.0.
- [Release notes](https://github.com/psf/black/releases)
- [Changelog](https://github.com/psf/black/blob/main/CHANGES.md)
- [Commits](https://github.com/psf/black/compare/22.12.0...23.1.0)

---
updated-dependencies:
- dependency-name: black
  dependency-type: direct:development
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
---
 requirements-dev.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements-dev.txt b/requirements-dev.txt
index 0c4b07311b4..b36c0deb580 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,7 +1,7 @@
 mock==5.0.1
 nose2[coverage_plugin]==0.12.0
 cov-core==1.15.0
-black==22.12.0
+black==23.1.0
 pylint==2.15.10
 splinter==0.19.0
 selenium==4.8.0
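
The two patches belong together: patch 1/2 rewrites the tree by hand so that running `make format.python` with the black version bumped in patch 2/2 produces no further diff. The hunks above reduce to three mechanical rewrites: a blank line sitting directly after a block opener (`def ...:`, `class ...:`, `for ...:`) is dropped, redundant parentheses around a lone exception class and around tuple targets in `for` loops are removed, and a second blank line is enforced before a module-level `def` whose leading comment sticks to it. A minimal before/after sketch of the same rewrites on a hypothetical engine module (names invented for illustration, not taken from the searx tree):

# before -- the layout black 22.12.0 left untouched
search_url = 'https://example.org/search?q={query}'
# do search-request
def request(query, params):

    try:
        offset = (params['pageno'] - 1) * 10
    except (KeyError):
        offset = 0

    for (key, value) in sorted(params.items()):
        print(key, value)

    params['url'] = search_url.format(query=query) + '&offset=%i' % offset
    return params

# after -- the shape this patch series settles on: no blank line directly
# after the def line, unparenthesized except clause and for-loop target,
# and two blank lines before the commented module-level def
search_url = 'https://example.org/search?q={query}'


# do search-request
def request(query, params):
    try:
        offset = (params['pageno'] - 1) * 10
    except KeyError:
        offset = 0

    for key, value in sorted(params.items()):
        print(key, value)

    params['url'] = search_url.format(query=query) + '&offset=%i' % offset
    return params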