Diffstat (limited to 'searx/engines/startpage.py')
-rw-r--r--  searx/engines/startpage.py  206
1 file changed, 172 insertions(+), 34 deletions(-)
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index 7656739..513f508 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -1,22 +1,40 @@
-# Startpage (Web)
-#
-# @website https://startpage.com
-# @provide-api no (nothing found)
-#
-# @using-api no
-# @results HTML
-# @stable no (HTML can change)
-# @parse url, title, content
-#
-# @todo paging
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""Startpage (Web)
+
+"""
-from lxml import html
-from dateutil import parser
-from datetime import datetime, timedelta
import re
-from searx.engines.xpath import extract_text
-from searx.languages import language_codes
-from searx.utils import eval_xpath
+from time import time
+
+from urllib.parse import urlencode
+from unicodedata import normalize, combining
+from datetime import datetime, timedelta
+
+from dateutil import parser
+from lxml import html
+from babel import Locale
+from babel.localedata import locale_identifiers
+
+from searx import logger
+from searx.poolrequests import get
+from searx.utils import extract_text, eval_xpath, match_language
+from searx.exceptions import (
+ SearxEngineResponseException,
+ SearxEngineCaptchaException,
+)
+
+logger = logger.getChild('startpage')
+
+# about
+about = {
+ "website": 'https://startpage.com',
+ "wikidata_id": 'Q2333295',
+ "official_api_documentation": None,
+ "use_official_api": False,
+ "require_api_key": False,
+ "results": 'HTML',
+}
# engine dependent config
categories = ['general']
@@ -25,42 +43,98 @@ categories = ['general']
# storing of qid's between multiple search-calls
paging = True
-language_support = True
+supported_languages_url = 'https://www.startpage.com/do/settings'
# search-url
base_url = 'https://startpage.com/'
-search_url = base_url + 'do/search'
+search_url = base_url + 'sp/search?'
# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
-results_xpath = '//div[@class="w-gl__result"]'
-link_xpath = './/a[@class="w-gl__result-title"]'
+results_xpath = '//div[@class="w-gl__result__main"]'
+link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'
+# timestamp of the last fetch of 'sc' code
+sc_code_ts = 0
+sc_code = ''
+
+
+def raise_captcha(resp):
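+    """Raise a SearxEngineCaptchaException when the response was redirected to
+    startpage's CAPTCHA page."""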
+
+ if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
+ # suspend CAPTCHA for 7 days
+ raise SearxEngineCaptchaException(suspended_time=7 * 24 * 3600)
+
+
+def get_sc_code(headers):
+ """Get an actual `sc` argument from startpage's home page.
+
+ Startpage puts a `sc` argument on every link. Without this argument
+ startpage considers the request is from a bot. We do not know what is
+ encoded in the value of the `sc` argument, but it seems to be a kind of a
+ *time-stamp*. This *time-stamp* is valid for a few hours.
+
+ This function scrap a new *time-stamp* from startpage's home page every hour
+ (3000 sec).
+
+ """
+
+ global sc_code_ts, sc_code # pylint: disable=global-statement
+
+ if time() > (sc_code_ts + 3000):
+ logger.debug("query new sc time-stamp ...")
+
+ resp = get(base_url, headers=headers)
+ raise_captcha(resp)
+ dom = html.fromstring(resp.text)
+
+ try:
+ # href --> '/?sc=adrKJMgF8xwp20'
+ href = eval_xpath(dom, '//a[@class="footer-home__logo"]')[0].get('href')
+ except IndexError as exc:
+ # suspend startpage API --> https://github.com/searxng/searxng/pull/695
+ raise SearxEngineResponseException(
+ suspended_time=7 * 24 * 3600, message="PR-695: query new sc time-stamp failed!"
+ ) from exc
+
+ sc_code = href[5:]
+ sc_code_ts = time()
+ logger.debug("new value is: %s", sc_code)
+
+ return sc_code
+
# do search-request
def request(query, params):
- params['url'] = search_url
- params['method'] = 'POST'
- params['data'] = {
+ # pylint: disable=line-too-long
+ # The format string from Startpage's FFox add-on [1]::
+ #
+ # https://www.startpage.com/do/dsearch?query={searchTerms}&cat=web&pl=ext-ff&language=__MSG_extensionUrlLanguage__&extVersion=1.3.0
+ #
+ # [1] https://addons.mozilla.org/en-US/firefox/addon/startpage-private-search/
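+    #
+    # A request URL assembled below looks like this (the sc value is only an
+    # illustration, the real one is scraped by get_sc_code):
+    #
+    #     https://startpage.com/sp/search?query=searx&page=1&cat=web&sc=adrKJMgF8xwp20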
+
+ args = {
'query': query,
'page': params['pageno'],
'cat': 'web',
- 'cmd': 'process_search',
- 'engine0': 'v1all',
+ # 'pl': 'ext-ff',
+ # 'extVersion': '1.3.0',
+ # 'abp': "-1",
+ 'sc': get_sc_code(params['headers']),
}
# set language if specified
if params['language'] != 'all':
- language = 'english'
- for lc, _, _, lang in language_codes:
- if lc == params['language']:
- language = lang
- params['data']['language'] = language
- params['data']['lui'] = language
+ lang_code = match_language(params['language'], supported_languages, fallback=None)
+ if lang_code:
+ language_name = supported_languages[lang_code]['alias']
+ args['language'] = language_name
+ args['lui'] = language_name
+ params['url'] = search_url + urlencode(args)
return params
@@ -99,11 +173,14 @@ def response(resp):
if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
date_pos = content.find('...') + 4
date_string = content[0:date_pos - 5]
- published_date = parser.parse(date_string, dayfirst=True)
-
# fix content string
content = content[date_pos:]
+ try:
+ published_date = parser.parse(date_string, dayfirst=True)
+ except ValueError:
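+                # the date string could not be parsed, ignore it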
+ pass
+
# check if search result starts with something like: "5 days ago ... "
elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
date_pos = content.find('...') + 4
@@ -129,3 +206,64 @@ def response(resp):
# return results
return results
+
+
+# get supported languages from their site
+def _fetch_supported_languages(resp):
+    # startpage's language selector is a mess: each option has a displayed name
+    # and a value, either of which may represent the language name in the native
+    # script, the language name in English, an English transliteration of the
+    # native name, the English name of the writing script used by the language,
+    # or occasionally something else entirely.
+
+    # these cases are so special they need to be hardcoded; a couple of them are misspellings
+ language_names = {
+ 'english_uk': 'en-GB',
+ 'fantizhengwen': ['zh-TW', 'zh-HK'],
+ 'hangul': 'ko',
+ 'malayam': 'ml',
+ 'norsk': 'nb',
+ 'sinhalese': 'si',
+ 'sudanese': 'su'
+ }
+
+ # get the English name of every language known by babel
+ language_names.update(
+ {
+ # fmt: off
+ name.lower(): lang_code
+ # pylint: disable=protected-access
+ for lang_code, name in Locale('en')._data['languages'].items()
+ # fmt: on
+ }
+ )
+
+ # get the native name of every language known by babel
+ for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
+ native_name = Locale(lang_code).get_language_name().lower()
+ # add native name exactly as it is
+ language_names[native_name] = lang_code
+
+ # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
+ unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
+ if len(unaccented_name) == len(unaccented_name.encode()):
+ # add only if result is ascii (otherwise "normalization" didn't work)
+ language_names[unaccented_name] = lang_code
+
+ dom = html.fromstring(resp.text)
+ sp_lang_names = []
+ for option in dom.xpath('//form[@id="settings-form"]//select[@name="language"]/option'):
+ sp_lang_names.append((option.get('value'), extract_text(option).lower()))
+
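+    # map each option to a language code using the language_names table built
+    # above; a list entry means the option covers several codes (e.g. zh-TW and
+    # zh-HK for 'fantizhengwen')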
+ supported_languages = {}
+ for sp_option_value, sp_option_text in sp_lang_names:
+ lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
+ if isinstance(lang_code, str):
+ supported_languages[lang_code] = {'alias': sp_option_value}
+ elif isinstance(lang_code, list):
+ for _lc in lang_code:
+ supported_languages[_lc] = {'alias': sp_option_value}
+ else:
+ print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))
+
+ return supported_languages