Diffstat (limited to 'searx/engines/google_news.py')
-rw-r--r--  searx/engines/google_news.py  230
1 file changed, 172 insertions(+), 58 deletions(-)
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 9c837b4..ff39543 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -1,86 +1,200 @@
-"""
- Google (News)
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""Google (News)
+
+For a detailed description of the *RESTful* API see: `Query Parameter
+Definitions`_. Not all parameters can be applied:
+
+- num_ : the number of search results is ignored
+- save_ : is ignored / Google-News results are always *SafeSearch*
- @website https://news.google.com
- @provide-api no
+.. _Query Parameter Definitions:
+ https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
+
+.. _num: https://developers.google.com/custom-search/docs/xml_results#numsp
+.. _save: https://developers.google.com/custom-search/docs/xml_results#safesp
- @using-api no
- @results HTML
- @stable no
- @parse url, title, content, publishedDate
"""
+# pylint: disable=invalid-name, missing-function-docstring
+
+import binascii
+import re
+from urllib.parse import urlencode
+from base64 import b64decode
+from random import random
from lxml import html
-from searx.engines.google import _fetch_supported_languages, supported_languages_url
-from searx.url_utils import urlencode
-from searx.utils import match_language
-# search-url
+from searx import logger
+from searx.utils import (
+ eval_xpath,
+ eval_xpath_list,
+ eval_xpath_getindex,
+ extract_text,
+)
+
+# pylint: disable=unused-import
+from searx.engines.google import (
+ supported_languages_url,
+ _fetch_supported_languages,
+)
+# pylint: enable=unused-import
+
+from searx.engines.google import (
+ get_lang_info,
+ detect_google_sorry,
+)
+
+# about
+about = {
+ "website": 'https://news.google.com',
+ "wikidata_id": 'Q12020',
+ "official_api_documentation": 'https://developers.google.com/custom-search',
+ "use_official_api": False,
+ "require_api_key": False,
+ "results": 'HTML',
+}
+
+logger = logger.getChild('google news')
+
+# Compared to other google engines, google-news handles the time range
+# differently: the range is embedded in the search term itself.
+time_range_dict = {
+ 'day': 'when:1d',
+ 'week': 'when:7d',
+ 'month': 'when:1m',
+ 'year': 'when:1y',
+}
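
A hedged sketch of how this dict is applied further down in request(); the query value is a made-up placeholder:

time_range_dict = {'day': 'when:1d', 'week': 'when:7d',
                   'month': 'when:1m', 'year': 'when:1y'}
time_range = 'week'            # what searx passes in params['time_range']
query = 'climate policy'       # placeholder query
if time_range in time_range_dict:
    query += ' ' + time_range_dict[time_range]
print(query)                   # --> climate policy when:7d
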
+
+# engine dependent config
+
categories = ['news']
-paging = True
-language_support = True
-safesearch = True
+paging = False
+use_locale_domain = True
time_range_support = True
-number_of_results = 10
-search_url = 'https://www.google.com/search'\
- '?{query}'\
- '&tbm=nws'\
- '&gws_rd=cr'\
- '&{search_options}'
-time_range_attr = "qdr:{range}"
-time_range_dict = {'day': 'd',
- 'week': 'w',
- 'month': 'm',
- 'year': 'y'}
+# Google-News results are always *SafeSearch*. Option 'safesearch' is set to
+# False here, otherwise the checker will report safesearch errors::
+#
+#   safesearch : results are identical for safesearch=0 and safesearch=2
+safesearch = False
-
-# do search-request
def request(query, params):
+ """Google-News search request"""
+
+ lang_info = get_lang_info(
+ # pylint: disable=undefined-variable
+ params, supported_languages, language_aliases, False
+ )
+
+ # google news has only one domain
+ lang_info['subdomain'] = 'news.google.com'
- search_options = {
- 'start': (params['pageno'] - 1) * number_of_results
- }
+ ceid = "%s:%s" % (lang_info['country'], lang_info['language'])
- if params['time_range'] in time_range_dict:
- search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])
+ # google news redirects en to en-US
+ if lang_info['params']['hl'] == 'en':
+ lang_info['params']['hl'] = 'en-US'
- if safesearch and params['safesearch']:
- search_options['safe'] = 'on'
+    # Unlike the other google engines, google-news includes the time range
+    # in the search term.
+ if params['time_range']:
+ query += ' ' + time_range_dict[params['time_range']]
- params['url'] = search_url.format(query=urlencode({'q': query}),
- search_options=urlencode(search_options))
+ query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
+ 'q': query,
+ **lang_info['params'],
+ 'ie': "utf8",
+ 'oe': "utf8",
+ 'ucbcb': 1,
+ 'gl': lang_info['country'],
+ }) + ('&ceid=%s' % ceid) # ceid includes a ':' character which must not be urlencoded
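
A short hedged illustration of why ceid is appended verbatim: urlencode() would percent-encode the ':' separator, which must stay literal here:

from urllib.parse import urlencode

ceid = 'US:en'
print(urlencode({'ceid': ceid}))   # --> ceid=US%3Aen (':' gets encoded)
print('&ceid=%s' % ceid)           # --> &ceid=US:en  (appended as-is)
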
- if params['language'] != 'all':
- language = match_language(params['language'], supported_languages, language_aliases).split('-')[0]
- if language:
- params['url'] += '&lr=lang_' + language
+ logger.debug("query_url --> %s", query_url)
+ params['url'] = query_url
+
+ logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+
+ params['cookies']['CONSENT'] = "PENDING+" + str(random()*100)
+ params['headers'].update(lang_info['headers'])
+ params['headers']['Accept'] = (
+ 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
+ )
return params
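
A hedged usage sketch: searx injects supported_languages and language_aliases into the engine module at runtime, so a standalone call like this is illustrative only, and the params skeleton is an assumption about what searx passes in:

params = {
    'headers': {},
    'cookies': {},
    'language': 'en-US',
    'time_range': 'week',
    'pageno': 1,
    'safesearch': 0,
}
request('climate policy', params)  # needs the injected module globals
print(params['url'])
# e.g. https://news.google.com/search?q=climate+policy+when%3A7d&...&ceid=US:en
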
-# get response from search-request
def response(resp):
+ """Get response from google's search request"""
results = []
+ detect_google_sorry(resp)
+
+ # convert the text to dom
dom = html.fromstring(resp.text)
- # parse results
- for result in dom.xpath('//div[@class="g"]|//div[@class="g _cy"]'):
- try:
- r = {
- 'url': result.xpath('.//a[@class="l lLrAF"]')[0].attrib.get("href"),
- 'title': ''.join(result.xpath('.//a[@class="l lLrAF"]//text()')),
- 'content': ''.join(result.xpath('.//div[@class="st"]//text()')),
- }
- except:
- continue
-
- imgs = result.xpath('.//img/@src')
- if len(imgs) and not imgs[0].startswith('data'):
- r['img_src'] = imgs[0]
-
- results.append(r)
+ for result in eval_xpath_list(dom, '//div[@class="xrnccd"]'):
+
+        # The first <a> tag in the <article> contains the link to the
+        # article. The href attribute of the <a> is a google internal link
+        # we can't use. The real link is hidden in the jslog attribute:
+ #
+ # <a ...
+ # jslog="95014; 4:https://www.cnn.com/.../index.html; track:click"
+ # href="./articles/CAIiENu3nGS...?hl=en-US&amp;gl=US&amp;ceid=US%3Aen"
+ # ... />
+
+ jslog = eval_xpath_getindex(result, './article/a/@jslog', 0)
+ url = re.findall('http[^;]*', jslog)
+ if url:
+ url = url[0]
+ else:
+ # The real URL is base64 encoded in the json attribute:
+ # jslog="95014; 5:W251bGwsbnVsbCxudW...giXQ==; track:click"
+ jslog = jslog.split(";")[1].split(':')[1].strip()
+ try:
+                padding = (4 - (len(jslog) % 4)) * "="
+ jslog = b64decode(jslog + padding)
+ except binascii.Error:
+                # URL can't be read, skip this result
+ continue
+
+            # now we have: b'[null, ... null,"https://www.cnn.com/.../index.html"]'
+ url = re.findall('http[^;"]*', str(jslog))[0]
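
The fallback path in isolation, as a hedged sketch run on a constructed jslog value; the base64 payload is fabricated for illustration:

import re
from base64 import b64decode, b64encode

payload = b'[null,null,"https://www.cnn.com/example/index.html"]'
fake_jslog = '95014; 5:%s; track:click' % b64encode(payload).decode().rstrip('=')

value = fake_jslog.split(';')[1].split(':')[1].strip()
padding = (4 - (len(value) % 4)) * '='
print(re.findall('http[^;"]*', str(b64decode(value + padding)))[0])
# --> https://www.cnn.com/example/index.html
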
+
+ # the first <h3> tag in the <article> contains the title of the link
+ title = extract_text(eval_xpath(result, './article/h3[1]'))
+
+ # the first <div> tag in the <article> contains the content of the link
+ content = extract_text(eval_xpath(result, './article/div[1]'))
+
+        # the second <div> tag contains the origin publisher and the
+        # publishing date
+
+ pub_date = extract_text(eval_xpath(result, './article/div[2]//time'))
+ pub_origin = extract_text(eval_xpath(result, './article/div[2]//a'))
+
+ pub_info = []
+ if pub_origin:
+ pub_info.append(pub_origin)
+ if pub_date:
+            # The pub_date is mostly a string like 'yesterday', not a real
+            # timezone date or time. Therefore we can't use publishedDate.
+ pub_info.append(pub_date)
+ pub_info = ', '.join(pub_info)
+ if pub_info:
+ content = pub_info + ': ' + content
+
+        # The image URL is located in a preceding sibling <img> tag, e.g.:
+        # "https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100"
+        # These URLs are long but not personalized (double checked via Tor).
+
+ img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
+
+ results.append({
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'img_src': img_src,
+ })
# return results
return results
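
Finally, a hedged end-to-end sketch of the XPath logic above, run against a hand-written HTML skeleton; the markup mimics the class names in this diff but is not a captured Google News page, and plain lxml calls stand in for searx's eval_xpath helpers:

import re
from lxml import html

doc = html.fromstring('''
<div><div class="xrnccd"><article>
  <a jslog="95014; 4:https://example.org/story.html; track:click"
     href="./articles/x">link</a>
  <h3>Example headline</h3>
  <div>Teaser text</div>
  <div><time>yesterday</time> <a>Example Press</a></div>
</article></div></div>''')

for result in doc.xpath('//div[@class="xrnccd"]'):
    jslog = result.xpath('./article/a/@jslog')[0]
    print(re.findall('http[^;]*', jslog)[0])                   # the article URL
    print(result.xpath('./article/h3[1]')[0].text_content())   # the title
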