import os
import re
import urllib.parse as urlparse
from urllib.parse import parse_qs

from bs4 import BeautifulSoup, NavigableString

SKIP_ARGS = ['ref_src', 'utm']
SKIP_PREFIX = ['//www.', '//mobile.', '//m.']
GOOG_STATIC = 'www.gstatic.com'
GOOG_IMG = '/images/branding/searchlogo/1x/googlelogo'
LOGO_URL = GOOG_IMG + '_desk'
BLANK_B64 = ('data:image/png;base64,'
             'iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAQAAAAnOwc2AAAAD0lEQVR42mNkw'
             'AIYh7IgAAVVAAuInjI5AAAAAElFTkSuQmCC')

# Ad keywords
BLACKLIST = [
    'ad', 'anuncio', 'annuncio', 'annonce', 'Anzeige', '广告', '廣告', 'Reklama',
    'Реклама', 'Anunț', '광고', 'annons', 'Annonse', 'Iklan', '広告', 'Augl.',
    'Mainos', 'Advertentie', 'إعلان', 'Գովազդ', 'विज्ञापन', 'Reklam', 'آگهی',
    'Reklāma', 'Reklaam', 'Διαφήμιση', 'מודעה', 'Hirdetés', 'Anúncio'
]

SITE_ALTS = {
    'twitter.com': os.getenv('WHOOGLE_ALT_TW', 'nitter.net'),
    'youtube.com': os.getenv('WHOOGLE_ALT_YT', 'invidious.snopyta.org'),
    'instagram.com': os.getenv('WHOOGLE_ALT_IG', 'bibliogram.art/u'),
    'reddit.com': os.getenv('WHOOGLE_ALT_RD', 'libredd.it'),
    **dict.fromkeys([
        'medium.com',
        'levelup.gitconnected.com'
    ], os.getenv('WHOOGLE_ALT_MD', 'scribe.rip'))
}
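
# Note: SITE_ALTS is resolved once, at import time, so any WHOOGLE_ALT_*
# override must already be in the environment before this module is first
# imported. A minimal sketch (the env var name is real, the host below is
# hypothetical):
#
#   os.environ['WHOOGLE_ALT_TW'] = 'nitter.example.com'
#   # ...import this module afterwards: SITE_ALTS['twitter.com'] then
#   # resolves to 'nitter.example.com' instead of the default 'nitter.net'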


def bold_search_terms(response: str, query: str) -> BeautifulSoup:
    """Wraps all search terms in bold tags (<b>). If any terms are wrapped
    in quotes, only that exact phrase will be made bold.

    Args:
        response: The initial response body for the query
        query: The original search query

    Returns:
        BeautifulSoup: The modified soup object with bold search terms

    """
    response = BeautifulSoup(response, 'html.parser')

    def replace_any_case(element: NavigableString, target_word: str) -> None:
        # Replace all instances of the word, but maintain the same case in
        # the replacement
        if len(element) == len(target_word):
            return

        element.replace_with(
            re.sub(r'\b((?![{}<>-])' + target_word + r'(?![{}<>-]))\b',
                   r'<b>\1</b>',
                   element,
                   flags=re.I)
        )

    # Split all words out of the query, grouping the ones wrapped in quotes
    for word in re.split(r'\s+(?=[^"]*(?:"[^"]*"[^"]*)*$)', query):
        # Strip quotes and other special characters so the word can be safely
        # embedded in the replacement regex, and skip tokens that are empty
        # afterwards (they would otherwise match every text node)
        word = re.sub(r'[^A-Za-z0-9 ]+', '', word)
        if not word:
            continue

        target = response.find_all(
            text=re.compile(re.escape(word), re.I))
        for nav_str in target:
            replace_any_case(nav_str, word)

    return response
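
# A quick usage sketch (illustrative input, not a real Google response):
#
#   soup = bold_search_terms('<p>python tips for python devs</p>', 'python')
#   # Each occurrence of 'python' in the text node is rewritten as
#   # '<b>python</b>', case-insensitively; a quoted query ('"python tips"')
#   # would bold only the exact phrase.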


def has_ad_content(element: str) -> bool:
    """Inspects an HTML element for ad-related content

    Args:
        element: The text of the HTML element to inspect

    Returns:
        bool: True if the element contains ad content, False otherwise

    """
    return (element.upper() in (value.upper() for value in BLACKLIST)
            or 'ⓘ' in element)
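
# A couple of sanity checks (illustrative):
#
#   has_ad_content('Anzeige')             # True  -- exact blacklist match
#   has_ad_content('ⓘ About this ad')     # True  -- contains the info glyph
#   has_ad_content('advertisement tips')  # False -- matching is against the
#                                         #          whole string, not substrings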


def get_first_link(soup: BeautifulSoup) -> str:
    """Retrieves the first result link from the query response

    Args:
        soup: The BeautifulSoup response body

    Returns:
        str: A link to the first result, or an empty string if none is found

    """
    for a in soup.find_all('a', href=True):
        # Return the first search result URL, stripped of extraneous
        # ("utm" type) arguments
        if 'url?q=' in a['href']:
            return filter_link_args(a['href'])
    return ''
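
# Illustrative: for a results page whose first matching anchor is
#
#   <a href="/url?q=https://example.com/&ref_src=abc">...</a>
#
# get_first_link returns the href with the 'ref_src' argument stripped by
# filter_link_args (see below).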


def get_site_alt(link: str) -> str:
    """Returns an alternative to a particular site, if one is configured

    Args:
        link: A string result URL to check against the SITE_ALTS map

    Returns:
        str: An updated (or unmodified) result link

    """
    # Replace the full hostname with the alternative so that subdomains are
    # covered as well
    hostname = urlparse.urlparse(link).hostname

    for site_key in SITE_ALTS.keys():
        if not hostname or site_key not in hostname:
            continue

        link = link.replace(hostname, SITE_ALTS[site_key])
        for prefix in SKIP_PREFIX:
            link = link.replace(prefix, '//')
        break

    return link
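
# Illustrative walk-through using the default SITE_ALTS above:
#
#   get_site_alt('https://www.twitter.com/user')
#   # hostname 'www.twitter.com' contains the 'twitter.com' key, so the whole
#   # hostname (subdomain included) is swapped for 'nitter.net', yielding
#   # 'https://nitter.net/user'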


def filter_link_args(link: str) -> str:
    """Filters out unnecessary URL args from a result link

    Args:
        link: The string result link to check for extraneous URL params

    Returns:
        str: An updated (or unmodified) result link

    """
    parsed_link = urlparse.urlparse(link)
    link_args = parse_qs(parsed_link.query)
    safe_args = {}

    # Nothing to filter if the link has no query args
    if len(link_args) == 0:
        return link

    for arg in link_args.keys():
        if arg in SKIP_ARGS:
            continue

        safe_args[arg] = link_args[arg]

    # Remove the original link query and replace with the filtered args
    link = link.replace(parsed_link.query, '')
    if len(safe_args) > 0:
        link = link + urlparse.urlencode(safe_args, doseq=True)
    else:
        link = link.replace('?', '')

    return link
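
# Illustrative (note that SKIP_ARGS matches exact argument names, so 'utm' is
# dropped here but a name like 'utm_source' would survive):
#
#   filter_link_args('https://example.com/page?id=1&utm=abc')
#   # -> 'https://example.com/page?id=1'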


def append_nojs(result: BeautifulSoup) -> None:
    """Appends a no-Javascript alternative for a search result

    Args:
        result: The search result to append a no-JS link to

    Returns:
        None

    """
    nojs_link = BeautifulSoup(features='html.parser').new_tag('a')
    nojs_link['href'] = '/window?location=' + result['href']
    nojs_link['style'] = 'display:block;width:100%;'
    nojs_link.string = 'NoJS Link: ' + nojs_link['href']
    result.append(BeautifulSoup('<br><hr><br>', 'html.parser'))
    result.append(nojs_link)
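
# Illustrative: for a result tag with href="https://example.com", this appends
# a '<br><hr><br>' divider followed by an anchor pointing at
# '/window?location=https://example.com', labeled 'NoJS Link: ' plus that URL.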


def add_ip_card(html_soup: BeautifulSoup, ip: str) -> BeautifulSoup:
    """Adds the client's IP address to the search results,
    if the query contains the relevant keywords

    Args:
        html_soup: The parsed search result containing the keywords
        ip: The IP address of the client

    Returns:
        BeautifulSoup: The modified soup object

    """
    if (not html_soup.select_one('.EY24We')
            and html_soup.select_one('.OXXup').get_text().lower() == 'all'):
        # The outer IP card tag
        ip_tag = html_soup.new_tag('div')
        ip_tag['class'] = 'ZINbbc xpd O9g5cc uUPGi'

        # The IP address div
        ip_address = html_soup.new_tag('div')
        ip_address['class'] = 'kCrYT ip-address-div'
        ip_address.string = ip

        # The text below the IP address
        ip_text = html_soup.new_tag('div')
        ip_text.string = 'Your public IP address'
        ip_text['class'] = 'kCrYT ip-text-div'

        # Add the address and caption to the IP card
        ip_tag.append(ip_address)
        ip_tag.append(ip_text)

        # Find the element before which the IP card will be placed
        f_link = html_soup.select_one('.BNeawe.vvjwJb.AP7Wnd')
        ref_element = f_link.find_parent(class_='ZINbbc xpd O9g5cc uUPGi')

        # Insert the card
        ref_element.insert_before(ip_tag)
    return html_soup
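
# Usage sketch ('203.0.113.7' is a documentation address; the soup is assumed
# to come from an IP-related query on the "All" tab):
#
#   soup = add_ip_card(soup, '203.0.113.7')
#   # A card showing '203.0.113.7' over 'Your public IP address' is inserted
#   # before the first organic result, unless a featured answer ('.EY24We')
#   # is already present.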