from app.request import VALID_PARAMS, MAPS_URL
from app.utils.results import *
from bs4 import BeautifulSoup
from bs4.element import ResultSet, Tag
from cryptography.fernet import Fernet
from flask import render_template
import re
import urllib.parse as urlparse
from urllib.parse import parse_qs


def extract_q(q_str: str, href: str) -> str:
    """Extracts the 'q' element from a result link. This is typically
    either the link to a result's website, or a string.

    Args:
        q_str: The result link to parse
        href: The full url to check for standalone 'q' elements first,
            rather than parsing the whole query string and then checking.

    Returns:
        str: The 'q' element of the link, or an empty string
    """
    return parse_qs(q_str)['q'][0] if ('&q=' in href or '?q=' in href) else ''
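
# A minimal sketch of the expected behavior, using a made-up result href
# (the '/url?q=' shape here is illustrative, not taken from a live page):
#   extract_q('q=https://example.com&sa=U', '/url?q=https://example.com&sa=U')
#   -> 'https://example.com'
#   extract_q('sa=U', '/url?sa=U')
#   -> ''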


def clean_query(query: str) -> str:
    """Strips the blocked site list from the query, if one is being
    used.

    Args:
        query: The query string

    Returns:
        str: The query string without any "-site:..." filters
    """
    return query[:query.find('-site:')] if '-site:' in query else query
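
# A quick sketch with a made-up query; note the slice keeps everything up
# to the first '-site:', including the trailing space:
#   clean_query('open source search -site:example.com')
#   -> 'open source search '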


class Filter:
    # Limit used for determining if a result is a "regular" result or a list
    # type result (such as "people also asked", "related searches", etc)
    RESULT_CHILD_LIMIT = 7

    def __init__(self, user_key: str, mobile=False, config=None) -> None:
        if config is None:
            config = {}

        self.near = config.get('near', '')
        self.dark = config.get('dark', False)
        self.nojs = config.get('nojs', False)
        self.new_tab = config.get('new_tab', False)
        self.alt_redirect = config.get('alts', False)
        self.mobile = mobile
        self.user_key = user_key
        self.main_divs = ResultSet('')
        self._elements = 0

    def __getitem__(self, name):
        return getattr(self, name)

    @property
    def elements(self):
        return self._elements

    def reskin(self, page: str) -> str:
        # Aesthetic only re-skinning
        if self.dark:
            page = page.replace(
                'fff', '000').replace(
                '202124', 'ddd').replace(
                '1967D2', '3b85ea')

        return page
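
    # With dark mode on, a fragment like 'color:#fff;background:#202124'
    # comes back as 'color:#000;background:#ddd'. This is plain string
    # substitution, so any occurrence of those hex values is affected.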

    def encrypt_path(self, path, is_element=False) -> str:
        # Encrypts path to avoid plaintext results in logs
        if is_element:
            # Element paths are encrypted separately from text, to allow key
            # regeneration once all items have been served to the user
            enc_path = Fernet(self.user_key).encrypt(path.encode()).decode()
            self._elements += 1
            return enc_path

        return Fernet(self.user_key).encrypt(path.encode()).decode()
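
    # Sketch of the round trip this enables (the decryption shown here is
    # illustrative; the app's actual decrypt site lives elsewhere):
    #   key = Fernet.generate_key()
    #   token = Filter(key).encrypt_path('/search?q=test')
    #   Fernet(key).decrypt(token.encode()).decode()  # -> '/search?q=test'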

    def clean(self, soup) -> BeautifulSoup:
        self.main_divs = soup.find('div', {'id': 'main'})
        self.remove_ads()
        self.collapse_sections()
        self.update_styling(soup)

        for img in [_ for _ in soup.find_all('img') if 'src' in _.attrs]:
            self.update_element_src(img, 'image/png')

        for audio in [_ for _ in soup.find_all('audio') if 'src' in _.attrs]:
            self.update_element_src(audio, 'audio/mpeg')

        for link in soup.find_all('a', href=True):
            self.update_link(link)

        input_form = soup.find('form')
        if input_form is not None:
            input_form['method'] = 'POST'

        # Ensure no extra scripts passed through
        for script in soup('script'):
            script.decompose()

        # Update default footer and header
        footer = soup.find('footer')
        if footer:
            # Remove divs that have multiple links beyond just page navigation
            [_.decompose() for _ in footer.find_all('div', recursive=False)
             if len(_.find_all('a', href=True)) > 3]

        header = soup.find('header')
        if header:
            header.decompose()

        return soup
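
    # Rough usage sketch (the markup and key are stand-ins; a real results
    # page comes from the upstream search request):
    #   soup = BeautifulSoup('<div id="main">...</div>', 'html.parser')
    #   cleaned = Filter(Fernet.generate_key()).clean(soup)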

    def remove_ads(self) -> None:
        """Removes ads found in the list of search result divs

        Returns:
            None (The soup object is modified directly)
        """
        if not self.main_divs:
            return

        for div in self.main_divs.find_all('div', recursive=True):
            div_ads = [_ for _ in div.find_all('span', recursive=True)
                       if has_ad_content(_.text)]
            if div_ads:
                div.decompose()
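
    # has_ad_content comes from app.utils.results (pulled in via the wildcard
    # import above); any result div containing a span it flags is dropped
    # wholesale rather than edited in place.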

    def collapse_sections(self) -> None:
        """Collapses long result sections ("people also asked", "related
        searches", etc) into "details" elements

        These sections are typically the only sections in the results page
        that have more than ~5 child divs within a primary result div.

        Returns:
            None (The soup object is modified directly)
        """
        def pull_child_divs(result_div: BeautifulSoup):
            try:
                return result_div.findChildren(
                    'div', recursive=False
                )[0].findChildren(
                    'div', recursive=False)
            except IndexError:
                return []

        if not self.main_divs:
            return

        # Loop through results and check for the number of child divs in each
        for result in self.main_divs:
            result_children = pull_child_divs(result)
            if len(result_children) < self.RESULT_CHILD_LIMIT:
                continue

            # Find and decompose the first element with an inner HTML text val.
            # This typically extracts the title of the section (i.e. "Related
            # Searches", "People also ask", etc)
            label = 'Collapsed Results'
            for elem in result_children:
                if elem.text:
                    label = elem.text
                    elem.decompose()
                    break

            # Create the new details element to wrap around the result's
            # first parent
            parent = None
            idx = 0
            while not parent and idx < len(result_children):
                parent = result_children[idx].parent
                idx += 1

            details = BeautifulSoup(features='html.parser').new_tag('details')
            summary = BeautifulSoup(features='html.parser').new_tag('summary')
            summary.string = label
            details.append(summary)

            if parent:
                parent.wrap(details)
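
    # Net effect, roughly sketched on simplified markup (real result divs
    # are more deeply nested than this):
    #   <div><div>Related searches</div><div>...</div></div>
    # becomes
    #   <details><summary>Related searches</summary><div>...</div></details>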

    def update_element_src(self, element: Tag, mime: str) -> None:
        """Encrypts the original src of an element and rewrites the element
        src to use the "element?url=" pass-through.

        Returns:
            None (The soup element is modified directly)
        """
        src = element['src']

        if src.startswith('//'):
            src = 'https:' + src

        if src.startswith(LOGO_URL):
            # Re-brand with Whoogle logo
            element.replace_with(BeautifulSoup(
                render_template('logo.html', dark=self.dark),
                features='html.parser'))
            return
        elif src.startswith(GOOG_IMG) or GOOG_STATIC in src:
            element['src'] = BLANK_B64
            return

        element['src'] = 'element?url=' + self.encrypt_path(
            src,
            is_element=True) + '&type=' + urlparse.quote(mime)
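
    # For a made-up src like 'https://example.com/img.png', the rewritten
    # attribute looks roughly like:
    #   element?url=gAAAAAB...&type=image%2Fpng
    # where the token is the Fernet-encrypted original URL.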

    def update_styling(self, soup) -> None:
        # Remove unnecessary button(s)
        for button in soup.find_all('button'):
            button.decompose()

        # Remove svg logos
        for svg in soup.find_all('svg'):
            svg.decompose()

        # Update logo
        logo = soup.find('a', {'class': 'l'})
        if logo and self.mobile:
            logo['style'] = ('display:flex; justify-content:center; '
                             'align-items:center; color:#685e79; '
                             'font-size:18px; ')

        # Fix search bar length on mobile
        try:
            search_bar = soup.find('header').find('form').find('div')
            search_bar['style'] = 'width: 100%;'
        except AttributeError:
            pass

    def update_link(self, link: Tag) -> None:
        """Update internal link paths with encrypted path, otherwise remove
        unnecessary redirects and/or marketing params from the url

        Args:
            link: A bs4 Tag element to inspect and update

        Returns:
            None (the tag is updated directly)
        """
        # Replace href with only the intended destination (no "utm" type tags)
        href = link['href'].replace('https://www.google.com', '')
        if 'advanced_search' in href or 'tbm=shop' in href:
            # FIXME: The "Shopping" tab requires further filtering (see #136)
            # Temporarily removing all links to that tab for now.
            link.decompose()
            return
        elif self.new_tab:
            link['target'] = '_blank'

        result_link = urlparse.urlparse(href)
        q = extract_q(result_link.query, href)

        if q.startswith('/'):
            # Internal google links (i.e. mail, maps, etc) should still
            # be forwarded to Google
            link['href'] = 'https://google.com' + q
        elif '/search?q=' in href:
            # "li:1" implies the query should be interpreted verbatim,
            # which is accomplished by wrapping the query in double quotes
            if 'li:1' in href:
                q = '"' + q + '"'
            new_search = 'search?q=' + self.encrypt_path(q)

            query_params = parse_qs(urlparse.urlparse(href).query)
            for param in VALID_PARAMS:
                if param not in query_params:
                    continue
                param_val = query_params[param][0]
                new_search += '&' + param + '=' + param_val
            link['href'] = new_search
        elif 'url?q=' in href:
            # Strip unneeded arguments
            link['href'] = filter_link_args(q)

            # Add no-js option
            if self.nojs:
                append_nojs(link)
        else:
            if href.startswith(MAPS_URL):
                # Maps links don't work if a site filter is applied
                link['href'] = MAPS_URL + '?q=' + clean_query(q)
            else:
                link['href'] = href

        # Replace link location if "alts" config is enabled
        if self.alt_redirect:
            # Search and replace all link descriptions
            # with alternative location
            link['href'] = get_site_alt(link['href'])
            link_desc = link.find_all(
                text=re.compile('|'.join(SITE_ALTS.keys())))
            if len(link_desc) == 0:
                return

            # Replace link destination
            link_desc[0].replace_with(get_site_alt(link_desc[0]))
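
    # Rough sketch of the main branches, using made-up hrefs:
    #   '/url?q=https://example.com&sa=U' -> 'https://example.com'
    #       (filter_link_args strips the unneeded arguments)
    #   '/search?q=foo' -> 'search?q=<Fernet token>' plus any VALID_PARAMS
    #   a MAPS_URL href -> MAPS_URL + '?q=' + the query minus '-site:' filters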

    def view_image(self, soup) -> BeautifulSoup:
        """Replaces the soup with a new one that handles mobile results and
        adds the full resolution image link to each result.

        Args:
            soup: A BeautifulSoup object containing the image mobile results.

        Returns:
            BeautifulSoup: The new BeautifulSoup object
        """
        # Get some tags that are unchanged between the mobile and desktop
        # versions
        search_input = soup.find_all('td', attrs={'class': 'O4cRJf'})[0]
        search_options = soup.find_all('div', attrs={'class': 'M7pB2'})[0]
        cor_suggested = soup.find_all('table', attrs={'class': 'By0U9'})
        next_pages = soup.find_all('table', attrs={'class': 'uZgmoc'})[0]
        information = soup.find_all('div', attrs={'class': 'TuS8Ad'})[0]

        results = []
        # Find the results div
        results_div = soup.find_all('div', attrs={'class': 'nQvrDb'})[0]
        # Find all the results
        results_all = results_div.find_all('div', attrs={'class': 'lIMUZd'})

        for item in results_all:
            urls = item.find('a')['href'].split('&imgrefurl=')

            img_url = urlparse.unquote(urls[0].replace('/imgres?imgurl=', ''))

            if len(urls) > 1:
                # Strip out only the necessary part of the web page link
                web_page = urlparse.unquote(urls[1].split('&')[0])
            else:
                # No '&imgrefurl=' in the href; fall back to the image url
                web_page = img_url

            img_tbn = urlparse.unquote(item.find('a').find('img')['src'])

            results.append({
                'domain': urlparse.urlparse(web_page).netloc,
                'img_url': img_url,
                'web_page': web_page,
                'img_tbn': img_tbn
            })

        soup = BeautifulSoup(render_template('imageresults.html',
                                             length=len(results),
                                             results=results,
                                             view_label='View Image'),
                             features='html.parser')
        # Replace the search input object
        soup.find_all('td',
                      attrs={'class': 'O4cRJf'})[0].replaceWith(search_input)
        # Replace the search options object (All, Images, Videos, etc.)
        soup.find_all('div',
                      attrs={'class': 'M7pB2'})[0].replaceWith(search_options)
        # Replace the correction suggested by google, if one exists
        if len(cor_suggested):
            soup.find_all(
                'table',
                attrs={'class': 'By0U9'}
            )[0].replaceWith(cor_suggested[0])
        # Replace the next page object at the bottom of the page
        soup.find_all('table',
                      attrs={'class': 'uZgmoc'})[0].replaceWith(next_pages)
        # Replace information about user connection at the bottom of the page
        soup.find_all('div',
                      attrs={'class': 'TuS8Ad'})[0].replaceWith(information)
        return soup
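
    # Note: the class names used above ('O4cRJf', 'M7pB2', 'By0U9', 'uZgmoc',
    # 'TuS8Ad', 'nQvrDb', 'lIMUZd') are Google's obfuscated mobile result
    # classes at the time of writing, and are liable to change upstream.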