from app.models.config import Config
from app.models.endpoint import Endpoint
from app.models.g_classes import GClasses
from app.request import VALID_PARAMS, MAPS_URL
from app.utils.misc import read_config_bool
from app.utils.results import *
from bs4 import BeautifulSoup
from bs4.element import ResultSet, Tag
from cryptography.fernet import Fernet
from flask import render_template
import re
import urllib.parse as urlparse
from urllib.parse import parse_qs
import os

minimal_mode_sections = ['Top stories', 'Images', 'People also ask']

unsupported_g_pages = [
    'support.google.com',
    'accounts.google.com',
    'policies.google.com',
    'google.com/preferences',
    'google.com/intl',
    'advanced_search',
    'tbm=shop'
]


def extract_q(q_str: str, href: str) -> str:
    """Extracts the 'q' element from a result link. This is typically
    either the link to a result's website, or the search query string.

    Args:
        q_str: The result link to parse
        href: The full url to check for standalone 'q' elements first,
            rather than parsing the whole query string and then checking.

    Returns:
        str: The 'q' element of the link, or an empty string
    """
    return parse_qs(q_str)['q'][0] if ('&q=' in href or '?q=' in href) else ''

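# Illustrative example (the URL is hypothetical, not from this codebase):
#   extract_q('q=https://example.com/&sa=U', '/url?q=https://example.com/&sa=U')
#   -> 'https://example.com/'
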
def clean_query(query: str) -> str:
    """Strips the blocked site list from the query, if one is being
    used.

    Args:
        query: The query string

    Returns:
        str: The query string without any "-site:..." filters
    """
    return query[:query.find('-site:')] if '-site:' in query else query

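# Illustrative example (query is hypothetical):
#   clean_query('open source search -site:example.com')
#   -> 'open source search '  (everything from '-site:' onward is dropped)
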
class Filter:
    # Limit used for determining if a result is a "regular" result or a list
    # type result (such as "people also asked", "related searches", etc)
    RESULT_CHILD_LIMIT = 7

    def __init__(self, user_key: str, config: Config, mobile=False) -> None:
        self.config = config
        self.mobile = mobile
        self.user_key = user_key
        self.main_divs = ResultSet('')
        self._elements = 0

    def __getitem__(self, name):
        return getattr(self, name)

    @property
    def elements(self):
        return self._elements

    def encrypt_path(self, path, is_element=False) -> str:
        # Encrypts the path to avoid plaintext results in logs
        if is_element:
            # Element paths are encrypted separately from text, to allow key
            # regeneration once all items have been served to the user
            enc_path = Fernet(self.user_key).encrypt(path.encode()).decode()
            self._elements += 1
            return enc_path

        return Fernet(self.user_key).encrypt(path.encode()).decode()

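    # Illustrative Fernet round trip (key management happens elsewhere in the
    # app; 'user_key' below is an assumed stand-in):
    #   user_key = Fernet.generate_key()
    #   token = Fernet(user_key).encrypt(b'https://example.com').decode()
    #   Fernet(user_key).decrypt(token.encode())  ->  b'https://example.com'
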
    def clean(self, soup) -> BeautifulSoup:
        self.main_divs = soup.find('div', {'id': 'main'})
        self.remove_ads()
        self.remove_block_titles()
        self.remove_block_url()
        self.collapse_sections()
        self.update_styling(soup)
        self.remove_block_tabs(soup)

        for img in [_ for _ in soup.find_all('img') if 'src' in _.attrs]:
            self.update_element_src(img, 'image/png')

        for audio in [_ for _ in soup.find_all('audio') if 'src' in _.attrs]:
            self.update_element_src(audio, 'audio/mpeg')

        for link in soup.find_all('a', href=True):
            self.update_link(link)

        input_form = soup.find('form')
        if input_form is not None:
            input_form['method'] = 'GET' if self.config.get_only else 'POST'

        # Ensure no extra scripts passed through
        for script in soup('script'):
            script.decompose()

        # Update default footer and header
        footer = soup.find('footer')
        if footer:
            # Remove divs that have multiple links beyond just page navigation
            for div in footer.find_all('div', recursive=False):
                if len(div.find_all('a', href=True)) > 3:
                    div.decompose()

        header = soup.find('header')
        if header:
            header.decompose()

        self.remove_site_blocks(soup)
        return soup

    def remove_site_blocks(self, soup) -> None:
        if not self.config.block or not soup.body:
            return
        search_string = ' '.join(['-site:' +
                                  _ for _ in self.config.block.split(',')])
        # Escape regex metacharacters (i.e. the '.' in each domain) so the
        # search string is matched literally
        selected = soup.body.find_all(
            text=re.compile(re.escape(search_string)))

        for result in selected:
            result.string.replace_with(result.string.replace(
                search_string, ''))

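    # For example, config.block = 'example.com,example.org' produces the
    # search string '-site:example.com -site:example.org', which is stripped
    # from any matching text node (domains here are hypothetical)
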
    def remove_ads(self) -> None:
        """Removes ads found in the list of search result divs

        Returns:
            None (The soup object is modified directly)
        """
        if not self.main_divs:
            return

        for div in self.main_divs.find_all('div', recursive=True):
            div_ads = [_ for _ in div.find_all('span', recursive=True)
                       if has_ad_content(_.text)]
            if div_ads:
                div.decompose()

    def remove_block_titles(self) -> None:
        if not self.main_divs or not self.config.block_title:
            return
        block_title = re.compile(self.config.block_title)
        for div in self.main_divs.find_all('div', recursive=True):
            block_divs = [_ for _ in div.find_all('h3', recursive=True)
                          if block_title.search(_.text) is not None]
            if block_divs:
                div.decompose()

    def remove_block_url(self) -> None:
        if not self.main_divs or not self.config.block_url:
            return
        block_url = re.compile(self.config.block_url)
        for div in self.main_divs.find_all('div', recursive=True):
            block_divs = [_ for _ in div.find_all('a', recursive=True)
                          if block_url.search(_.attrs['href']) is not None]
            if block_divs:
                div.decompose()

    def remove_block_tabs(self, soup) -> None:
        if self.main_divs:
            for div in self.main_divs.find_all(
                    'div', attrs={'class': f'{GClasses.main_tbm_tab}'}):
                div.decompose()
        else:
            # The "main" div is absent on the images tab, so remove the tab
            # links from the full document instead
            for div in soup.find_all(
                    'div', attrs={'class': f'{GClasses.images_tbm_tab}'}):
                div.decompose()

    def collapse_sections(self) -> None:
        """Collapses long result sections ("people also asked", "related
        searches", etc) into "details" elements

        These sections are typically the only sections in the results page
        that have more than ~5 child divs within a primary result div.

        Returns:
            None (The soup object is modified directly)
        """
        minimal_mode = read_config_bool('WHOOGLE_MINIMAL')

        def pull_child_divs(result_div: BeautifulSoup):
            try:
                return result_div.find_all(
                    'div', recursive=False
                )[0].find_all(
                    'div', recursive=False)
            except IndexError:
                return []

        if not self.main_divs:
            return

        # Loop through results and check for the number of child divs in each
        for result in self.main_divs:
            result_children = pull_child_divs(result)

            # In minimal mode, drop the excluded sections entirely
            if minimal_mode and any(f'>{x}</span' in str(s)
                                    for s in result_children
                                    for x in minimal_mode_sections):
                result.decompose()
                continue

            # Regular results (fewer children than the limit) are left as-is
            if len(result_children) < self.RESULT_CHILD_LIMIT:
                continue

            # Find and decompose the first element with an inner HTML text
            # value. This typically extracts the title of the section (i.e.
            # "Related searches", "People also ask", etc). If there is more
            # than one child tag with text, parenthesize all but the first.
            label = 'Collapsed Results'
            subtitle = None
            for elem in result_children:
                if elem.text:
                    content = list(elem.strings)
                    label = content[0]
                    if len(content) > 1:
                        subtitle = '<span> (' + \
                            ''.join(content[1:]) + ')</span>'
                    elem.decompose()
                    break

            # Create the new details element to wrap around the result's
            # first parent
            parent = None
            idx = 0
            while not parent and idx < len(result_children):
                parent = result_children[idx].parent
                idx += 1

            details = BeautifulSoup(features='html.parser').new_tag('details')
            summary = BeautifulSoup(features='html.parser').new_tag('summary')
            summary.string = label

            if subtitle:
                summary.append(BeautifulSoup(subtitle, 'html.parser'))

            details.append(summary)

            if parent and not minimal_mode:
                parent.wrap(details)
            elif parent and minimal_mode:
                # Remove the parent element from the document entirely when
                # "minimal mode" is enabled
                parent.decompose()

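    # Rough sketch of what collapse_sections produces (markup shape is
    # illustrative only):
    #   <div><div> <div>Related searches</div> <div>...</div> ... </div></div>
    # becomes
    #   <details><summary>Related searches</summary>
    #     <div> <div>...</div> ... </div>
    #   </details>
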
    def update_element_src(self, element: Tag, mime: str) -> None:
        """Encrypts the original src of an element and rewrites the element
        src to use the "element?url=" pass-through.

        Returns:
            None (The soup element is modified directly)
        """
        src = element['src']

        if src.startswith('//'):
            src = 'https:' + src

        if src.startswith(LOGO_URL):
            # Re-brand with Whoogle logo
            element.replace_with(BeautifulSoup(
                render_template('logo.html'),
                features='html.parser'))
            return
        elif src.startswith(GOOG_IMG) or GOOG_STATIC in src:
            element['src'] = BLANK_B64
            return

        element['src'] = f'{Endpoint.element}?url=' + self.encrypt_path(
            src,
            is_element=True) + '&type=' + urlparse.quote(mime)

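    # Resulting attribute shape (token is a placeholder, and Endpoint.element
    # is assumed to render as 'element'):
    #   <img src="element?url=gAAAAB...&type=image%2Fpng">
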
    def update_styling(self, soup) -> None:
        # Remove unnecessary button(s)
        for button in soup.find_all('button'):
            button.decompose()

        # Remove svg logos
        for svg in soup.find_all('svg'):
            svg.decompose()

        # Update logo
        logo = soup.find('a', {'class': 'l'})
        if logo and self.mobile:
            logo['style'] = ('display:flex; justify-content:center; '
                             'align-items:center; color:#685e79; '
                             'font-size:18px; ')

        # Fix search bar length on mobile
        try:
            search_bar = soup.find('header').find('form').find('div')
            search_bar['style'] = 'width: 100%;'
        except AttributeError:
            pass

        # Fix body max width on images tab
        style = soup.find('style')
        div = soup.find('div', attrs={'class': f'{GClasses.images_tbm_tab}'})
        if style and div and not self.mobile:
            css = style.string
            css_html_tag = (
                'html{'
                'font-family: Roboto, Helvetica Neue, Arial, sans-serif;'
                'font-size: 14px;'
                'line-height: 20px;'
                'text-size-adjust: 100%;'
                'word-wrap: break-word;'
                '}'
            )
            css = f"{css_html_tag}{css}"
            css = re.sub('body{(.*?)}',
                         'body{padding:0 8px;margin:0 auto;max-width:736px;}',
                         css)
            style.string = css

    def update_link(self, link: Tag) -> None:
        """Update internal link paths with the encrypted path, otherwise
        remove unnecessary redirects and/or marketing params from the url

        Args:
            link: A bs4 Tag element to inspect and update

        Returns:
            None (the tag is updated directly)
        """
        # Remove any elements that direct to unsupported Google pages
        if any(url in link['href'] for url in unsupported_g_pages):
            # FIXME: The "Shopping" tab requires further filtering (see #136)
            # Temporarily removing all links to that tab for now.
            link.decompose()
            return

        # Replace href with only the intended destination (no "utm" type tags)
        href = link['href'].replace('https://www.google.com', '')
        result_link = urlparse.urlparse(href)
        q = extract_q(result_link.query, href)

        if q.startswith('/'):
            # Internal google links (i.e. mail, maps, etc) should still
            # be forwarded to Google
            link['href'] = 'https://google.com' + q
        elif '/search?q=' in href:
            # "li:1" implies the query should be interpreted verbatim,
            # which is accomplished by wrapping the query in double quotes
            if 'li:1' in href:
                q = '"' + q + '"'
            new_search = 'search?q=' + self.encrypt_path(q)

            query_params = parse_qs(urlparse.urlparse(href).query)
            for param in VALID_PARAMS:
                if param not in query_params:
                    continue
                param_val = query_params[param][0]
                new_search += '&' + param + '=' + param_val
            link['href'] = new_search
        elif 'url?q=' in href:
            # Strip unneeded arguments
            link['href'] = filter_link_args(q)

            # Add no-js option
            if self.config.nojs:
                append_nojs(link)

            if self.config.new_tab:
                link['target'] = '_blank'
        else:
            if href.startswith(MAPS_URL):
                # Maps links don't work if a site filter is applied
                link['href'] = MAPS_URL + "?q=" + clean_query(q)
            else:
                link['href'] = href

        # Replace link location if "alts" config is enabled
        if self.config.alts:
            # Search and replace all link descriptions
            # with alternative location
            link['href'] = get_site_alt(link['href'])
            link_desc = link.find_all(
                text=re.compile('|'.join(SITE_ALTS.keys())))
            if len(link_desc) == 0:
                return

            # Replace link description
            link_desc = link_desc[0]
            for site, alt in SITE_ALTS.items():
                if site not in link_desc:
                    continue
                new_desc = BeautifulSoup(features='html.parser').new_tag('div')
                new_desc.string = str(link_desc).replace(site, alt)
                link_desc.replace_with(new_desc)
                break

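    # Illustrative rewrites performed by update_link (hrefs are hypothetical):
    #   '/url?q=https://example.com/&sa=U'  ->  'https://example.com/'
    #   '/search?q=foo&tbm=isch'  ->  'search?q=<encrypted>&tbm=isch'
    #   (the 'tbm' param survives only if it appears in VALID_PARAMS)
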
    def view_image(self, soup) -> BeautifulSoup:
        """Replaces the soup with a new one that handles mobile results and
        adds a link to the full resolution image to each result.

        Args:
            soup: A BeautifulSoup object containing the mobile image results.

        Returns:
            BeautifulSoup: The new BeautifulSoup object
        """
        # Get some tags that are unchanged between mobile and pc versions
        cor_suggested = soup.find_all('table', attrs={'class': 'By0U9'})
        next_pages = soup.find_all('table', attrs={'class': 'uZgmoc'})[0]

        results = []
        # Find the results div
        results_div = soup.find_all('div', attrs={'class': 'nQvrDb'})[0]
        # Find all the results
        results_all = results_div.find_all('div', attrs={'class': 'lIMUZd'})

        for item in results_all:
            urls = item.find('a')['href'].split('&imgrefurl=')

            # Skip urls that are not two-element lists
            if len(urls) != 2:
                continue

            img_url = urlparse.unquote(urls[0].replace(
                f'/{Endpoint.imgres}?imgurl=', ''))

            try:
                # Try to strip out only the necessary part of the web page
                # link
                web_page = urlparse.unquote(urls[1].split('&')[0])
            except IndexError:
                web_page = urlparse.unquote(urls[1])

            img_tbn = urlparse.unquote(item.find('a').find('img')['src'])

            results.append({
                'domain': urlparse.urlparse(web_page).netloc,
                'img_url': img_url,
                'web_page': web_page,
                'img_tbn': img_tbn
            })

        soup = BeautifulSoup(render_template('imageresults.html',
                                             length=len(results),
                                             results=results,
                                             view_label='View Image'),
                             features='html.parser')

        # Replace the correction suggested by google, if one exists
        if len(cor_suggested):
            soup.find_all(
                'table',
                attrs={'class': 'By0U9'}
            )[0].replace_with(cor_suggested[0])
        # Replace the next page object at the bottom of the page
        soup.find_all('table',
                      attrs={'class': 'uZgmoc'})[0].replace_with(next_pages)
        return soup