from app.request import VALID_PARAMS
from app.utils.misc import BLACKLIST
from bs4 import BeautifulSoup
from bs4.element import ResultSet
from cryptography.fernet import Fernet
import re
import urllib.parse as urlparse
from urllib.parse import parse_qs

SKIP_ARGS = ['ref_src', 'utm']
FULL_RES_IMG = '<br/><a href="{}">Full Image</a>'
GOOG_IMG = '/images/branding/searchlogo/1x/googlelogo'
LOGO_URL = GOOG_IMG + '_desk'
BLANK_B64 = ('data:image/png;base64,'
             'iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAQAAAAnOwc2AAAAD0lE'
             'QVR42mNkwAIYh7IgAAVVAAuInjI5AAAAAElFTkSuQmCC')


def get_first_link(soup):
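    """Returns the href of the first search result in the page.

    Scans every anchor for one that looks like a result link (contains
    'url?q='), strips its tracking arguments, and returns it. Returns
    None if no result link is found.
    """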
    # Replace hrefs with only the intended destination (no "utm" type tags)
    for a in soup.find_all('a', href=True):
        # Return the first search result URL
        if 'url?q=' in a['href']:
            return filter_link_args(a['href'])


def filter_link_args(query_link):
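    """Strips tracking arguments (see SKIP_ARGS) from a result link.

    For example, given a hypothetical result url:

        filter_link_args('https://example.com/?q=test&utm=123')
        # -> 'https://example.com/?q=test'
    """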
    parsed_link = urlparse.urlparse(query_link)
    link_args = parse_qs(parsed_link.query)
    safe_args = {}

    # Nothing to filter if the link has no query arguments
    if not link_args:
        return query_link

    for arg in link_args:
        if arg in SKIP_ARGS:
            continue

        safe_args[arg] = link_args[arg]

    # Remove original link query and replace with filtered args
    query_link = query_link.replace(parsed_link.query, '')
    if safe_args:
        query_link = query_link + urlparse.urlencode(safe_args, doseq=True)
    else:
        query_link = query_link.replace('?', '')

    return query_link


def has_ad_content(element: str):
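    """Returns True if a text element matches the BLACKLIST of ad markers
    (case-insensitive) or contains the 'ⓘ' symbol used to flag ads."""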
    return element.upper() in (value.upper() for value in BLACKLIST) \
        or 'ⓘ' in element


class Filter:
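    """Filters and rewrites a Google results page for proxied viewing.

    Removes ads and scripts, re-skins the styling, and rewrites element
    and link urls to route through the app's /element and /search
    endpoints.

    Minimal usage sketch (the keys and markup below are placeholders --
    in the app they come from the user's session and the outbound
    search request):

        user_keys = {'text_key': Fernet.generate_key(),
                     'element_key': Fernet.generate_key()}
        soup = BeautifulSoup(response_html, 'html.parser')
        results = Filter(user_keys, config={'dark': True}).clean(soup)
    """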

    def __init__(self, user_keys: dict, mobile=False, config=None):
        if config is None:
            config = {}

        self.near = config.get('near', '')
        self.dark = config.get('dark', False)
        self.nojs = config.get('nojs', False)
        self.new_tab = config.get('new_tab', False)
        self.mobile = mobile
        self.user_keys = user_keys
        self.main_divs = ResultSet('')
        self._elements = 0

    def __getitem__(self, name):
        return getattr(self, name)

    @property
    def elements(self):
        return self._elements

    def reskin(self, page):
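        """Applies aesthetic-only rebranding to the page markup.

        Swaps the 'G' logo text and Google brand colors for the app's
        palette and, in dark mode, inverts the base page colors.
        """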
        page = page.replace('>G<', '>Wh<')
        pattern = re.compile('4285f4|ea4335|fbcc05|34a853|fbbc05',
                             re.IGNORECASE)
        page = pattern.sub('685e79', page)
        if self.dark:
            page = page.replace('fff', '000') \
                       .replace('202124', 'ddd') \
                       .replace('1967D2', '3b85ea')

        return page

    def encrypt_path(self, msg, is_element=False):
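        """Encrypts a url path to avoid plaintext results in logs.

        Element paths are encrypted with a separate key and counted so
        that the element key can be regenerated once all elements have
        loaded.
        """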
        if is_element:
            enc_path = Fernet(self.user_keys['element_key']) \
                .encrypt(msg.encode()).decode()
            self._elements += 1
            return enc_path

        return Fernet(self.user_keys['text_key']) \
            .encrypt(msg.encode()).decode()

    def clean(self, soup):
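        """Sanitizes a result page and returns the cleaned soup.

        Removes ads, scripts, and the default header/footer, applies
        styling updates, proxies image/audio sources through /element,
        rewrites all links, and forces the search form to submit via POST.
        """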
        self.main_divs = soup.find('div', {'id': 'main'})
        self.remove_ads()
        self.fix_question_section()
        self.update_styling(soup)

        for img in [_ for _ in soup.find_all('img') if 'src' in _.attrs]:
            self.update_element_src(img, 'image/png')

        for audio in [_ for _ in soup.find_all('audio') if 'src' in _.attrs]:
            self.update_element_src(audio, 'audio/mpeg')

        for link in soup.find_all('a', href=True):
            self.update_link(link)

        input_form = soup.find('form')
        if input_form is not None:
            input_form['method'] = 'POST'

        # Ensure no extra scripts passed through
        for script in soup('script'):
            script.decompose()

        # Update default footer and header
        footer = soup.find('footer')
        if footer:
            # Remove footer divs with more links than just page navigation
            for div in footer.find_all('div', recursive=False):
                if len(div.find_all('a', href=True)) > 2:
                    div.decompose()

        header = soup.find('header')
        if header:
            header.decompose()

        return soup

    def remove_ads(self):
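        """Removes any divs in the main results section that contain
        ad content (see has_ad_content)."""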
        if not self.main_divs:
            return

        for div in self.main_divs.find_all('div', recursive=True):
            has_ad = any(has_ad_content(span.text)
                         for span in div.find_all('span', recursive=True))
            if has_ad:
                div.decompose()

    def fix_question_section(self):
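        """Restyles the question divs ("People also ask"-type sections)
        with padding and italics so they stand apart from results."""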
        if not self.main_divs:
            return

        question_divs = [_ for _ in self.main_divs.find_all('div', recursive=False)
                         if len(_.find_all('h2')) > 0]
        for question_div in question_divs:
            questions = [_ for _ in question_div.find_all('div', recursive=True)
                         if _.text.endswith('?')]
            for question in questions:
                question['style'] = 'padding: 10px; font-style: italic;'

    def update_element_src(self, element, mime):
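        """Rewrites an element's src to route through the /element endpoint.

        Google logos are swapped for the local logo or a blank placeholder;
        all other sources are proxied with an encrypted url and mime type.
        """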
        element_src = element['src']
        if element_src.startswith('//'):
            element_src = 'https:' + element_src
        elif element_src.startswith(LOGO_URL):
            # Re-brand with Whoogle logo
            element['src'] = '/static/img/logo.png'
            element['style'] = 'height:40px;width:162px'
            return
        elif element_src.startswith(GOOG_IMG):
            element['src'] = BLANK_B64
            return

        element['src'] = '/element?url=' + \
            self.encrypt_path(element_src, is_element=True) + \
            '&type=' + urlparse.quote(mime)
        # TODO: Non-mobile image results link to website instead of image
        # if not self.mobile:
        #     img.append(BeautifulSoup(FULL_RES_IMG.format(element_src),
        #                              'html.parser'))

    def update_styling(self, soup):
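        """Removes leftover buttons and svg logos and applies mobile,
        search bar, and dark mode styling tweaks."""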
        # Remove unnecessary button(s)
        for button in soup.find_all('button'):
            button.decompose()

        # Remove svg logos
        for svg in soup.find_all('svg'):
            svg.decompose()

        # Update logo
        logo = soup.find('a', {'class': 'l'})
        if logo and self.mobile:
            logo['style'] = 'display:flex; justify-content:center; ' \
                            'align-items:center; color:#685e79; ' \
                            'font-size:18px; '

        # Fix search bar length on mobile
        try:
            search_bar = soup.find('header').find('form').find('div')
            search_bar['style'] = 'width: 100%;'
        except AttributeError:
            pass

        # Set up dark mode if active
        if self.dark:
            soup.find('html')['style'] = ('scrollbar-color: #333 #111;'
                                          'color:#fff !important;'
                                          'background:#000 !important')
            for input_element in soup.find_all('input'):
                input_element['style'] = 'color:#fff;background:#000;'

            for span_element in soup.find_all('span'):
                span_element['style'] = 'color: white;'

            for href_element in soup.find_all('a'):
                href_element['style'] = 'color: white' \
                    if href_element.get('href', '').startswith('/search') \
                    else ''

    def update_link(self, link):
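        """Rewrites an anchor to its intended destination.

        Internal searches are re-routed through /search with an encrypted
        query, external results are stripped of tracking arguments, and
        the advanced search link is removed entirely.
        """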
        # Replace href with only the intended destination (no "utm" type tags)
        href = link['href'].replace('https://www.google.com', '')
        if '/advanced_search' in href:
            link.decompose()
            return
        elif self.new_tab:
            link['target'] = '_blank'

        result_link = urlparse.urlparse(href)
        query_link = parse_qs(result_link.query)['q'][0] \
            if '?q=' in href else ''

        if query_link.startswith('/'):
            # Internal google.com link -- restore the full url
            link['href'] = 'https://google.com' + query_link
        elif '/search?q=' in href:
            # Internal search link -- encrypt the query and carry over
            # any valid request params
            new_search = '/search?q=' + self.encrypt_path(query_link)

            query_params = parse_qs(urlparse.urlparse(href).query)
            for param in VALID_PARAMS:
                param_val = query_params[param][0] \
                    if param in query_params else ''
                new_search += '&' + param + '=' + param_val
            link['href'] = new_search
        elif 'url?q=' in href:
            # External result link -- strip unneeded arguments
            link['href'] = filter_link_args(query_link)

            # Add no-js option
            if self.nojs:
                gen_nojs(link)
        else:
            link['href'] = href


def gen_nojs(sibling):
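    """Appends a 'NoJS' alternative link to a result, pointing at the
    /window endpoint so the page can be viewed without javascript."""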
    nojs_link = BeautifulSoup('', 'html.parser').new_tag('a')
    nojs_link['href'] = '/window?location=' + sibling['href']
    nojs_link['style'] = 'display:block;width:100%;'
    nojs_link.string = 'NoJS Link: ' + nojs_link['href']
    sibling.append(BeautifulSoup('<br><hr><br>', 'html.parser'))
    sibling.append(nojs_link)