diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..2d2ecd6
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+.git/
diff --git a/app/filter.py b/app/filter.py
new file mode 100644
index 0000000..7e845d3
--- /dev/null
+++ b/app/filter.py
@@ -0,0 +1,124 @@
+from bs4 import BeautifulSoup
+import re
+import urllib.parse as urlparse
+from urllib.parse import parse_qs
+
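+# Div classes Google currently uses for ad and sponsored results;
+# these scraped values are likely to change over time.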
+AD_CLASS = 'ZINbbc'
+SPONS_CLASS = 'D1fz0e'
+
+
+def reskin(page, dark_mode=False):
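+    """Aesthetic-only rewrite of the results page: swaps the logo
+    lettering, replaces Google's brand colors with a neutral palette,
+    and inverts page colors when dark mode is enabled."""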
+    # Aesthetic only re-skinning
+    page = page.replace('>G<', '>Sh<')
+    pattern = re.compile('4285f4|ea4335|fbcc05|34a853|fbbc05', re.IGNORECASE)
+    page = pattern.sub('685e79', page)
+    if dark_mode:
+        page = page.replace('fff', '000').replace('202124', 'ddd').replace('1967D2', '3b85ea')
+
+    return page
+
+
+def gen_query(q, args, near_city=None):
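+    """Build the outbound query string: a ':past <range>' token maps to
+    the tbs time filter, and result type (tbm), page start, and an
+    optional "near" city are appended as extra parameters."""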
+    # Use :past(hour/day/week/month/year) if available
+    # example search "new restaurants :past month"
+    tbs = ''
+    # if 'tbs' in args:
+    #     tbs = '&tbs=' + args.get('tbs')
+    #     q = q.replace(q.split(':past', 1)[-1], '').replace(':past', '')
+    if ':past' in q:
+        time_range = q.split(':past', 1)[-1].strip()
+        tbs = '&tbs=qdr:' + time_range[0].lower()
+
+    # Ensure search query is parsable
+    q = urlparse.quote(q)
+
+    # Pass along type of results (news, images, books, etc)
+    tbm = ''
+    if 'tbm' in args:
+        tbm = '&tbm=' + args.get('tbm')
+
+    # Get results page start value (10 per page, ie page 2 start val = 20)
+    start = ''
+    if 'start' in args:
+        start = '&start=' + args.get('start')
+
+    # Grab city from config, if available
+    near = ''
+    if near_city:
+        near = '&near=' + urlparse.quote(near_city)
+
+    return q + tbs + tbm + start + near
+
+
+def cook(soup, user_agent, nojs=False, dark_mode=False):
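+    """Scrub the parsed results page: strip ads, buttons, svg logos,
+    tracking redirects, and scripts, then optionally add no-JS links
+    and dark-mode styling."""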
+    # Remove all ads (TODO: ad-specific div classes probably change over time, look into a more generic method)
+    main_divs = soup.find('div', {'id': 'main'})
+    if main_divs is not None:
+        ad_divs = main_divs.find_all('div', {'class': AD_CLASS}, recursive=False)
+        sponsored_divs = main_divs.find_all('div', {'class': SPONS_CLASS}, recursive=False)
+        for div in ad_divs + sponsored_divs:
+            div.decompose()
+
+    # Remove unnecessary button(s)
+    for button in soup.find_all('button'):
+        button.decompose()
+
+    # Remove svg logos
+    for svg in soup.find_all('svg'):
+        svg.decompose()
+
+    # Update logo
+    logo = soup.find('a', {'class': 'l'})
+    if logo is not None and ('Android' in user_agent or 'iPhone' in user_agent):
+        logo.insert(0, 'Shoogle')
+        logo['style'] = 'display: flex;justify-content: center;align-items: center;color: #685e79;font-size: 18px;'
+
+    # Replace hrefs with only the intended destination (no "utm" type tags)
+    for a in soup.find_all('a', href=True):
+        href = a['href']
+        if '/advanced_search' in href:
+            a.decompose()
+            continue
+
+        if 'url?q=' in href:
+            # Strip unneeded arguments, keeping only the true destination
+            href = urlparse.urlparse(href)
+            href = parse_qs(href.query)['q'][0]
+            a['href'] = href
+
+            # Add no-js option
+            if nojs:
+                nojs_link = soup.new_tag('a')
+                nojs_link['href'] = '/window?location=' + href
+                nojs_link['style'] = 'display:block;width:100%;'
+                nojs_link.string = 'NoJS Link: ' + nojs_link['href']
+                a.append(BeautifulSoup('<br><hr><br>', 'html.parser'))
+                a.append(nojs_link)
+
+    # Set up dark mode if active
+    if dark_mode:
+        soup.find('html')['style'] = 'scrollbar-color: #333 #111;'
+        for input_element in soup.find_all('input'):
+            input_element['style'] = 'color:#fff;'
+
+    # Ensure no extra scripts pass through
+    for script in soup('script'):
+        script.decompose()
+
+    # Remove the footer, which is absent from some results pages
+    footer = soup.find('div', id='sfooter')
+    if footer is not None:
+        footer.decompose()
+
+    return soup
diff --git a/rhyme.py b/app/rhyme.py
similarity index 99%
rename from rhyme.py
rename to app/rhyme.py
index ba21cb7..23b9137 100644
--- a/rhyme.py
+++ b/app/rhyme.py
@@ -8,6 +8,7 @@ random.seed(time.time())
ph = Phyme()
+
def get_rhyme(word):
# Get all rhymes and merge to one list (normally separated by syllable count)
rhymes = ph.get_perfect_rhymes(word)
diff --git a/app/routes.py b/app/routes.py
index 03fa69f..6f225e6 100644
--- a/app/routes.py
+++ b/app/routes.py
@@ -1,14 +1,11 @@
-from app import app
+from app import app, rhyme, filter
from bs4 import BeautifulSoup
-from flask import request, redirect, Response, render_template
+from flask import request, redirect, render_template
+from io import BytesIO
import json
import os
import pycurl
-import rhyme
-import re
import urllib.parse as urlparse
-from urllib.parse import parse_qs
-from io import BytesIO
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_FOLDER = os.path.join(APP_ROOT, 'static')
@@ -31,7 +28,6 @@ def get_ua(user_agent):
def send_request(curl_url, ua):
- request_header = []
response_header = []
b_obj = BytesIO()
@@ -59,105 +55,17 @@ def search():
if q is None or len(q) <= 0:
return render_template('error.html')
- # Use :past(hour/day/week/month/year) if available
- # example search "new restaurants :past month"
- tbs = ''
- # if 'tbs' in request.args:
- # tbs = '&tbs=' + request.args.get('tbs')
- # q = q.replace(q.split(':past', 1)[-1], '').replace(':past', '')
- if ':past' in q:
- time_range = str.strip(q.split(':past', 1)[-1])
- tbs = '&tbs=qdr:' + str.lower(time_range[0])
-
- # Ensure search query is parsable
- q = urlparse.quote(q)
-
- # Pass along type of results (news, images, books, etc)
- tbm = ''
- if 'tbm' in request.args:
- tbm = '&tbm=' + request.args.get('tbm')
-
- # Get results page start value (10 per page, ie page 2 start val = 20)
- start = ''
- if 'start' in request.args:
- start = '&start=' + request.args.get('start')
-
- # Grab city from config, if available
- near = ''
- if 'near' in user_config:
- near = '&near=' + urlparse.quote(user_config['near'])
-
+    full_query = filter.gen_query(q, request.args, near_city=user_config.get('near'))
user_agent = request.headers.get('User-Agent')
- full_query = q + tbs + tbm + start + near
-
- get_body = send_request(SEARCH_URL + full_query, get_ua(user_agent))
-
- # Aesthetic only re-skinning
dark_mode = 'dark' in user_config and user_config['dark']
- get_body = get_body.replace('>G<', '>Sh<')
- pattern = re.compile('4285f4|ea4335|fbcc05|34a853|fbbc05', re.IGNORECASE)
- get_body = pattern.sub('685e79', get_body)
- if dark_mode:
- get_body = get_body.replace('fff', '000').replace('202124', 'ddd').replace('1967D2', '3b85ea')
+ nojs = 'nojs' in user_config and user_config['nojs']
- soup = BeautifulSoup(get_body, 'html.parser')
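+    # Fetch the results page and re-skin it before parsing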
+ get_body = filter.reskin(send_request(
+ SEARCH_URL + full_query, get_ua(user_agent)), dark_mode=dark_mode)
- # Remove all ads (TODO: Ad specific div classes probably change over time, look into a more generic method)
- main_divs = soup.find('div', {'id': 'main'})
- if main_divs is not None:
- ad_divs = main_divs.findAll('div', {'class': 'ZINbbc'}, recursive=False)
- sponsored_divs = main_divs.findAll('div', {'class': 'D1fz0e'}, recursive=False)
- for div in ad_divs + sponsored_divs:
- div.decompose()
-
- # Remove unnecessary button(s)
- for button in soup.find_all('button'):
- button.decompose()
-
- # Remove svg logos
- for svg in soup.find_all('svg'):
- svg.decompose()
-
- # Update logo
- logo = soup.find('a', {'class': 'l'})
- if logo is not None and ('Android' in user_agent or 'iPhone' in user_agent):
- logo.insert(0, 'Shoogle')
- logo['style'] = 'display: flex;justify-content: center;align-items: center;color: #685e79;font-size: 18px;'
-
- # Replace hrefs with only the intended destination (no "utm" type tags)
- for a in soup.find_all('a', href=True):
- href = a['href']
- if '/advanced_search' in href:
- a.decompose()
- continue
-
- if 'url?q=' in href:
- # Strip unneeded arguments
- href = urlparse.urlparse(href)
- href = parse_qs(href.query)['q'][0]
-
- # Add no-js option
- if 'nojs' in user_config and user_config['nojs']:
- nojs_link = soup.new_tag('a')
- nojs_link['href'] = '/window?location=' + href
- nojs_link['style'] = 'display:block;width:100%;'
- nojs_link.string = 'NoJS Link: ' + nojs_link['href']
-                a.append(BeautifulSoup('<br><hr><br>', 'html.parser'))
- a.append(nojs_link)
-
- # Set up dark mode if active
- if dark_mode:
- soup.find('html')['style'] = 'scrollbar-color: #333 #111;'
- for input_element in soup.findAll('input'):
- input_element['style'] = 'color:#fff;'
-
- # Ensure no extra scripts passed through
- try:
- for script in soup('script'):
- script.decompose()
- soup.find('div', id='sfooter').decompose()
- except Exception:
- pass
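+    # Hand the parsed page to app.filter for ad, script, and link cleanup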
+ soup = filter.cook(BeautifulSoup(get_body, 'html.parser'), user_agent, nojs=nojs, dark_mode=dark_mode)
return render_template('display.html', query=urlparse.unquote(q), response=soup)