from app import app
from bs4 import BeautifulSoup
from flask import request, redirect, Response, render_template
import json
import os
import pycurl
import rhyme
import re
import urllib.parse as urlparse
from urllib.parse import parse_qs
from io import BytesIO

# Get Mozilla Firefox rhyme (important) and form a new user agent
mozilla = rhyme.get_rhyme('Mo') + 'zilla'
firefox = rhyme.get_rhyme('Fire') + 'fox'

MOBILE_UA = mozilla + '/5.0 (Android 4.20; Mobile; rv:54.0) Gecko/54.0 ' + firefox + '/59.0'
DESKTOP_UA = mozilla + '/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 ' + firefox + '/59.0'
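
# Note: get_rhyme presumably returns a word rhyming with its argument, so the
# strings above resemble, but never exactly match, a stock Firefox user agent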

# Base search url
SEARCH_URL = 'https://www.google.com/search?gbv=1&q='
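
# A hypothetical fully assembled request URL (the /search route below appends
# the tbs/tbm/start/near parameters as needed):
#   https://www.google.com/search?gbv=1&q=new+restaurants&tbs=qdr:m&near=boston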

# Optional nojs tag - opens links in a contained window with all js removed
# (can be useful for achieving nojs on mobile)
# Defaults to 0 (disabled) when the NOJS env var is unset
nojs = int(os.environ.get('NOJS', 0))

# Load user config (e.g. a preferred "near" city) from config.json
with open('config.json') as config_file:
    config = json.load(config_file)
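
# The only config key this file reads is "near"; a minimal (hypothetical)
# config.json could therefore be:
#   {"near": "boston"}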


def get_ua(user_agent):
    # Serve the mobile UA to mobile browsers, the desktop UA to everything else
    return MOBILE_UA if ('Android' in user_agent or 'iPhone' in user_agent) else DESKTOP_UA


def send_request(curl_url, ua):
    # Fetch the page with pycurl, buffering the response body in memory
    b_obj = BytesIO()
    crl = pycurl.Curl()
    crl.setopt(crl.URL, curl_url)
    crl.setopt(crl.USERAGENT, ua)
    crl.setopt(crl.WRITEDATA, b_obj)
    crl.perform()
    crl.close()

    return b_obj.getvalue().decode('utf-8', 'ignore')
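
# A minimal usage sketch (hypothetical query string):
#   body = send_request(SEARCH_URL + 'python+flask', DESKTOP_UA)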


@app.route('/', methods=['GET'])
def index():
    return render_template('index.html')


@app.route('/search', methods=['GET'])
def search():
    q = request.args.get('q')
    if q is None or len(q) <= 0:
        return render_template('error.html')

    # Use :past(hour/day/week/month/year) if available
    # example search "new restaurants :past month"
    tbs = ''
    # if 'tbs' in request.args:
    #     tbs = '&tbs=' + request.args.get('tbs')
    #     q = q.replace(q.split(':past', 1)[-1], '').replace(':past', '')
    if ':past' in q:
        time_range = q.split(':past', 1)[-1].strip()
        # Guard against an empty range (a query ending in ':past')
        if len(time_range) > 0:
            tbs = '&tbs=qdr:' + time_range[0].lower()
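    # e.g. "new restaurants :past month" produces tbs == '&tbs=qdr:m'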

    # Ensure search query is parsable
    q = urlparse.quote(q)

    # Pass along type of results (news, images, books, etc)
    tbm = ''
    if 'tbm' in request.args:
        tbm = '&tbm=' + request.args.get('tbm')

    # Get results page start value (10 per page, ie page 2 start val = 20)
    start = ''
    if 'start' in request.args:
        start = '&start=' + request.args.get('start')

    # Grab city from config, if available
    near = ''
    if 'near' in config:
        near = '&near=' + config['near']

    user_agent = request.headers.get('User-Agent')
    full_query = q + tbs + tbm + start + near

    get_body = send_request(SEARCH_URL + full_query, get_ua(user_agent))

    # Aesthetic only re-skinning: swap the "G" logo text and replace Google's
    # brand colors with the Shoogle theme color
    get_body = get_body.replace('>G<', '>Sh<')
    pattern = re.compile('4285f4|ea4335|fbcc05|34a853|fbbc05', re.IGNORECASE)
    get_body = pattern.sub('685e79', get_body)

    soup = BeautifulSoup(get_body, 'html.parser')

    # Remove all ads (TODO: Ad specific div class may change over time, look into a more generic method)
    main_divs = soup.find('div', {'id': 'main'})
    if main_divs is not None:
        ad_divs = main_divs.find_all('div', {'class': 'ZINbbc'}, recursive=False)
        for div in ad_divs:
            div.decompose()

    # Remove unnecessary button(s)
    for button in soup.find_all('button'):
        button.decompose()

    # Remove svg logos
    for svg in soup.find_all('svg'):
        svg.decompose()

    # Update logo
    logo = soup.find('a', {'class': 'l'})
    if logo is not None and ('Android' in user_agent or 'iPhone' in user_agent):
        logo.insert(0, 'Shoogle')
        logo['style'] = 'display: flex;justify-content: center;align-items: center;color: #685e79;font-size: 18px;'

    # Replace hrefs with only the intended destination (no "utm" type tags)
    for a in soup.find_all('a', href=True):
        href = a['href']
        if '/advanced_search' in href:
            a.decompose()
            continue
        if 'url?q=' in href:
            # Extract the real destination from the redirect wrapper
            href = urlparse.urlparse(href)
            href = parse_qs(href.query)['q'][0]
            if nojs:
                a['href'] = '/window?location=' + href
            # else: # Automatically go to reader mode in ff? Not sure if possible
            #     a['href'] = 'about:reader?url=' + href
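        # A hypothetical example of the rewrite above:
        #   '/url?q=https://example.com/&sa=U' -> href 'https://example.com/'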

    # Ensure no extra scripts passed through
    try:
        for script in soup('script'):
            script.decompose()
        soup.find('div', id='sfooter').decompose()
    except Exception:
        pass

    return render_template('display.html', response=soup)


@app.route('/url', methods=['GET'])
def url():
    if 'url' in request.args:
        return redirect(request.args.get('url'))

    q = request.args.get('q')
    if q is not None and len(q) > 0 and 'http' in q:
        return redirect(q)
    else:
        return render_template('error.html')


@app.route('/imgres')
def imgres():
    return redirect(request.args.get('imgurl'))


@app.route('/window')
def window():
    location = request.args.get('location')
    get_body = send_request(location, get_ua(request.headers.get('User-Agent')))

    # Rewrite relative src/href attributes as absolute urls under the
    # requested location
    get_body = get_body.replace('src="/', 'src="' + location + '/')
    get_body = get_body.replace('href="/', 'href="' + location + '/')

    soup = BeautifulSoup(get_body, 'html.parser')

    try:
        for script in soup('script'):
            script.decompose()
    except Exception:
        pass

    return render_template('display.html', response=soup)
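
# A hypothetical request to the route above:
#   GET /window?location=https://example.com
# returns the target page with scripts stripped and relative urls made absolute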


if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')