Added optional no-JS functionality, added location-based searching (hardcoded), updated HTML

main
Ben Busby 2020-02-21 23:52:29 +00:00
parent 4636b0f695
commit 24aa4367d3
4 changed files with 89 additions and 18 deletions

@@ -5,10 +5,41 @@ import os
 import pycurl
 import re
 from .url import url_parse
+import urllib.parse as urlparse
+from urllib.parse import parse_qs
 from io import BytesIO

-MOBILE_UA = 'Mozilla/5.0 (Linux; Android 8.0.0; SM-G960F Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36'
-DESKTOP_UA = 'Brozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Mobile LizzieMcGuirefox/59.0'
+MOBILE_UA = os.environ.get('MOZ') + '/5.0 (Android 4.20; Mobile; rv:54.0) Gecko/54.0 ' + os.environ.get('FF') + '/54.0'
+DESKTOP_UA = os.environ.get('MOZ') + '/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Mobile ' + os.environ.get('FF') + '/59.0'
+SEARCH_URL = 'https://www.google.com/search?gbv=1&q='
+
+nojs = int(os.environ.get('NOJS'))
+
+
+def get_ua(user_agent):
+    return MOBILE_UA if ('Android' in user_agent or 'iPhone' in user_agent) else DESKTOP_UA
+
+
+def send_request(url, ua):
+    request_header = []
+
+    # Update as an optional param
+    # Todo: this doesn't seem to work
+    ip = '64.22.92.48'
+    request_header.append('CLIENT-IP: ' + ip)
+    request_header.append('X-FORWARDED-FOR: ' + ip)
+
+    b_obj = BytesIO()
+    crl = pycurl.Curl()
+    crl.setopt(crl.URL, url)
+    crl.setopt(crl.USERAGENT, ua)
+    crl.setopt(crl.HTTPHEADER, request_header)
+    crl.setopt(crl.WRITEDATA, b_obj)
+    crl.perform()
+    crl.close()
+
+    return b_obj.getvalue().decode('utf-8', 'ignore')
+
 @app.route('/', methods=['GET'])
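
A note on the constants in the hunk above: the user-agent strings are now assembled from environment variables, so the app expects MOZ, FF, and NOJS to be set before it starts (int(os.environ.get('NOJS')) raises a TypeError otherwise). A minimal sketch of the values the code appears to expect — the variable names come from the diff, the values are only illustrative:

import os

# Hypothetical values; the diff only shows that these variables are read.
os.environ['MOZ'] = 'Mozilla'
os.environ['FF'] = 'Firefox'
os.environ['NOJS'] = '1'   # parsed with int(), so '0' or '1'

# With these set, MOBILE_UA resolves to an ordinary Firefox-style string:
# 'Mozilla/5.0 (Android 4.20; Mobile; rv:54.0) Gecko/54.0 Firefox/54.0'
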
@@ -30,27 +61,29 @@ def search():
     if 'start' in request.args:
         start = '&start=' + request.args.get('start')

+    # Change to a config setting
+    near = '&near=boulder'
+    if 'near' in request.args:
+        near = '&near=' + request.args.get('near')
+
     user_agent = request.headers.get('User-Agent')
-    full_query = url_parse(q) + tbm + start
+    full_query = url_parse(q) + tbm + start + near

-    google_ua = DESKTOP_UA
-    if 'Android' in user_agent or 'iPhone' in user_agent:
-        google_ua = MOBILE_UA
-
-    b_obj = BytesIO()
-    crl = pycurl.Curl()
-    crl.setopt(crl.URL, 'https://www.google.com/search?gbv=1&q=' + full_query)
-    crl.setopt(crl.USERAGENT, google_ua)
-    crl.setopt(crl.WRITEDATA, b_obj)
-    crl.perform()
-    crl.close()
-
-    get_body = b_obj.getvalue().decode('utf-8', 'ignore')
-    get_body = get_body.replace('data-src', 'src').replace('.001', '1').replace('visibility:hidden', 'visibility:visible').replace('>G<', '>Bl<')
+    get_body = send_request(SEARCH_URL + full_query, get_ua(user_agent))
+    get_body = get_body.replace('>G<', '>Bl<')

     pattern = re.compile('4285f4|ea4335|fbcc05|34a853|fbbc05', re.IGNORECASE)
     get_body = pattern.sub('0000ff', get_body)

     soup = BeautifulSoup(get_body, 'html.parser')
+
+    for a in soup.find_all('a', href=True):
+        href = a['href']
+        if 'url?q=' in href:
+            href = urlparse.urlparse(href)
+            href = parse_qs(href.query)['q'][0]
+            if nojs:
+                a['href'] = '/window?location=' + href
+            #else:
+            #    a['href'] = 'about:reader?url=' + href

     try:
         for script in soup("script"):
             script.decompose()
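
The new anchor-rewriting loop above unwraps Google's '/url?q=...' redirect links before pointing them at the /window route. A standalone sketch of just that extraction step (the example href is made up):

import urllib.parse as urlparse
from urllib.parse import parse_qs

href = '/url?q=https://example.com/page&sa=U&ved=abc123'  # hypothetical result link
parsed = urlparse.urlparse(href)
print(parse_qs(parsed.query)['q'][0])  # https://example.com/page
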
@@ -58,7 +91,7 @@ def search():
     except Exception:
         pass

-    return render_template('search.html', response=soup)
+    return render_template('display.html', response=soup)
 @app.route('/url', methods=['GET'])
@@ -78,5 +111,22 @@ def imgres():
     return redirect(request.args.get('imgurl'))

+
+@app.route('/window')
+def window():
+    get_body = send_request(request.args.get('location'), get_ua(request.headers.get('User-Agent')))
+    get_body = get_body.replace('src="/', 'src="' + request.args.get('location') + '"')
+    get_body = get_body.replace('href="/', 'href="' + request.args.get('location') + '"')
+
+    soup = BeautifulSoup(get_body, 'html.parser')
+
+    try:
+        for script in soup('script'):
+            script.decompose()
+    except Exception:
+        pass
+
+    return render_template('display.html', response=soup)
+

 if __name__ == '__main__':
     app.run(debug=True, host='0.0.0.0')
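
A reviewer note on /window: the route fixes up relative src/href values by string replacement, which splices the full location value (plus a stray closing quote) in front of the path instead of joining the two URLs. A sketch of the standard-library alternative using urllib.parse.urljoin — not what this commit does, just the conventional approach:

from urllib.parse import urljoin
from bs4 import BeautifulSoup

html = '<img src="/img/x.png"><a href="/about">About</a>'  # hypothetical page body
soup = BeautifulSoup(html, 'html.parser')
base = 'https://example.com/articles/'                      # hypothetical ?location= value

for attr in ('src', 'href'):
    for tag in soup.find_all(attrs={attr: True}):
        tag[attr] = urljoin(base, tag[attr])

print(soup)
# <img src="https://example.com/img/x.png"/><a href="https://example.com/about">About</a>
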

requirements.txt

@@ -1,3 +1,5 @@
+beautifulsoup4==4.8.2
+bs4==0.0.1
 cffi==1.13.2
 Click==7.0
 cryptography==2.8
@@ -5,8 +7,10 @@ Flask==1.1.1
 itsdangerous==1.1.0
 Jinja2==2.10.3
 MarkupSafe==1.1.1
+Phyme==0.0.9
 pycparser==2.19
 pycurl==7.43.0.4
 pyOpenSSL==19.1.0
 six==1.14.0
+soupsieve==1.9.5
 Werkzeug==0.16.0

rhyme.py (new file)

@@ -0,0 +1,17 @@
+from Phyme import Phyme
+import random
+import sys
+import time
+
+random.seed(time.clock())
+
+ph = Phyme()
+rhymes = ph.get_perfect_rhymes(sys.argv[1])
+
+rhyme_vals = []
+
+for arr in rhymes.values():
+    for rhyme in arr:
+        rhyme_vals.append(rhyme)
+
+print(rhyme_vals[random.randint(0, len(rhyme_vals))].capitalize())
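
Two small issues in rhyme.py worth flagging for a follow-up: time.clock() is deprecated (and removed in Python 3.8), and random.randint(0, len(rhyme_vals)) is inclusive at both ends, so it can index one past the end of the list. A corrected sketch, assuming the same Phyme API as above:

from Phyme import Phyme
import random
import sys

ph = Phyme()
rhymes = ph.get_perfect_rhymes(sys.argv[1])

# Flatten {syllable_count: [words]} into one list of candidates
rhyme_vals = [rhyme for arr in rhymes.values() for rhyme in arr]

# random.choice avoids the off-by-one and needs no explicit seeding
print(random.choice(rhyme_vals).capitalize())
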