fix: correctly handle skip_prefix logic for site_alts (#1092)

Fixes #1091
Branch: main
Gautam Korlam authored 2023-11-01 13:07:45 -07:00, committed by GitHub
parent cdf0b50284
commit 9cc1004fb8
2 changed files with 33 additions and 10 deletions

Changed file 1 of 2

@@ -12,7 +12,7 @@ import re
 import warnings
 
 SKIP_ARGS = ['ref_src', 'utm']
-SKIP_PREFIX = ['//www.', '//mobile.', '//m.', 'www.', 'mobile.', 'm.']
+SKIP_PREFIX = ['//www.', '//mobile.', '//m.']
 GOOG_STATIC = 'www.gstatic.com'
 G_M_LOGO_URL = 'https://www.gstatic.com/m/images/icons/googleg.gif'
 GOOG_IMG = '/images/branding/searchlogo/1x/googlelogo'
@@ -152,11 +152,12 @@ def get_first_link(soup: BeautifulSoup) -> str:
     return ''
 
 
-def get_site_alt(link: str) -> str:
+def get_site_alt(link: str, site_alts: dict = SITE_ALTS) -> str:
     """Returns an alternative to a particular site, if one is configured
 
     Args:
-        link: A string result URL to check against the SITE_ALTS map
+        link: A string result URL to check against the site_alts map
+        site_alts: A map of site alternatives to replace with. defaults to SITE_ALTS
 
     Returns:
         str: An updated (or ignored) result link
@@ -178,9 +179,9 @@ def get_site_alt(link: str) -> str:
     # "https://medium.com/..." should match, but "philomedium.com" should not)
     hostcomp = f'{parsed_link.scheme}://{hostname}'
 
-    for site_key in SITE_ALTS.keys():
+    for site_key in site_alts.keys():
         site_alt = f'{parsed_link.scheme}://{site_key}'
-        if not hostname or site_alt not in hostcomp or not SITE_ALTS[site_key]:
+        if not hostname or site_alt not in hostcomp or not site_alts[site_key]:
             continue
 
         # Wikipedia -> Wikiless replacements require the subdomain (if it's
@@ -193,9 +194,8 @@ def get_site_alt(link: str) -> str:
         elif 'medium' in hostname and len(subdomain) > 0:
             hostname = f'{subdomain}.{hostname}'
 
-        parsed_alt = urlparse.urlparse(SITE_ALTS[site_key])
-        link = link.replace(hostname, SITE_ALTS[site_key]) + params
-
+        parsed_alt = urlparse.urlparse(site_alts[site_key])
+        link = link.replace(hostname, site_alts[site_key]) + params
         # If a scheme is specified in the alternative, this results in a
         # replaced link that looks like "https://http://altservice.tld".
         # In this case, we can remove the original scheme from the result
@@ -205,9 +205,12 @@ def get_site_alt(link: str) -> str:
         for prefix in SKIP_PREFIX:
             if parsed_alt.scheme:
-                link = link.replace(prefix, '')
+                # If a scheme is specified, remove everything before the
+                # first occurence of it
+                link = f'{parsed_alt.scheme}{link.split(parsed_alt.scheme, 1)[-1]}'
             else:
-                link = link.replace(prefix, '//')
+                # Otherwise, replace the first occurrence of the prefix
+                link = link.replace(prefix, '//', 1)
             break
 
     return link
 
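As a side note, the string handling at issue here can be demonstrated in isolation. The sketch below is not part of the commit; the "endswith*" URLs are illustrative placeholders borrowed from the new test further down, shown only to contrast replace-all prefix stripping with the first-occurrence and scheme-anchored replacements introduced above:

    # Standalone sketch: why replace-all prefix stripping breaks alt domains that
    # merely contain "mobile.", "m.", or "www." as a substring, and why the new
    # first-occurrence / scheme-anchored handling does not. Illustrative URLs only.
    link = 'https://www.reddit.endswithmobile.domain/r/python'

    # Stripping every occurrence of a bare prefix also eats the substring inside
    # the alternative's own domain:
    assert link.replace('mobile.', '') == 'https://www.reddit.endswithdomain/r/python'

    # Replacing only the first occurrence of the anchored '//www.' prefix (the new
    # `.replace(prefix, '//', 1)` call) leaves the alt domain intact:
    assert link.replace('//www.', '//', 1) == 'https://reddit.endswithmobile.domain/r/python'

    # When the alternative specifies a scheme, rebuilding the link from the first
    # occurrence of that scheme (the new f-string branch) drops any stray prefix:
    scheme = 'http'
    leftover = 'www.http://yt.endswithwww.domain/watch?v=abc'
    assert f'{scheme}{leftover.split(scheme, 1)[-1]}' == 'http://yt.endswithwww.domain/watch?v=abc'

Limiting the replacement to a single, anchored occurrence ties the prefix stripping to the scheme separator rather than to arbitrary substrings of the alternative host, which is what the new test below exercises.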

Changed file 2 of 2

@@ -2,6 +2,7 @@ from bs4 import BeautifulSoup
 from app.filter import Filter
 from app.models.config import Config
 from app.models.endpoint import Endpoint
+from app.utils import results
 from app.utils.session import generate_key
 from datetime import datetime
 from dateutil.parser import ParserError, parse
@ -136,3 +137,22 @@ def test_leading_slash_search(client):
continue continue
assert link['href'].startswith(f'{Endpoint.search}') assert link['href'].startswith(f'{Endpoint.search}')
def test_site_alt_prefix_skip():
# Ensure prefixes are skipped correctly for site alts
# default silte_alts (farside.link)
assert results.get_site_alt(link = 'https://www.reddit.com') == 'https://farside.link/libreddit'
assert results.get_site_alt(link = 'https://www.twitter.com') == 'https://farside.link/nitter'
assert results.get_site_alt(link = 'https://www.youtube.com') == 'https://farside.link/invidious'
test_site_alts = {
'reddit.com': 'reddit.endswithmobile.domain',
'twitter.com': 'https://twitter.endswithm.domain',
'youtube.com': 'http://yt.endswithwww.domain',
}
# Domains with part of SKIP_PREFIX in them
assert results.get_site_alt(link = 'https://www.reddit.com', site_alts = test_site_alts) == 'https://reddit.endswithmobile.domain'
assert results.get_site_alt(link = 'https://www.twitter.com', site_alts = test_site_alts) == 'https://twitter.endswithm.domain'
assert results.get_site_alt(link = 'https://www.youtube.com', site_alts = test_site_alts) == 'http://yt.endswithwww.domain'
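For reference, the new site_alts keyword can also be exercised directly. This is a minimal sketch, assuming the module path added to the test imports above (app.utils.results); 'my.nitter.example' is a hypothetical placeholder host, not part of the commit:

    from app.utils import results

    # Default mapping (farside.link front ends), matching the first assertions above:
    print(results.get_site_alt('https://www.reddit.com'))  # https://farside.link/libreddit

    # A caller-supplied mapping, as the new test does with its test_site_alts dict:
    custom_alts = {'twitter.com': 'https://my.nitter.example'}
    print(results.get_site_alt('https://www.twitter.com', site_alts=custom_alts))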