Merge pull request #655 from JackDandy/feature/FixAltUnicodeSearch

Fix alternative unicode show names breaking search.
JackDandy 2016-02-27 14:21:23 +00:00
commit d0231cea56
5 changed files with 25 additions and 14 deletions

CHANGES.md

@@ -36,6 +36,7 @@
 * Change indicate when not sorting with article by dimming ("The", "A", "An") on Show List, Episode, History,
   Mass Update, Add with Browse and from Existing views
 * Add Emby notifier to config/Notifications
+* Fix alternative unicode show names breaking search


 ### 0.11.6 (2016-02-18 23:10:00 UTC)

sickbeard/providers/generic.py

@@ -131,6 +131,11 @@ class GenericProvider:

         return result

+    # noinspection PyUnusedLocal
+    def cb_response(self, r, *args, **kwargs):
+        self.session.response = dict(url=r.url, status_code=r.status_code, elapsed=r.elapsed, from_cache=r.from_cache)
+        return r
+
     def get_url(self, url, post_data=None, params=None, timeout=30, json=False):
         """
         By default this is just a simple urlopen call but this method should be overridden
@@ -142,7 +147,7 @@ class GenericProvider:
             return

         return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
-                              session=self.session, json=json)
+                              session=self.session, json=json, hooks=dict(response=self.cb_response))

     def download_result(self, result):
         """

sickbeard/providers/newznab.py

@@ -17,7 +17,6 @@
 # along with SickGear. If not, see <http://www.gnu.org/licenses/>.

 import time
-import urllib

 import sickbeard
@@ -84,8 +83,7 @@ class NewznabProvider(generic.NZBProvider):
         categories = self.get_url('%s/api' % self.url, params=params, timeout=10)

         if not categories:
-            logger.log(u'Error getting html for [%s/api?%s]' %
-                       (self.url, '&'.join('%s=%s' % (x, y) for x, y in params.items())), logger.DEBUG)
+            logger.log(u'Error getting html for [%s]' % self.session.response['url'], logger.DEBUG)
             return (False, return_categories, 'Error getting html for [%s]' %
                     ('%s/api?%s' % (self.url, '&'.join('%s=%s' % (x, y) for x, y in params.items()))))
@@ -258,9 +256,8 @@ class NewznabProvider(generic.NZBProvider):

             # hardcoded to stop after a max of 4 hits (400 items) per query
             while (offset <= total) and (offset < (200, 400)[self.supports_tvdbid()]) and batch_count:
                 cnt = len(results)
-                search_url = '%sapi?%s' % (self.url, urllib.urlencode(request_params))
-                data = self.cache.getRSSFeed(search_url)
+                data = self.cache.getRSSFeed('%sapi' % self.url, params=request_params)
                 i and time.sleep(1.1)

                 if not data or not self.check_auth_from_data(data):
@@ -295,13 +292,13 @@ class NewznabProvider(generic.NZBProvider):
                     break

                 if offset != request_params['offset']:
-                    logger.log('Tell your newznab provider to fix their bloody newznab responses')
+                    logger.log('Ask your newznab provider to fix their newznab responses')
                     break

                 request_params['offset'] += request_params['limit']
                 if total <= request_params['offset']:
                     exit_log = True
-                    logger.log('%s item%s found that will be used for episode matching' % (total, helpers.maybe_plural(total)),
+                    logger.log('%s item%s found for episode matching' % (total, helpers.maybe_plural(total)),
                                logger.DEBUG)
                     break
@@ -310,10 +307,10 @@ class NewznabProvider(generic.NZBProvider):
                 logger.log('%s more item%s to fetch from a batch of up to %s items.'
                            % (items, helpers.maybe_plural(items), request_params['limit']), logger.DEBUG)

-                batch_count = self._log_result(results, mode, cnt, search_url)
+                batch_count = self._log_result(results, mode, cnt, data.rq_response['url'])

             if exit_log:
-                self._log_result(results, mode, cnt, search_url)
+                self._log_result(results, mode, cnt, data and data.rq_response['url'] or '%sapi' % self.url)
                 exit_log = False

             if 'tvdbid' in request_params and len(results):
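
For context on the underlying bug: urllib.urlencode() on Python 2 (the runtime SickGear targeted at the time) str()-ifies each value with the default ascii codec, so an alternative show name containing non-ascii characters raised UnicodeEncodeError before any request was sent. Handing the raw dict to requests via params= avoids that, because requests encodes unicode values itself. A short sketch of both paths, assuming Python 2 and a made-up show name:

# -*- coding: utf-8 -*-
# Python 2 sketch; the show name below is a made-up example
import urllib

import requests

request_params = {'q': u'Bron broen s\u00e6son 3', 'limit': 100}

try:
    # old path: urlencode() str()-ifies each value with the ascii codec,
    # which fails on the unicode show name
    urllib.urlencode(request_params)
except UnicodeEncodeError as e:
    print('urlencode choked on the unicode show name: %s' % e)

# new path: requests encodes params itself (utf-8, then percent-encoded)
prepared = requests.Request('GET', 'http://example.com/api',
                            params=request_params).prepare()
print(prepared.url)  # e.g. ...api?q=Bron+broen+s%C3%A6son+3&limit=100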

sickbeard/rssfeeds.py

@@ -14,6 +14,7 @@ class RSSFeeds:
     def __init__(self, provider=None):

         self.provider = provider
+        self.response = None

     def _check_auth_cookie(self):
@@ -21,7 +22,12 @@ class RSSFeeds:
             return self.provider.check_auth_cookie()
         return True

-    def get_feed(self, url, request_headers=None):
+    # noinspection PyUnusedLocal
+    def cb_response(self, r, *args, **kwargs):
+        self.response = dict(url=r.url, elapsed=r.elapsed, from_cache=r.from_cache)
+        return r
+
+    def get_feed(self, url, request_headers=None, **kwargs):

         if not self._check_auth_cookie():
             return
@@ -30,12 +36,14 @@ class RSSFeeds:
         if self.provider and hasattr(self.provider, 'session'):
             session = self.provider.session

-        response = helpers.getURL(url, headers=request_headers, session=session)
+        response = helpers.getURL(url, headers=request_headers, session=session,
+                                  hooks=dict(response=self.cb_response), **kwargs)
         if not response:
             return

         try:
             feed = feedparser.parse(response)
+            feed['rq_response'] = self.response
             if feed and 'entries' in feed:
                 return feed
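
Stashing the hook data on the parsed result works because feedparser returns a FeedParserDict, which accepts arbitrary keys and supports both mapping and attribute access; that is what lets the newznab code above read the URL back as data.rq_response['url']. A quick illustration with a dummy feed string:

import feedparser

# feedparser parses strings as well as URLs; a dummy feed is enough here
feed = feedparser.parse('<rss version="2.0"><channel><title>demo</title>'
                        '</channel></rss>')

# extra request metadata can be stashed on the result like any dict key...
feed['rq_response'] = dict(url='http://example.com/api?t=tvsearch', from_cache=False)

# ...and read back via either dict or attribute syntax
print(feed['rq_response']['url'])
print(feed.rq_response['url'])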

sickbeard/tvcache.py

@@ -107,8 +107,8 @@ class TVCache:
             return []

-    def getRSSFeed(self, url):
-        return RSSFeeds(self.provider).get_feed(url)
+    def getRSSFeed(self, url, **kwargs):
+        return RSSFeeds(self.provider).get_feed(url, **kwargs)

     def _translateTitle(self, title):
         return u'' + title.replace(' ', '.')
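
The tvcache.py change is a plain **kwargs relay: a keyword argument such as params=request_params travels from the provider call through TVCache.getRSSFeed() into RSSFeeds.get_feed() and on to helpers.getURL(). A stripped-down sketch of that relay (the class and function names below are simplified stand-ins, not the SickGear code):

def fetch(url, params=None, headers=None):
    # stand-in for helpers.getURL: the layer that finally consumes params
    query = '&'.join('%s=%s' % kv for kv in sorted((params or {}).items()))
    return '%s?%s' % (url, query)

class Feed(object):
    def get_feed(self, url, request_headers=None, **kwargs):
        # forwards whatever extra keywords the caller supplied
        return fetch(url, headers=request_headers, **kwargs)

class Cache(object):
    def getRSSFeed(self, url, **kwargs):
        # pure relay, mirroring the one-line change above
        return Feed().get_feed(url, **kwargs)

print(Cache().getRSSFeed('http://example.com/api', params={'t': 'tvsearch'}))
# -> http://example.com/api?t=tvsearch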