Add a provider error table to the Manage/Media Search page.

Add failure handling; skip a provider for x hour(s) depending on its failure count.
Add detection of when an API hit limit is reached.
Add failure count to omgwtfnzbs.
Improve category selection (for example, during manual search).
Prinz23 2017-11-02 18:30:05 +00:00 committed by JackDandy
parent ce79d91430
commit f9cc6ed330
8 changed files with 532 additions and 23 deletions
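The failure handling summarised above uses an escalating backoff: once a provider accumulates three failures, searches skip it for an interval taken from the fail_times table in generic.py (below), clamped at the 24 hour step. A minimal standalone sketch of that computation, assuming the table from the diff; the names here are illustrative, the real logic lives in fail_time_index(), wait_time() and should_skip():

import datetime

# failure-count step -> (hours, minutes) a provider is skipped for;
# table copied from fail_times in generic.py
FAIL_TIMES = {1: (0, 15), 2: (0, 30), 3: (1, 0), 4: (2, 0),
              5: (3, 0), 6: (6, 0), 7: (12, 0), 8: (24, 0)}

def skip_interval(failure_count):
    # should_skip() only engages at 3 or more failures
    if 3 > failure_count:
        return datetime.timedelta(0)
    # fail_time_index() maps the count to a step, clamped at the last entry
    step = min(failure_count - 2, len(FAIL_TIMES))
    hours, minutes = FAIL_TIMES[step]
    return datetime.timedelta(hours=hours, minutes=minutes)

for count in (1, 3, 5, 8, 20):
    print('%2d failure(s) -> skip %s' % (count, skip_interval(count)))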

View file

@@ -1,4 +1,5 @@
#import sickbeard
#from sickbeard import sbdatetime
##
#set global $title = 'Media Search'
#set global $header = 'Media Search'
@@ -7,6 +8,7 @@
##
#import os.path
#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_top.tmpl')
<input type="hidden" id="sbRoot" value="$sbRoot">
<script type="text/javascript" src="$sbRoot/js/plotTooltip.js?v=$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/manageSearches.js?v=$sbPID"></script>
@@ -46,9 +48,45 @@
In Progress<br />
#end if
<br />
#if $provider_errors
<h3>Provider Errors:</h3><br>
#for $prov in $provider_error_stats
#if $len($prov['errors'])
$prov['name']<input type="button" class="shows-more btn" id="$prov['name']-btn-more" value="Expand" style="display:none"><input type="button" class="shows-less btn" id="$prov['name']-btn-less" value="Collapse">
#if $prov['next_try']
#set nt = $str($prov['next_try']).split('.', 2)
Next try in: $nt[0] <input type="button" class="prov-retry btn" id="$prov['prov_id']-btn-retry" value="Retry">
#end if
<br>
<table class="sickbeardTable manageTable" cellspacing="1" border="0" cellpadding="0">
<thead><tr><th>Day</th><th>HTTP</th><th>Connection</th><th>Connection Timeout</th><th>Timeout</th><th>Unknown</th><th>No Data</th>
#if $prov['has_limit']
<th>Hit Limit</th>
#end if
</tr></thead>
<tbody>
#set $row = 0
#for $error in $prov['errors']
<tr class="#echo ('odd', 'even')[$row % 2]##set $row+=1#">
<td style="color:white">$sbdatetime.sbdatetime.sbfdate($error['date'])</td>
<td style="color:white">$error['http'].get('count', 0)</td>
<td style="color:white">$error['connection'].get('count', 0)</td>
<td style="color:white">$error['connection_timeout'].get('count', 0)</td>
<td style="color:white">$error['timeout'].get('count', 0)</td>
<td style="color:white">$error['unknown'].get('count', 0)</td>
<td style="color:white">$error['nodata'].get('count', 0)</td>
#if $prov['has_limit']
<td style="color:white">$error.get('limit', {}).get('count', 0)</td>
#end if
</tr>
#end for
</tbody>
</table>
#end if
#end for
<br>
<br /><br />
#end if
<h3>Search Queue:</h3>
#if $queue_length['backlog'] or $queue_length['manual'] or $queue_length['failed']
<input type="button" class="show-all-more btn" id="all-btn-more" value="Expand All"><input type="button" class="show-all-less btn" id="all-btn-less" value="Collapse All"><br>

View file

@@ -1,4 +1,4 @@
$(document).ready(function() {
$('#recentsearch,#propersearch').click(function(){
$(this).addClass('disabled');
})
@@ -30,4 +30,15 @@ $(document).ready(function() {
$(this).hide();
$(this).nextAll('input:first').show();
})
$('.prov-retry').click(function () {
$(this).addClass('disabled');
var match = $(this).attr('id').match(/^(.+)-btn-retry$/);
$.ajax({
url: sbRoot + '/manage/manageSearches/retryProvider?provider=' + match[1],
type: 'GET',
complete: function () {
window.location.reload(true);
}
});
})
});
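The Retry button above issues a plain GET to the new retryProvider endpoint. A hedged sketch of calling the same endpoint outside the UI; the host, port and provider id are assumptions for illustration:

import requests

SB_ROOT = 'http://localhost:8081'  # assumed default SickGear address
requests.get(SB_ROOT + '/manage/manageSearches/retryProvider',
             params={'provider': 'omgwtfnzbs'}, timeout=10)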

View file

@@ -19,7 +19,7 @@
from sickbeard import db
MIN_DB_VERSION = 1
MAX_DB_VERSION = 3
MAX_DB_VERSION = 4
# Add new migrations at the bottom of the list; subclass the previous migration.
@@ -105,3 +105,26 @@ class AddBacklogParts(ConsolidateProviders):
self.connection.action('VACUUM')
self.incDBVersion()
class AddProviderErrors(AddBacklogParts):
def test(self):
return self.checkDBVersion() > 3
def execute(self):
db.backup_database('cache.db', self.checkDBVersion())
if not self.hasTable('providererrors'):
self.connection.action('CREATE TABLE providererrors ("prov_name" TEXT, "error_type" INTEGER, '
'"error_code" INTEGER, "error_time" NUMERIC)')
self.connection.action('CREATE INDEX idx_prov_name_error ON providererrors (prov_name)')
self.connection.action('CREATE UNIQUE INDEX idx_prov_errors ON providererrors (prov_name, error_time)')
if not self.hasTable('providererrorcount'):
self.connection.action('CREATE TABLE providererrorcount (prov_name TEXT PRIMARY KEY, '
'failure_count NUMERIC, failure_time NUMERIC, hit_limit_count NUMERIC, '
'hit_limit_time NUMERIC, hit_limit_wait NUMERIC)')
self.connection.action('VACUUM')
self.incDBVersion()
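The two new tables can be inspected directly with sqlite3. A minimal sketch that aggregates recent providererrors rows per provider and error type, assuming cache.db is opened from SickGear's data directory and that error_time holds epoch seconds (as the provider code writes via sbdatetime.totimestamp):

import sqlite3
import time

conn = sqlite3.connect('cache.db')  # path is an assumption
cutoff = time.time() - 28 * 24 * 3600  # mirror the 28-day retention in clear_old()
for prov_name, error_type, count in conn.execute(
        'SELECT prov_name, error_type, COUNT(*)'
        ' FROM providererrors WHERE error_time > ?'
        ' GROUP BY prov_name, error_type', [cutoff]):
    print('%s: error type %s seen %s time(s)' % (prov_name, error_type, count))
conn.close()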

View file

@@ -27,6 +27,7 @@ import re
import time
import urlparse
import threading
import socket
from urllib import quote_plus
import zlib
from base64 import b16encode, b32decode
@@ -45,13 +46,124 @@ from sickbeard.exceptions import SickBeardException, AuthException, ex
from sickbeard.helpers import maybe_plural, remove_file_failed
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard.show_name_helpers import get_show_names_all_possible
from sickbeard.sbdatetime import sbdatetime
class HaltParseException(SickBeardException):
"""Something requires the current processing to abort"""
class GenericProvider:
class ProviderErrorTypes:
http = 1
connection = 2
connection_timeout = 3
timeout = 4
unknown = 5
limit = 6
nodata = 7
names = {1: 'http', 2: 'connection', 3: 'connection_timeout', 4: 'timeout', 5: 'unknown', 6: 'limit', 7: 'nodata'}
def __init__(self):
pass
class ProviderError(object):
def __init__(self, error_type=ProviderErrorTypes.unknown, code=None, error_time=None):
self.code = code
self.error_type = error_type
self.error_time = (datetime.datetime.now(), error_time)[isinstance(error_time, datetime.datetime)]
class ProviderErrorList(object):
def __init__(self, provider_name):
self.provider_name = provider_name
self._errors = []
self.lock = threading.Lock()
self.clear_old()
self.load_list()
self.last_save = datetime.datetime.now()
self.dirty = False
@property
def errors(self):
return self._errors
@property
def errors_sorted(self):
error_dict = {}
b_d = {'count': 0, 'code': None}
for e in self._errors:
dd = e.error_time.date()
if ProviderErrorTypes.names[e.error_type] not in error_dict.get(dd, {}):
error_dict.setdefault(dd,
{'date': dd, 'http': b_d.copy(), 'connection': b_d.copy(),
'connection_timeout': b_d.copy(), 'timeout': b_d.copy(),
'unknown': b_d.copy(), 'limit': b_d.copy(),
'nodata': b_d.copy()})[ProviderErrorTypes.names[e.error_type]]['count'] = 1
else:
error_dict[dd][ProviderErrorTypes.names[e.error_type]]['count'] += 1
if ProviderErrorTypes.http == e.error_type:
if e.code in error_dict[dd].get(ProviderErrorTypes.names[e.error_type], {}):
error_dict[dd][ProviderErrorTypes.names[e.error_type]][e.code] += 1
else:
error_dict[dd][ProviderErrorTypes.names[e.error_type]][e.code] = 1
error_list = sorted([error_dict[k] for k in error_dict.iterkeys()], key=lambda x: x.get('date'), reverse=True)
return error_list
def add_error(self, error):
if isinstance(error, ProviderError):
with self.lock:
self.dirty = True
self._errors.append(error)
logger.log('Adding error: %s for %s' %
(ProviderErrorTypes.names.get(error.error_type, 'unknown'), self.provider_name()),
logger.DEBUG)
self.save_list()
def save_list(self):
if self.dirty:
self.clear_old()
with self.lock:
myDB = db.DBConnection('cache.db')
cl = []
for e in self._errors:
cl.append(['INSERT OR IGNORE INTO providererrors (prov_name, error_type, error_code, error_time) '
'VALUES (?,?,?,?)', [self.provider_name(), e.error_type, e.code,
sbdatetime.totimestamp(e.error_time)]])
self.dirty = False
if cl:
myDB.mass_action(cl)
self.last_save = datetime.datetime.now()
def load_list(self):
with self.lock:
try:
myDB = db.DBConnection('cache.db')
if myDB.hasTable('providererrors'):
results = myDB.select('SELECT * FROM providererrors WHERE prov_name = ?', [self.provider_name()])
self._errors = []
for r in results:
try:
self._errors.append(ProviderError(
error_type=helpers.tryInt(r['error_type']), code=helpers.tryInt(r['error_code']),
error_time=datetime.datetime.fromtimestamp(helpers.tryInt(r['error_time']))))
except (StandardError, Exception):
continue
except (StandardError, Exception):
pass
def clear_old(self):
with self.lock:
try:
myDB = db.DBConnection('cache.db')
if myDB.hasTable('providererrors'):
time_limit = sbdatetime.totimestamp(datetime.datetime.now() - datetime.timedelta(days=28))
myDB.action('DELETE FROM providererrors WHERE error_time < ?', [time_limit])
except (StandardError, Exception):
pass
class GenericProvider(object):
NZB = 'nzb'
TORRENT = 'torrent'
@@ -86,6 +198,226 @@ class GenericProvider:
# 'Chrome/32.0.1700.107 Safari/537.36'}
'User-Agent': USER_AGENT}
self._failure_count = 0
self._failure_time = None
self.errors = ProviderErrorList(self.get_id)
self._hit_limit_count = 0
self._hit_limit_time = None
self._hit_limit_wait = None
self._last_error_type = None
self.has_limit = False
self.fail_times = {1: (0, 15), 2: (0, 30), 3: (1, 0), 4: (2, 0), 5: (3, 0), 6: (6, 0), 7: (12, 0), 8: (24, 0)}
self._load_error_values()
def _load_error_values(self):
if hasattr(sickbeard, 'DATA_DIR'):
myDB = db.DBConnection('cache.db')
if myDB.hasTable('providererrorcount'):
r = myDB.select('SELECT * FROM providererrorcount WHERE prov_name = ?', [self.get_id()])
if r:
self._failure_count = helpers.tryInt(r[0]['failure_count'], 0)
if r[0]['failure_time']:
self._failure_time = datetime.datetime.fromtimestamp(r[0]['failure_time'])
else:
self._failure_time = None
self._hit_limit_count = helpers.tryInt(r[0]['hit_limit_count'], 0)
if r[0]['hit_limit_time']:
self._hit_limit_time = datetime.datetime.fromtimestamp(r[0]['hit_limit_time'])
else:
self._hit_limit_time = None
if r[0]['hit_limit_wait']:
self._hit_limit_wait = datetime.timedelta(seconds=helpers.tryInt(r[0]['hit_limit_wait'], 0))
else:
self._hit_limit_wait = None
self._last_error_type = self.last_error
def _save_error_value(self, field, value):
myDB = db.DBConnection('cache.db')
if myDB.hasTable('providererrorcount'):
r = myDB.action('UPDATE providererrorcount SET %s = ? WHERE prov_name = ?' % field, [value, self.get_id()])
if 0 == r.rowcount:
myDB.action('REPLACE INTO providererrorcount (prov_name, %s) VALUES (?,?)' % field,
[self.get_id(), value])
@property
def last_error(self):
try:
return sorted(self.errors.errors, key=lambda x: x.error_time, reverse=True)[0].error_type
except (StandardError, Exception):
return None
@property
def failure_count(self):
return self._failure_count
@failure_count.setter
def failure_count(self, value):
changed_val = self._failure_count != value
self._failure_count = value
if changed_val:
self._save_error_value('failure_count', value)
@property
def failure_time(self):
return self._failure_time
@failure_time.setter
def failure_time(self, value):
if None is value or isinstance(value, datetime.datetime):
changed_val = self._failure_time != value
self._failure_time = value
if None is value:
v = value
else:
v = sbdatetime.totimestamp(value)
if changed_val:
self._save_error_value('failure_time', v)
@property
def hit_limit_count(self):
return self._hit_limit_count
@hit_limit_count.setter
def hit_limit_count(self, value):
changed_val = self._hit_limit_count != value
self._hit_limit_count = value
if changed_val:
self._save_error_value('hit_limit_count', value)
@property
def hit_limit_time(self):
return self._hit_limit_time
@hit_limit_time.setter
def hit_limit_time(self, value):
if None is value or isinstance(value, datetime.datetime):
changed_val = self._hit_limit_time != value
self._hit_limit_time = value
if None is value:
v = value
else:
v = sbdatetime.totimestamp(value)
if changed_val:
self._save_error_value('hit_limit_time', v)
@property
def max_index(self):
return len(self.fail_times)
@property
def hit_limit_wait(self):
return self._hit_limit_wait
@hit_limit_wait.setter
def hit_limit_wait(self, value):
if isinstance(getattr(self, 'errors', None), ProviderErrorList) and isinstance(value, datetime.timedelta):
self.errors.add_error(ProviderError(error_type=ProviderErrorTypes.limit))
changed_val = self._hit_limit_wait != value
self._hit_limit_wait = value
if changed_val:
if None is value:
self._save_error_value('hit_limit_wait', value)
elif isinstance(value, datetime.timedelta):
self._save_error_value('hit_limit_wait', value.total_seconds())
def fail_time_index(self, base_limit=2):
i = self.failure_count - base_limit
return (i, self.max_index)[i >= self.max_index]
def wait_time(self, fc):
return datetime.timedelta(hours=self.fail_times[fc][0], minutes=self.fail_times[fc][1])
@property
def get_next_try_time(self):
n = None
h = datetime.timedelta(seconds=0)
f = datetime.timedelta(seconds=0)
if isinstance(self.hit_limit_wait, datetime.timedelta) and isinstance(self.hit_limit_time, datetime.datetime):
h = self.hit_limit_time + self.hit_limit_wait - datetime.datetime.now()
if 3 <= self.failure_count and isinstance(self.failure_time, datetime.datetime):
fc = self.fail_time_index()
if datetime.datetime.now() - self.failure_time < self.wait_time(fc):
f = self.failure_time + self.wait_time(fc) - datetime.datetime.now()
if datetime.timedelta(seconds=0) < max((h, f)):
n = max((h, f))
return n
def retry_next(self):
if isinstance(self.hit_limit_wait, datetime.timedelta) and isinstance(self.hit_limit_time, datetime.datetime):
self.hit_limit_time = datetime.datetime.now() - self.hit_limit_wait
if 3 <= self.failure_count and isinstance(self.failure_time, datetime.datetime):
fc = self.fail_time_index()
if datetime.datetime.now() - self.failure_time < self.wait_time(fc):
self.failure_time = datetime.datetime.now() - self.wait_time(fc)
def should_skip(self, log_warning=True):
if isinstance(self.hit_limit_wait, datetime.timedelta) and isinstance(self.hit_limit_time, datetime.datetime):
time_left = self.hit_limit_time + self.hit_limit_wait - datetime.datetime.now()
if time_left > datetime.timedelta(seconds=0):
if log_warning:
logger.log('Hit limit reached, waiting for %s' % time_left, logger.WARNING)
return True
else:
self.hit_limit_time = None
self.hit_limit_wait = None
if 3 <= self.failure_count:
if None is self.failure_time:
self.failure_time = datetime.datetime.now()
fc = self.fail_time_index()
if datetime.datetime.now() - self.failure_time < self.wait_time(fc):
if log_warning:
time_left = self.wait_time(fc) - (datetime.datetime.now() - self.failure_time)
logger.log('Failed %s times, skipping provider for %s' % (self.failure_count, time_left),
logger.WARNING)
return True
return False
def inc_failure_count(self, *args, **kwargs):
error_type = ('error_type' in kwargs and kwargs['error_type'].error_type) or \
(args and isinstance(args[0], ProviderError) and args[0].error_type)
if not isinstance(self.failure_time, datetime.datetime) or \
error_type != self._last_error_type or \
datetime.datetime.now() - self.failure_time > datetime.timedelta(seconds=3):
self.failure_count += 1
self.failure_time = datetime.datetime.now()
self._last_error_type = error_type
self.errors.add_error(*args, **kwargs)
else:
logger.log('%s: Not logging same error within 3 seconds' % self.name, logger.DEBUG)
def getURL(self, *args, **kwargs):
data = None
# check for auth
if not self._authorised() or self.should_skip():
return data
kwargs['raise_exceptions'] = True
kwargs['raise_status_code'] = True
try:
data = helpers.getURL(*args, **kwargs)
if data:
if 0 != self.failure_count:
logger.log('Unblocking provider: %s' % self.get_id(), logger.DEBUG)
self.failure_count = 0
self.failure_time = None
else:
self.inc_failure_count(ProviderError(error_type=ProviderErrorTypes.nodata))
except requests.exceptions.HTTPError as e:
self.inc_failure_count(ProviderError(error_type=ProviderErrorTypes.http, code=e.response.status_code))
except requests.exceptions.ConnectionError as e:
self.inc_failure_count(ProviderError(error_type=ProviderErrorTypes.connection))
except requests.exceptions.ReadTimeout as e:
self.inc_failure_count(ProviderError(error_type=ProviderErrorTypes.timeout))
except (requests.exceptions.Timeout, socket.timeout) as e:
self.inc_failure_count(ProviderError(error_type=ProviderErrorTypes.connection_timeout))
except (StandardError, Exception) as e:
self.inc_failure_count(ProviderError(error_type=ProviderErrorTypes.unknown))
self.errors.save_list()
return data
def get_id(self):
return GenericProvider.make_id(self.name)
@@ -428,9 +760,13 @@ class GenericProvider:
results = {}
item_list = []
if self.should_skip():
return results
searched_scene_season = None
for ep_obj in episodes:
if self.should_skip(log_warning=False):
break
# search cache for episode result
cache_result = self.cache.searchCache(ep_obj, manual_search)
if cache_result:
@@ -457,6 +793,8 @@ class GenericProvider:
for cur_param in search_params:
item_list += self._search_provider(cur_param, search_mode=search_mode, epcount=len(episodes))
if self.should_skip():
break
return self.finish_find_search_results(show, episodes, search_mode, manual_search, results, item_list)
@@ -649,10 +987,11 @@ class GenericProvider:
:param count: count of successfully processed items
:param url: source url of item(s)
"""
str1, thing, str3 = (('', '%s item' % mode.lower(), ''), (' usable', 'proper', ' found'))['Propers' == mode]
logger.log(u'%s %s in response from %s' % (('No' + str1, count)[0 < count], (
'%s%s%s%s' % (('', 'freeleech ')[getattr(self, 'freeleech', False)], thing, maybe_plural(count), str3)),
re.sub('(\s)\s+', r'\1', url)))
if not self.should_skip():
str1, thing, str3 = (('', '%s item' % mode.lower(), ''), (' usable', 'proper', ' found'))['Propers' == mode]
logger.log(u'%s %s in response from %s' % (('No' + str1, count)[0 < count], (
'%s%s%s%s' % (('', 'freeleech ')[getattr(self, 'freeleech', False)], thing, maybe_plural(count), str3)),
re.sub('(\s)\s+', r'\1', url)))
def check_auth_cookie(self):
@@ -723,12 +1062,13 @@ class GenericProvider:
return
class NZBProvider(object, GenericProvider):
class NZBProvider(GenericProvider):
def __init__(self, name, supports_backlog=True, anime_only=False):
GenericProvider.__init__(self, name, supports_backlog, anime_only)
self.providerType = GenericProvider.NZB
self.has_limit = True
def image_name(self):
@@ -757,6 +1097,9 @@ class NZBProvider(object, GenericProvider):
results = [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
cache_results]
if self.should_skip():
return results
index = 0
alt_search = ('nzbs_org' == self.get_id())
do_search_alt = False
@@ -775,6 +1118,9 @@ class NZBProvider(object, GenericProvider):
urls = []
while index < len(search_terms):
if self.should_skip(log_warning=False):
break
search_params = {'q': search_terms[index], 'maxage': sickbeard.BACKLOG_DAYS + 2}
if alt_search:
@@ -817,7 +1163,7 @@ class NZBProvider(object, GenericProvider):
return self._search_provider(search_params=search_params, **kwargs)
class TorrentProvider(object, GenericProvider):
class TorrentProvider(GenericProvider):
def __init__(self, name, supports_backlog=True, anime_only=False, cache_update_freq=None, update_freq=None):
GenericProvider.__init__(self, name, supports_backlog, anime_only)
@@ -1153,6 +1499,8 @@ class TorrentProvider(object, GenericProvider):
:return: list of Proper objects
"""
results = []
if self.should_skip():
return results
search_terms = getattr(self, 'proper_search_terms', ['proper', 'repack', 'real'])
if not isinstance(search_terms, list):
@@ -1164,9 +1512,14 @@ class TorrentProvider(object, GenericProvider):
clean_term = re.compile(r'(?i)[^a-z1-9|.]+')
for proper_term in search_terms:
if self.should_skip(log_warning=False):
break
proper_check = re.compile(r'(?i)(?:%s)' % clean_term.sub('', proper_term))
for item in items:
if self.should_skip(log_warning=False):
break
title, url = self._title_and_url(item)
if proper_check.search(title):
results.append(classes.Proper(title, url, datetime.datetime.today(),
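To make the two skip clocks above concrete: should_skip() and get_next_try_time() combine the hit-limit wait (hit_limit_time + hit_limit_wait) with the failure backoff (failure_time + wait_time(fail_time_index())) and report the larger remaining interval. A simplified standalone sketch; the parameter names mirror the properties above, but this is illustrative, not the class itself:

import datetime

def next_try(now, hit_limit_time=None, hit_limit_wait=None,
             failure_time=None, failure_wait=None):
    # remaining skip interval, or None once the provider may be retried
    remaining = datetime.timedelta(0)
    if hit_limit_time and hit_limit_wait:
        remaining = max(remaining, hit_limit_time + hit_limit_wait - now)
    if failure_time and failure_wait:
        remaining = max(remaining, failure_time + failure_wait - now)
    return (None, remaining)[datetime.timedelta(0) < remaining]

now = datetime.datetime.now()
# a hit limit set 5 minutes ago with a 30 minute wait leaves about 25 minutes
print(next_try(now, hit_limit_time=now - datetime.timedelta(minutes=5),
               hit_limit_wait=datetime.timedelta(minutes=30)))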

View file

@@ -28,7 +28,7 @@ from math import ceil
from sickbeard.sbdatetime import sbdatetime
from . import generic
from sickbeard import helpers, logger, tvcache, classes, db
from sickbeard.common import neededQualities, Quality
from sickbeard.common import neededQualities, Quality, SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, DOWNLOADED
from sickbeard.exceptions import AuthException, MultipleShowObjectsException
from sickbeard.indexers.indexer_config import *
from io import BytesIO
@@ -187,13 +187,13 @@ class NewznabProvider(generic.NZBProvider):
if datetime.date.today() - self._caps_need_apikey['date'] > datetime.timedelta(days=30) or \
not self._caps_need_apikey['need']:
self._caps_need_apikey['need'] = False
data = self.get_url('%s/api?t=caps' % self.url)
data = self.getURL('%s/api?t=caps' % self.url)
if data:
xml_caps = helpers.parse_xml(data)
if xml_caps is None or not hasattr(xml_caps, 'tag') or xml_caps.tag == 'error' or xml_caps.tag != 'caps':
api_key = self.maybe_apikey()
if isinstance(api_key, basestring) and api_key not in ('0', ''):
data = self.get_url('%s/api?t=caps&apikey=%s' % (self.url, api_key))
data = self.getURL('%s/api?t=caps&apikey=%s' % (self.url, api_key))
if data:
xml_caps = helpers.parse_xml(data)
if xml_caps and hasattr(xml_caps, 'tag') and xml_caps.tag == 'caps':
@@ -291,6 +291,11 @@ class NewznabProvider(generic.NZBProvider):
return [x for x in cats if x['id'] not in self.excludes]
return ','.join(set(cats.split(',')) - self.excludes)
def _check_auth(self, is_required=None):
if self.should_skip():
return False
return super(NewznabProvider, self)._check_auth(is_required)
def check_auth_from_data(self, data):
if data is None or not hasattr(data, 'tag'):
@@ -306,6 +311,24 @@ class NewznabProvider(generic.NZBProvider):
raise AuthException('Your account on %s has been suspended, contact the admin.' % self.name)
elif '102' == code:
raise AuthException('Your account isn\'t allowed to use the API on %s, contact the admin.' % self.name)
elif '500' == code:
self.hit_limit_time = datetime.datetime.now()
self.hit_limit_count += 1
retry_time = re.search(r'Retry in (\d+)\W+([a-z]+)', description, flags=re.I)
if retry_time:
if retry_time.group(2) in ('s', 'sec', 'secs', 'seconds', 'second'):
self.hit_limit_wait = datetime.timedelta(seconds=helpers.tryInt(retry_time.group(1)))
elif retry_time.group(2) in ('m', 'min', 'mins', 'minutes', 'minute'):
self.hit_limit_wait = datetime.timedelta(minutes=helpers.tryInt(retry_time.group(1)))
elif retry_time.group(2) in ('h', 'hr', 'hrs', 'hours', 'hour'):
self.hit_limit_wait = datetime.timedelta(hours=helpers.tryInt(retry_time.group(1)))
elif retry_time.group(2) in ('d', 'days', 'day'):
self.hit_limit_wait = datetime.timedelta(days=helpers.tryInt(retry_time.group(1)))
if not self.hit_limit_wait:
fc = self.fail_time_index(base_limit=0)
self.hit_limit_wait = self.wait_time(fc)
logger.log('Request limit reached. Waiting for %s until next retry. Message: %s' %
(self.hit_limit_wait, description), logger.WARNING)
elif '910' == code:
logger.log(
'%s %s, please check with provider.' %
@@ -316,6 +339,7 @@ class NewznabProvider(generic.NZBProvider):
logger.WARNING)
return False
self.hit_limit_count = 0
return True
def config_str(self):
@@ -530,15 +554,20 @@ class NewznabProvider(generic.NZBProvider):
(hits_per_page * 100 // hits_per_page * 2, hits_per_page * int(ceil(rel_limit * 1.5)))[season_search])
def find_search_results(self, show, episodes, search_mode, manual_search=False, try_other_searches=False, **kwargs):
self._check_auth()
check = self._check_auth()
results = {}
if (isinstance(check, bool) and not check) or self.should_skip():
return results
self.show = show
results = {}
item_list = []
name_space = {}
searched_scene_season = s_mode = None
for ep_obj in episodes:
if self.should_skip(log_warning=False):
break
# skip if season already searched
if (s_mode or 'sponly' == search_mode) and 1 < len(episodes) \
and searched_scene_season == ep_obj.scene_season:
@@ -577,6 +606,8 @@ class NewznabProvider(generic.NZBProvider):
try_all_searches=try_other_searches)
item_list += items
name_space.update(n_space)
if self.should_skip():
break
return self.finish_find_search_results(
show, episodes, search_mode, manual_search, results, item_list, name_space=name_space)
@@ -617,7 +648,13 @@ class NewznabProvider(generic.NZBProvider):
def _search_provider(self, search_params, needed=neededQualities(need_all=True), max_items=400,
try_all_searches=False, **kwargs):
results, n_spaces = [], {}
if self.should_skip():
return results, n_spaces
api_key = self._check_auth()
if isinstance(api_key, bool) and not api_key:
return results, n_spaces
base_params = {'t': 'tvsearch',
'maxage': sickbeard.USENET_RETENTION or 0,
@@ -644,8 +681,13 @@ class NewznabProvider(generic.NZBProvider):
cat_webdl = self.cats.get(NewznabConstants.CAT_WEBDL)
for mode in search_params.keys():
if self.should_skip(log_warning=False):
break
for i, params in enumerate(search_params[mode]):
if self.should_skip(log_warning=False):
break
# category ids
cat = []
if 'Episode' == mode or 'Season' == mode:
@@ -697,7 +739,10 @@ class NewznabProvider(generic.NZBProvider):
search_url = '%sapi?%s' % (self.url, urllib.urlencode(request_params))
i and time.sleep(2.1)
data = helpers.getURL(search_url)
data = self.getURL(search_url)
if self.should_skip():
break
if not data:
logger.log('No data returned from %s' % self.name, logger.WARNING)
@@ -794,6 +839,10 @@ class NewznabProvider(generic.NZBProvider):
results = [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
cache_results]
check = self._check_auth()
if isinstance(check, bool) and not check:
return results
index = 0
alt_search = ('nzbs_org' == self.get_id())
do_search_alt = False
@@ -812,6 +861,9 @@ class NewznabProvider(generic.NZBProvider):
urls = []
while index < len(search_terms):
if self.should_skip(log_warning=False):
break
search_params = {'q': search_terms[index], 'maxage': sickbeard.BACKLOG_DAYS + 2}
if alt_search:
@@ -885,8 +937,11 @@ class NewznabCache(tvcache.TVCache):
if 4489 != sickbeard.RECENTSEARCH_FREQUENCY or self.should_update():
n_spaces = {}
try:
self._checkAuth()
(items, n_spaces) = self.provider.cache_data(needed=needed)
check = self._checkAuth()
if isinstance(check, bool) and not check:
items = None
else:
(items, n_spaces) = self.provider.cache_data(needed=needed)
except (StandardError, Exception):
items = None
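The 'Retry in N unit' handling added to check_auth_from_data() above can be isolated as follows. This sketch simplifies the unit matching to the first letter (the code above lists the full spellings), and the sample message is invented for illustration:

import datetime
import re

UNITS = {'s': 'seconds', 'm': 'minutes', 'h': 'hours', 'd': 'days'}

def parse_retry_wait(description):
    match = re.search(r'Retry in (\d+)\W+([a-z]+)', description, flags=re.I)
    if not match:
        return None
    unit = UNITS.get(match.group(2)[0].lower())
    if not unit:
        return None
    return datetime.timedelta(**{unit: int(match.group(1))})

print(parse_retry_wait('Request limit reached. Retry in 2 hours'))  # 2:00:00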

View file

@@ -99,7 +99,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
def get_data(self, url):
result = None
if url and False is self._init_api():
data = self.get_url(url, timeout=90)
data = self.getURL(url, timeout=90)
if data:
if re.search('(?i)limit.*?reached', data):
logger.log('Daily Nzb Download limit reached', logger.DEBUG)
@@ -138,6 +138,9 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
def cache_data(self, needed=neededQualities(need_all=True), **kwargs):
if self.should_skip():
return []
api_key = self._init_api()
if False is api_key:
return self.search_html(needed=needed, **kwargs)
@@ -182,7 +185,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
search_url = self.urls['search'] % urllib.urlencode(params)
data_json = self.get_url(search_url, json=True)
data_json = self.getURL(search_url, json=True)
if data_json and self._check_auth_from_data(data_json, is_xml=False):
for item in data_json:
if 'release' in item and 'getnzb' in item:
@@ -210,7 +213,7 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
'cat': 'cat=(?:%s)' % '|'.join(cats)}.items())
mode = ('search', 'cache')['' == search]
search_url = self.urls[mode + '_html'] % search
html = self.get_url(search_url)
html = self.getURL(search_url)
cnt = len(results)
try:
if not html:
@@ -254,6 +257,8 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
search_terms = ['.PROPER.', '.REPACK.', '.REAL.']
results = []
if self.should_skip():
return results
for term in search_terms:
for item in self._search_provider(term, search_mode='Propers', retention=4):
@@ -272,6 +277,9 @@ class OmgwtfnzbsProvider(generic.NZBProvider):
def _init_api(self):
if self.should_skip():
return None
try:
api_key = self._check_auth()
if not api_key.startswith('cookie:'):

View file

@@ -470,6 +470,11 @@ def search_for_needed_episodes(episodes):
found_results[cur_ep] = best_result
try:
cur_provider.save_list()
except (StandardError, Exception):
pass
threading.currentThread().name = orig_thread_name
if not len(providers):

View file

@@ -4531,11 +4531,27 @@ class ManageSearches(Manage):
t.recent_search_status = sickbeard.searchQueueScheduler.action.is_recentsearch_in_progress()
t.find_propers_status = sickbeard.searchQueueScheduler.action.is_propersearch_in_progress()
t.queue_length = sickbeard.searchQueueScheduler.action.queue_length()
t.provider_error_stats = [{'name': p.name, 'prov_id': p.get_id(), 'errors': p.errors.errors_sorted,
'hit_limit_time': p.hit_limit_time, 'failure_time': p.failure_time,
'last_error': p.last_error,
'next_try': p.get_next_try_time, 'has_limit': getattr(p, 'has_limit', False)}
for p in sickbeard.providerList + sickbeard.newznabProviderList]
t.provider_errors = 0 < len([p for p in t.provider_error_stats if len(p['errors'])])
t.submenu = self.ManageMenu('Search')
return t.respond()
def retryProvider(self, provider=None, *args, **kwargs):
if not provider:
return
prov = [p for p in sickbeard.providerList + sickbeard.newznabProviderList if p.get_id() == provider]
if not prov:
return
prov[0].retry_next()
time.sleep(3)
return
def forceVersionCheck(self, *args, **kwargs):
# force a check to see if there is a new version
if sickbeard.versionCheckScheduler.action.check_for_new_version(force=True):