Merge pull request #944 from JackDandy/feature/ChangeShowUpdate

Change catch show update task errors.
commit 925c286ac2, authored by JackDandy on 2017-06-15 01:55:08 +01:00, committed by GitHub
12 changed files with 69 additions and 29 deletions

View file

@@ -59,6 +59,7 @@
 * Change cosmetic title on shutdown
 * Change use TVDb API v2
 * Change improve search for PROPERS
+* Change catch show update task errors
 [develop changelog]

View file

@@ -264,7 +264,7 @@ class Quality:
                 logger.log(msg % (filename, e.text), logger.WARNING)
             except Exception as e:
                 logger.log(msg % (filename, ex(e)), logger.ERROR)
-                logger.log(traceback.format_exc(), logger.DEBUG)
+                logger.log(traceback.format_exc(), logger.ERROR)
 
             if parser:
                 if '.avi' == filename[-4::].lower():
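Note: the change above is the pattern repeated throughout this commit: tracebacks that were previously logged at DEBUG are now logged at ERROR next to the error message, so they appear in the main log instead of only when debug logging is enabled. A minimal stand-alone sketch of that behaviour, using the standard library logging module and a hypothetical parse function rather than sickbeard.logger and the real Quality code:

    import logging
    import traceback

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)


    def guess_quality(filename):
        # hypothetical stand-in for the media parsing step wrapped above
        raise ValueError('unable to parse: %s' % filename)


    try:
        guess_quality('Show.S01E01.avi')
    except Exception as e:
        log.error('Unable to parse media file: %s' % e)
        log.error(traceback.format_exc())  # was DEBUG, now ERROR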

View file

@@ -91,7 +91,7 @@ def _get_proper_list(aired_since_shows, recent_shows, recent_anime):
            continue
        except Exception as e:
            logger.log(u'Error while searching ' + cur_provider.name + ', skipping: ' + ex(e), logger.ERROR)
-           logger.log(traceback.format_exc(), logger.DEBUG)
+           logger.log(traceback.format_exc(), logger.ERROR)
            continue
        finally:
            threading.currentThread().name = orig_thread_name

View file

@@ -21,6 +21,7 @@ import time
 import threading
 import datetime
 import sickbeard
+import traceback
 from collections import defaultdict
 from lib import adba
@@ -210,7 +211,12 @@ def retrieve_exceptions():
             continue
 
         for cur_exception_dict in exception_dict[cur_indexer_id]:
-            cur_exception, cur_season = cur_exception_dict.items()[0]
+            try:
+                cur_exception, cur_season = cur_exception_dict.items()[0]
+            except Exception:
+                logger.log('scene exception error', logger.ERROR)
+                logger.log(traceback.format_exc(), logger.ERROR)
+                continue
 
             # if this exception isn't already in the DB then add it
             if cur_exception not in existing_exceptions:
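Note: a stand-alone sketch of the skip-and-log behaviour added above, with hypothetical data and plain print() standing in for sickbeard.logger; one malformed scene-exception entry no longer aborts the whole retrieve run. Indexing .items()[0] directly is Python 2 behaviour (items() returns a list there); the list(...) wrapper only keeps the sketch valid on Python 3 as well.

    import traceback

    # hypothetical scene-exception data; the second entry is malformed (empty)
    exception_dicts = [{'Show Title (UK)': -1}, {}, {'Other Title': 1}]

    for cur_exception_dict in exception_dicts:
        try:
            cur_exception, cur_season = list(cur_exception_dict.items())[0]
        except Exception:
            print('scene exception error')
            print(traceback.format_exc())
            continue
        print('would add exception %r for season %s' % (cur_exception, cur_season))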

View file

@@ -529,7 +529,7 @@ def xem_refresh(indexer_id, indexer, force=False):
        logger.log(
            u'Exception while refreshing XEM data for show ' + str(indexer_id) + ' on ' + sickbeard.indexerApi(
                indexer).name + ': ' + ex(e), logger.WARNING)
-       logger.log(traceback.format_exc(), logger.DEBUG)
+       logger.log(traceback.format_exc(), logger.ERROR)
 
 
 def fix_xem_numbering(indexer_id, indexer):

View file

@@ -90,7 +90,7 @@ class Scheduler(threading.Thread):
                    self.action.run()
                except Exception as e:
                    logger.log(u"Exception generated in thread " + self.name + ": " + ex(e), logger.ERROR)
-                   logger.log(repr(traceback.format_exc()), logger.DEBUG)
+                   logger.log(repr(traceback.format_exc()), logger.ERROR)
                finally:
                    if self.force:

View file

@@ -530,7 +530,7 @@ def search_providers(show, episodes, manual_search=False, torrent_only=False, tr
            break
        except Exception as e:
            logger.log(u'Error while searching %s, skipping: %s' % (cur_provider.name, ex(e)), logger.ERROR)
-           logger.log(traceback.format_exc(), logger.DEBUG)
+           logger.log(traceback.format_exc(), logger.ERROR)
            break
        finally:
            threading.currentThread().name = orig_thread_name

View file

@@ -249,7 +249,7 @@ class RecentSearchQueueItem(generic_queue.QueueItem):
             helpers.cpu_sleep()
         except (StandardError, Exception):
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         if None is self.success:
             self.success = False
@@ -398,7 +398,7 @@ class ManualSearchQueueItem(generic_queue.QueueItem):
             logger.log(u'Unable to find a download for: [%s]' % self.segment.prettyName())
         except (StandardError, Exception):
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
         finally:
             # Keep a list with the 100 last executed searches
@@ -445,7 +445,7 @@ class BacklogQueueItem(generic_queue.QueueItem):
             logger.log(u'No needed episodes found during backlog search for: [%s]' % self.show.name)
         except (StandardError, Exception):
             is_error = True
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
         finally:
             logger.log('Completed backlog search %sfor: [%s]' % (('', 'with a debug error ')[is_error], self.show.name))
@@ -496,7 +496,7 @@ class FailedQueueItem(generic_queue.QueueItem):
             pass
             # logger.log(u'No valid episode found to retry for: [%s]' % self.segment.prettyName())
         except (StandardError, Exception):
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
         finally:
             # Keep a list with the 100 last executed searches

View file

@@ -369,7 +369,7 @@ class QueueItemAdd(ShowQueueItem):
         except Exception as e:
             logger.log('Error trying to add show: %s' % ex(e), logger.ERROR)
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
             self._finishEarly()
             raise
@@ -379,7 +379,7 @@ class QueueItemAdd(ShowQueueItem):
             self.show.saveToDB()
         except Exception as e:
             logger.log('Error saving the show to the database: %s' % ex(e), logger.ERROR)
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
             self._finishEarly()
             raise
@@ -392,13 +392,13 @@ class QueueItemAdd(ShowQueueItem):
             logger.log(
                 'Error with %s, not creating episode list: %s' % (sickbeard.indexerApi(self.show.indexer).name, ex(e)),
                 logger.ERROR)
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         try:
             self.show.loadEpisodesFromDir()
         except Exception as e:
             logger.log('Error searching directory for episodes: %s' % ex(e), logger.ERROR)
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # if they gave a custom status then change all the eps to it
         my_db = db.DBConnection()
@@ -616,7 +616,7 @@ class QueueItemUpdate(ShowQueueItem):
             self.show.saveToDB()
         except Exception as e:
             logger.log('Error saving the episode to the database: %s' % ex(e), logger.ERROR)
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # get episode list from DB
         logger.log('Loading all episodes from the database', logger.DEBUG)

View file

@@ -17,6 +17,7 @@
 # along with SickGear. If not, see <http://www.gnu.org/licenses/>.
 
 import datetime
+import traceback
 import sickbeard
 from sickbeard import logger, exceptions, ui, db, network_timezones, failed_history
@@ -36,36 +37,68 @@ class ShowUpdater:
         update_date = update_datetime.date()
 
         # refresh network timezones
-        network_timezones.update_network_dict()
+        try:
+            network_timezones.update_network_dict()
+        except Exception:
+            logger.log('network timezone update error', logger.ERROR)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # update xem id lists
-        sickbeard.scene_exceptions.get_xem_ids()
+        try:
+            sickbeard.scene_exceptions.get_xem_ids()
+        except Exception:
+            logger.log('xem id list update error', logger.ERROR)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # update scene exceptions
-        sickbeard.scene_exceptions.retrieve_exceptions()
+        try:
+            sickbeard.scene_exceptions.retrieve_exceptions()
+        except Exception:
+            logger.log('scene exceptions update error', logger.ERROR)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # sure, why not?
         if sickbeard.USE_FAILED_DOWNLOADS:
-            failed_history.remove_old_history()
+            try:
+                failed_history.remove_old_history()
+            except Exception:
+                logger.log('Failed History cleanup error', logger.ERROR)
+                logger.log(traceback.format_exc(), logger.ERROR)
 
         # clear the data of unused providers
-        sickbeard.helpers.clear_unused_providers()
+        try:
+            sickbeard.helpers.clear_unused_providers()
+        except Exception:
+            logger.log('unused provider cleanup error', logger.ERROR)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # cleanup image cache
-        sickbeard.helpers.cleanup_cache()
+        try:
+            sickbeard.helpers.cleanup_cache()
+        except Exception:
+            logger.log('image cache cleanup error', logger.ERROR)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # add missing mapped ids
         if not sickbeard.background_mapping_task.is_alive():
             logger.log(u'Updating the Indexer mappings')
             import threading
-            sickbeard.background_mapping_task = threading.Thread(
-                name='LOAD-MAPPINGS', target=sickbeard.indexermapper.load_mapped_ids, kwargs={'update': True})
-            sickbeard.background_mapping_task.start()
+            try:
+                sickbeard.background_mapping_task = threading.Thread(
+                    name='LOAD-MAPPINGS', target=sickbeard.indexermapper.load_mapped_ids, kwargs={'update': True})
+                sickbeard.background_mapping_task.start()
+            except Exception:
+                logger.log('missing mapped ids update error', logger.ERROR)
+                logger.log(traceback.format_exc(), logger.ERROR)
 
         logger.log(u'Doing full update on all shows')
 
         # clean out cache directory, remove everything > 12 hours old
-        sickbeard.helpers.clearCache()
+        try:
+            sickbeard.helpers.clearCache()
+        except Exception:
+            logger.log('cache dir cleanup error', logger.ERROR)
+            logger.log(traceback.format_exc(), logger.ERROR)
 
         # select 10 'Ended' tv_shows updated more than 90 days ago
         # and all shows not updated more then 180 days ago to include in this update
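Note: every maintenance step above now follows the same shape: run the step in its own try/except, log a short description plus the traceback at ERROR, and carry on, so one failing task can no longer abort the whole ShowUpdater run. A condensed stand-alone sketch of that shape, using the standard library logging module and hypothetical step callables; the actual change inlines one try/except per step rather than introducing a helper:

    import logging
    import traceback

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('show_updater_sketch')


    def run_step(description, step):
        # run one maintenance step; log and swallow any failure
        try:
            step()
        except Exception:
            log.error('%s error' % description)
            log.error(traceback.format_exc())


    def update_network_dict():
        raise IOError('network timezone source unreachable')  # simulated failure


    def retrieve_exceptions():
        log.info('scene exceptions refreshed')


    # the failing first step is logged, and the run continues to the next step
    run_step('network timezone update', update_network_dict)
    run_step('scene exceptions update', retrieve_exceptions)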

View file

@@ -471,7 +471,7 @@ class TVShow(object):
                 curEpisode.refreshSubtitles()
             except:
                 logger.log('%s: Could not refresh subtitles' % self.indexerid, logger.ERROR)
-                logger.log(traceback.format_exc(), logger.DEBUG)
+                logger.log(traceback.format_exc(), logger.ERROR)
 
             result = curEpisode.get_sql()
             if None is not result:
@@ -953,7 +953,7 @@ class TVShow(object):
             logger.log('Something is wrong with IMDb api: %s' % ex(e), logger.WARNING)
         except Exception as e:
             logger.log('Error loading IMDb info: %s' % ex(e), logger.ERROR)
-            logger.log('%s' % traceback.format_exc(), logger.DEBUG)
+            logger.log('%s' % traceback.format_exc(), logger.ERROR)
 
     def _get_imdb_info(self):
@@ -1203,7 +1203,7 @@ class TVShow(object):
                 episode = self.makeEpFromFile(episodeLoc['location'])
                 subtitles = episode.downloadSubtitles(force=force)
             except Exception as e:
-                logger.log('Error occurred when downloading subtitles: %s' % traceback.format_exc(), logger.DEBUG)
+                logger.log('Error occurred when downloading subtitles: %s' % traceback.format_exc(), logger.ERROR)
                 return
 
     def switchIndexer(self, old_indexer, old_indexerid, pausestatus_after=None):

View file

@@ -144,7 +144,7 @@ class Api(webserve.BaseHandler):
             out = '%s(%s);' % (callback, out)  # wrap with JSONP call if requested
         except Exception as e:  # if we fail to generate the output fake an error
-            logger.log(u'API :: ' + traceback.format_exc(), logger.DEBUG)
+            logger.log(u'API :: ' + traceback.format_exc(), logger.ERROR)
             out = '{"result":"' + result_type_map[RESULT_ERROR] + '", "message": "error while composing output: "' + ex(
                 e) + '"}'