diff --git a/CHANGES.md b/CHANGES.md
index 5522af15..c1485178 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -27,6 +27,9 @@
 * Change database code to PEP8 standards
 * Change general config's branches and pull request list generation for faster page loading
 * Add PlayStation Network logo
+* Change layout of Recent Search code
+* Change naming of SEARCHQUEUE threads for shorter log lines
+* Fix Recent Search running status on Manage Searches page
 
 [develop changelog]
 * Fix traceback error when using the menu item Manage/Update Kodi
diff --git a/sickbeard/generic_queue.py b/sickbeard/generic_queue.py
index c6380379..5d72dc1f 100644
--- a/sickbeard/generic_queue.py
+++ b/sickbeard/generic_queue.py
@@ -89,7 +89,8 @@ class GenericQueue(object):
 
             # launch the queue item in a thread
             self.currentItem = self.queue.pop(0)
-            self.currentItem.name = self.queue_name + '-' + self.currentItem.name
+            if not self.queue_name == 'SEARCHQUEUE':
+                self.currentItem.name = self.queue_name + '-' + self.currentItem.name
             self.currentItem.start()
 
 class QueueItem(threading.Thread):
diff --git a/sickbeard/search.py b/sickbeard/search.py
index 30174d3d..19a2e2c8 100644
--- a/sickbeard/search.py
+++ b/sickbeard/search.py
@@ -314,7 +314,6 @@ def wantedEpisodes(show, fromDate):
     anyQualities, bestQualities = common.Quality.splitQuality(show.quality)  # @UnusedVariable
     allQualities = list(set(anyQualities + bestQualities))
 
-    logger.log(u"Seeing if we need anything from " + show.name)
     myDB = db.DBConnection()
 
     if show.air_by_date:
@@ -345,38 +344,18 @@
             epObj.wantedQuality = [i for i in allQualities if (i > curQuality and i != common.Quality.UNKNOWN)]
             wanted.append(epObj)
 
+    logger.log(u'We want %d episode(s) of %s' % (len(wanted), show.name))
+
     return wanted
 
 
-def searchForNeededEpisodes():
+def searchForNeededEpisodes(episodes):
     foundResults = {}
 
     didSearch = False
 
     origThreadName = threading.currentThread().name
-    threads = []
-
-    show_list = sickbeard.showList
-    fromDate = datetime.date.fromordinal(1)
-    episodes = []
-
-    for curShow in show_list:
-        if curShow.paused:
-            continue
-
-        episodes.extend(wantedEpisodes(curShow, fromDate))
 
     providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive() and x.enable_recentsearch]
-    for curProvider in providers:
-
-        # spawn separate threads for each provider so we don't need to wait for providers with slow network operation
-        threads.append(threading.Thread(target=curProvider.cache.updateCache, name=origThreadName +
-                                        " :: [" + curProvider.name + "]"))
-        # start the thread we just created
-        threads[-1].start()
-
-    # wait for all threads to finish
-    for t in threads:
-        t.join()
 
     for curProvider in providers:
         threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
diff --git a/sickbeard/searchRecent.py b/sickbeard/searchRecent.py
index eb05fe3a..38e32210 100644
--- a/sickbeard/searchRecent.py
+++ b/sickbeard/searchRecent.py
@@ -18,18 +18,9 @@
 
 from __future__ import with_statement
 
-import datetime
 import threading
-import traceback
 
 import sickbeard
-from sickbeard import logger
-from sickbeard import db
-from sickbeard import common
-from sickbeard import helpers
-from sickbeard import exceptions
-from sickbeard import network_timezones
-from sickbeard.exceptions import ex
 
 
 class RecentSearcher():
@@ -41,63 +32,6 @@
         self.amActive = True
 
-        logger.log(u"Searching for new released episodes ...")
-
-        if not network_timezones.network_dict:
-            network_timezones.update_network_dict()
-
-        if network_timezones.network_dict:
-            curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
-        else:
-            curDate = (datetime.date.today() - datetime.timedelta(days=2)).toordinal()
-
-        curTime = datetime.datetime.now(network_timezones.sb_timezone)
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND airdate <= ?",
-                                 [common.UNAIRED, curDate])
-
-        sql_l = []
-        show = None
-
-        for sqlEp in sqlResults:
-            try:
-                if not show or int(sqlEp["showid"]) != show.indexerid:
-                    show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
-
-                # for when there is orphaned series in the database but not loaded into our showlist
-                if not show:
-                    continue
-
-            except exceptions.MultipleShowObjectsException:
-                logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
-                continue
-
-            try:
-                end_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network) + datetime.timedelta(minutes=helpers.tryInt(show.runtime, 60))
-                # filter out any episodes that haven't aried yet
-                if end_time > curTime:
-                    continue
-            except:
-                # if an error occured assume the episode hasn't aired yet
-                continue
-
-            ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
-            with ep.lock:
-                if ep.show.paused:
-                    ep.status = common.SKIPPED
-                else:
-                    ep.status = common.WANTED
-
-                sql_l.append(ep.get_sql())
-        else:
-            logger.log(u"No new released episodes found ...")
-
-        if len(sql_l) > 0:
-            myDB = db.DBConnection()
-            myDB.mass_action(sql_l)
-
         # queue episode for recent search
         recentsearch_queue_item = sickbeard.search_queue.RecentSearchQueueItem()
         sickbeard.searchQueueScheduler.action.add_item(recentsearch_queue_item)
 
diff --git a/sickbeard/search_queue.py b/sickbeard/search_queue.py
index 242dc544..d989d658 100644
--- a/sickbeard/search_queue.py
+++ b/sickbeard/search_queue.py
@@ -21,14 +21,13 @@ from __future__ import with_statement
 import time
 import traceback
 import threading
+import datetime
 
 import sickbeard
-from sickbeard import db, logger, common, exceptions, helpers
-from sickbeard import generic_queue, scheduler
-from sickbeard import search, failed_history, history
-from sickbeard import ui
-from sickbeard.exceptions import ex
-from sickbeard.search import pickBestResult
+from sickbeard import db, logger, common, exceptions, helpers, network_timezones, generic_queue, search, \
+    failed_history, history, ui
+from sickbeard.search import wantedEpisodes
+
 
 search_queue_lock = threading.Lock()
@@ -128,38 +127,132 @@
         else:
             logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
 
+
 class RecentSearchQueueItem(generic_queue.QueueItem):
     def __init__(self):
         self.success = None
+        self.episodes = []
         generic_queue.QueueItem.__init__(self, 'Recent Search', RECENT_SEARCH)
 
     def run(self):
         generic_queue.QueueItem.run(self)
 
-        try:
-            logger.log("Beginning recent search for new episodes")
-            foundResults = search.searchForNeededEpisodes()
+        self._change_missing_episodes()
 
-            if not len(foundResults):
-                logger.log(u"No needed episodes found")
-            else:
-                for result in foundResults:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    self.success = search.snatchEpisode(result)
+        self.update_providers()
 
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+        show_list = sickbeard.showList
+        fromDate = datetime.date.fromordinal(1)
+        for curShow in show_list:
+            if curShow.paused:
+                continue
 
-            generic_queue.QueueItem.finish(self)
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
+            self.episodes.extend(wantedEpisodes(curShow, fromDate))
 
-        if self.success is None:
-            self.success = False
+        if not self.episodes:
+            logger.log(u'No search of cache for episodes required')
+            self.success = True
+        else:
+            logger.log(u'Found a total of %d episode(s) requiring searching' % len(self.episodes))
+
+            try:
+                logger.log(u'Beginning recent search for episodes')
+                foundResults = search.searchForNeededEpisodes(self.episodes)
+
+                if not len(foundResults):
+                    logger.log(u'No needed episodes found')
+                else:
+                    for result in foundResults:
+                        # just use the first result for now
+                        logger.log(u'Downloading ' + result.name + ' from ' + result.provider.name)
+                        self.success = search.snatchEpisode(result)
+
+                        # give the CPU a break
+                        time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+
+            except Exception:
+                logger.log(traceback.format_exc(), logger.DEBUG)
+
+            if self.success is None:
+                self.success = False
 
         self.finish()
 
+    @staticmethod
+    def _change_missing_episodes():
+        if not network_timezones.network_dict:
+            network_timezones.update_network_dict()
+
+        if network_timezones.network_dict:
+            curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
+        else:
+            curDate = (datetime.date.today() - datetime.timedelta(days=2)).toordinal()
+
+        curTime = datetime.datetime.now(network_timezones.sb_timezone)
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select('SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND airdate <= ?',
+                                 [common.UNAIRED, curDate])
+
+        sql_l = []
+        show = None
+
+        for sqlEp in sqlResults:
+            try:
+                if not show or int(sqlEp['showid']) != show.indexerid:
+                    show = helpers.findCertainShow(sickbeard.showList, int(sqlEp['showid']))
+
+                # for when there is orphaned series in the database but not loaded into our showlist
+                if not show:
+                    continue
+
+            except exceptions.MultipleShowObjectsException:
+                logger.log(u'ERROR: expected to find a single show matching ' + str(sqlEp['showid']))
+                continue
+
+            try:
+                end_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network) + datetime.timedelta(minutes=helpers.tryInt(show.runtime, 60))
+                # filter out any episodes that haven't aried yet
+                if end_time > curTime:
+                    continue
+            except:
+                # if an error occured assume the episode hasn't aired yet
+                continue
+
+            ep = show.getEpisode(int(sqlEp['season']), int(sqlEp['episode']))
+            with ep.lock:
+                if ep.show.paused:
+                    ep.status = common.SKIPPED
+                else:
+                    ep.status = common.WANTED
+
+                sql_l.append(ep.get_sql())
+        else:
+            logger.log(u'No new released episodes found ...')
+
+        if len(sql_l) > 0:
+            myDB = db.DBConnection()
+            myDB.mass_action(sql_l)
+
+    @staticmethod
+    def update_providers():
+        origThreadName = threading.currentThread().name
+        threads = []
+
+        logger.log('Updating provider caches with recent upload data')
+
+        providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive() and x.enable_recentsearch]
+        for curProvider in providers:
+            # spawn separate threads for each provider so we don't need to wait for providers with slow network operation
+            threads.append(threading.Thread(target=curProvider.cache.updateCache, name=origThreadName +
+                                            ' :: [' + curProvider.name + ']'))
+            # start the thread we just created
+            threads[-1].start()
+
+        # wait for all threads to finish
+        for t in threads:
+            t.join()
+
 
 class ManualSearchQueueItem(generic_queue.QueueItem):
     def __init__(self, show, segment):
diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py
index 7d3da4e6..8d7f428e 100644
--- a/sickbeard/webserve.py
+++ b/sickbeard/webserve.py
@@ -3126,10 +3126,10 @@ class ManageSearches(Manage):
     def index(self, *args, **kwargs):
         t = PageTemplate(headers=self.request.headers, file='manage_manageSearches.tmpl')
         # t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
-        t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused()  # @UndefinedVariable
-        t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress()  # @UndefinedVariable
-        t.recentSearchStatus = sickbeard.recentSearchScheduler.action.amActive  # @UndefinedVariable
-        t.findPropersStatus = sickbeard.properFinderScheduler.action.amActive  # @UndefinedVariable
+        t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused()
+        t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress()
+        t.recentSearchStatus = sickbeard.searchQueueScheduler.action.is_recentsearch_in_progress()
+        t.findPropersStatus = sickbeard.properFinderScheduler.action.amActive
 
         t.queueLength = sickbeard.searchQueueScheduler.action.queue_length()
         t.submenu = self.ManageMenu()