Change layout of Recent Search code

Change naming of SEARCHQUEUE threads for shorter log lines
Fix Recent Search running status on Manage Searches page
adam 2015-03-11 20:31:57 +08:00
parent 722ffbe2b3
commit d1687dd401
6 changed files with 128 additions and 118 deletions

View file

@@ -27,6 +27,9 @@
 * Change database code to PEP8 standards
 * Change general config's branches and pull request list generation for faster page loading
 * Add PlayStation Network logo
+* Change layout of Recent Search code
+* Change naming of SEARCHQUEUE threads for shorter log lines
+* Fix Recent Search running status on Manage Searches page
 [develop changelog]
 * Fix traceback error when using the menu item Manage/Update Kodi

View file

@@ -89,7 +89,8 @@ class GenericQueue(object):
                 # launch the queue item in a thread
                 self.currentItem = self.queue.pop(0)
-                self.currentItem.name = self.queue_name + '-' + self.currentItem.name
+                if not self.queue_name == 'SEARCHQUEUE':
+                    self.currentItem.name = self.queue_name + '-' + self.currentItem.name
                 self.currentItem.start()


 class QueueItem(threading.Thread):
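
The practical effect of this conditional is shorter thread names, and so shorter log-line prefixes, for anything queued on the search queue. A minimal standalone sketch of the renaming rule; the 'Recent Search' name mirrors the queue item defined later in this commit, while 'OTHERQUEUE' and 'Some Item' are hypothetical:

    # Sketch only: reproduces the conditional above outside the queue class.
    def thread_name(queue_name, item_name):
        # the search queue now leaves item names untouched, so its threads log
        # as e.g. 'Recent Search' rather than 'SEARCHQUEUE-Recent Search'
        if not queue_name == 'SEARCHQUEUE':
            return queue_name + '-' + item_name
        return item_name

    print(thread_name('SEARCHQUEUE', 'Recent Search'))  # Recent Search
    print(thread_name('OTHERQUEUE', 'Some Item'))       # OTHERQUEUE-Some Item (hypothetical names)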

View file

@@ -314,7 +314,6 @@ def wantedEpisodes(show, fromDate):
     anyQualities, bestQualities = common.Quality.splitQuality(show.quality)  # @UnusedVariable
     allQualities = list(set(anyQualities + bestQualities))

-    logger.log(u"Seeing if we need anything from " + show.name)

     myDB = db.DBConnection()
     if show.air_by_date:
@@ -345,38 +344,18 @@ def wantedEpisodes(show, fromDate):
             epObj.wantedQuality = [i for i in allQualities if (i > curQuality and i != common.Quality.UNKNOWN)]
             wanted.append(epObj)

+    logger.log(u'We want %d episode(s) of %s' % (len(wanted), show.name))
     return wanted


-def searchForNeededEpisodes():
+def searchForNeededEpisodes(episodes):
     foundResults = {}

     didSearch = False

     origThreadName = threading.currentThread().name
-    threads = []
-
-    show_list = sickbeard.showList
-    fromDate = datetime.date.fromordinal(1)
-    episodes = []
-    for curShow in show_list:
-        if curShow.paused:
-            continue
-        episodes.extend(wantedEpisodes(curShow, fromDate))

     providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive() and x.enable_recentsearch]
-    for curProvider in providers:
-        # spawn separate threads for each provider so we don't need to wait for providers with slow network operation
-        threads.append(threading.Thread(target=curProvider.cache.updateCache, name=origThreadName +
-                                        " :: [" + curProvider.name + "]"))
-        # start the thread we just created
-        threads[-1].start()
-
-    # wait for all threads to finish
-    for t in threads:
-        t.join()
-
     for curProvider in providers:
         threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
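
With the wanted-episode gathering and provider cache updates removed from searchForNeededEpisodes(), callers now build the episode list themselves and pass it in. A sketch of the new calling convention, assuming an initialised SickGear runtime; it mirrors the RecentSearchQueueItem code later in this commit:

    import datetime

    import sickbeard
    from sickbeard.search import searchForNeededEpisodes, wantedEpisodes

    # gather wanted episodes on the caller's side; ordinal 1 means "from the beginning"
    episodes = []
    for curShow in sickbeard.showList:
        if curShow.paused:
            continue
        episodes.extend(wantedEpisodes(curShow, datetime.date.fromordinal(1)))

    # the search itself now only needs the pre-built list
    foundResults = searchForNeededEpisodes(episodes)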

View file

@@ -18,18 +18,9 @@
 from __future__ import with_statement

-import datetime
 import threading
-import traceback

 import sickbeard

-from sickbeard import logger
-from sickbeard import db
-from sickbeard import common
-from sickbeard import helpers
-from sickbeard import exceptions
-from sickbeard import network_timezones
-from sickbeard.exceptions import ex

 class RecentSearcher():
@@ -41,63 +32,6 @@ class RecentSearcher():
         self.amActive = True

-        logger.log(u"Searching for new released episodes ...")
-
-        if not network_timezones.network_dict:
-            network_timezones.update_network_dict()
-
-        if network_timezones.network_dict:
-            curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
-        else:
-            curDate = (datetime.date.today() - datetime.timedelta(days=2)).toordinal()
-
-        curTime = datetime.datetime.now(network_timezones.sb_timezone)
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND airdate <= ?",
-                                 [common.UNAIRED, curDate])
-
-        sql_l = []
-        show = None
-
-        for sqlEp in sqlResults:
-            try:
-                if not show or int(sqlEp["showid"]) != show.indexerid:
-                    show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
-
-                # for when there is orphaned series in the database but not loaded into our showlist
-                if not show:
-                    continue
-
-            except exceptions.MultipleShowObjectsException:
-                logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
-                continue
-
-            try:
-                end_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network) + datetime.timedelta(minutes=helpers.tryInt(show.runtime, 60))
-                # filter out any episodes that haven't aired yet
-                if end_time > curTime:
-                    continue
-            except:
-                # if an error occurred assume the episode hasn't aired yet
-                continue
-
-            ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
-            with ep.lock:
-                if ep.show.paused:
-                    ep.status = common.SKIPPED
-                else:
-                    ep.status = common.WANTED
-
-                sql_l.append(ep.get_sql())
-        else:
-            logger.log(u"No new released episodes found ...")
-
-        if len(sql_l) > 0:
-            myDB = db.DBConnection()
-            myDB.mass_action(sql_l)
-
-        # queue episode for recent search
         recentsearch_queue_item = sickbeard.search_queue.RecentSearchQueueItem()
         sickbeard.searchQueueScheduler.action.add_item(recentsearch_queue_item)
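
With the episode-status handling and provider updates moved into the queue item, the scheduler-side RecentSearcher shrinks to little more than queueing work. A hedged sketch of how the trimmed class likely reads after this commit; only the context lines shown above are certain, and the closing amActive reset is an assumption:

    class RecentSearcher():
        def __init__(self):
            self.amActive = False

        def run(self):
            self.amActive = True

            # hand the real work to the search queue
            recentsearch_queue_item = sickbeard.search_queue.RecentSearchQueueItem()
            sickbeard.searchQueueScheduler.action.add_item(recentsearch_queue_item)

            self.amActive = False  # assumed reset; not visible in the hunk above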

View file

@@ -21,14 +21,13 @@ from __future__ import with_statement
 import time
 import traceback
 import threading
+import datetime

 import sickbeard

-from sickbeard import db, logger, common, exceptions, helpers
-from sickbeard import generic_queue, scheduler
-from sickbeard import search, failed_history, history
-from sickbeard import ui
-from sickbeard.exceptions import ex
-from sickbeard.search import pickBestResult
+from sickbeard import db, logger, common, exceptions, helpers, network_timezones, generic_queue, search, \
+    failed_history, history, ui
+from sickbeard.search import wantedEpisodes

 search_queue_lock = threading.Lock()
@@ -128,38 +127,132 @@ class SearchQueue(generic_queue.GenericQueue):
         else:
             logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)


 class RecentSearchQueueItem(generic_queue.QueueItem):
     def __init__(self):
         self.success = None
+        self.episodes = []
         generic_queue.QueueItem.__init__(self, 'Recent Search', RECENT_SEARCH)

     def run(self):
         generic_queue.QueueItem.run(self)

-        try:
-            logger.log("Beginning recent search for new episodes")
-            foundResults = search.searchForNeededEpisodes()
-
-            if not len(foundResults):
-                logger.log(u"No needed episodes found")
-            else:
-                for result in foundResults:
-                    # just use the first result for now
-                    logger.log(u"Downloading " + result.name + " from " + result.provider.name)
-                    self.success = search.snatchEpisode(result)
-
-                    # give the CPU a break
-                    time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
-
-            generic_queue.QueueItem.finish(self)
-        except Exception:
-            logger.log(traceback.format_exc(), logger.DEBUG)
-
-        if self.success is None:
-            self.success = False
-
+        self._change_missing_episodes()
+
+        self.update_providers()
+
+        show_list = sickbeard.showList
+        fromDate = datetime.date.fromordinal(1)
+        for curShow in show_list:
+            if curShow.paused:
+                continue
+
+            self.episodes.extend(wantedEpisodes(curShow, fromDate))
+
+        if not self.episodes:
+            logger.log(u'No search of cache for episodes required')
+            self.success = True
+        else:
+            logger.log(u'Found a total of %d episode(s) requiring searching' % len(self.episodes))
+
+            try:
+                logger.log(u'Beginning recent search for episodes')
+                foundResults = search.searchForNeededEpisodes(self.episodes)
+
+                if not len(foundResults):
+                    logger.log(u'No needed episodes found')
+                else:
+                    for result in foundResults:
+                        # just use the first result for now
+                        logger.log(u'Downloading ' + result.name + ' from ' + result.provider.name)
+                        self.success = search.snatchEpisode(result)
+
+                        # give the CPU a break
+                        time.sleep(common.cpu_presets[sickbeard.CPU_PRESET])
+
+            except Exception:
+                logger.log(traceback.format_exc(), logger.DEBUG)
+
+            if self.success is None:
+                self.success = False
+
         self.finish()

+    @staticmethod
+    def _change_missing_episodes():
+        if not network_timezones.network_dict:
+            network_timezones.update_network_dict()
+
+        if network_timezones.network_dict:
+            curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
+        else:
+            curDate = (datetime.date.today() - datetime.timedelta(days=2)).toordinal()
+
+        curTime = datetime.datetime.now(network_timezones.sb_timezone)
+
+        myDB = db.DBConnection()
+        sqlResults = myDB.select('SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND airdate <= ?',
+                                 [common.UNAIRED, curDate])
+
+        sql_l = []
+        show = None
+
+        for sqlEp in sqlResults:
+            try:
+                if not show or int(sqlEp['showid']) != show.indexerid:
+                    show = helpers.findCertainShow(sickbeard.showList, int(sqlEp['showid']))
+
+                # for when there is orphaned series in the database but not loaded into our showlist
+                if not show:
+                    continue
+
+            except exceptions.MultipleShowObjectsException:
+                logger.log(u'ERROR: expected to find a single show matching ' + str(sqlEp['showid']))
+                continue
+
+            try:
+                end_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network) + datetime.timedelta(minutes=helpers.tryInt(show.runtime, 60))
+                # filter out any episodes that haven't aired yet
+                if end_time > curTime:
+                    continue
+            except:
+                # if an error occurred assume the episode hasn't aired yet
+                continue
+
+            ep = show.getEpisode(int(sqlEp['season']), int(sqlEp['episode']))
+            with ep.lock:
+                if ep.show.paused:
+                    ep.status = common.SKIPPED
+                else:
+                    ep.status = common.WANTED
+
+                sql_l.append(ep.get_sql())
+        else:
+            logger.log(u'No new released episodes found ...')
+
+        if len(sql_l) > 0:
+            myDB = db.DBConnection()
+            myDB.mass_action(sql_l)
+
+    @staticmethod
+    def update_providers():
+        origThreadName = threading.currentThread().name
+        threads = []
+
+        logger.log('Updating provider caches with recent upload data')
+
+        providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive() and x.enable_recentsearch]
+        for curProvider in providers:
+            # spawn separate threads for each provider so we don't need to wait for providers with slow network operation
+            threads.append(threading.Thread(target=curProvider.cache.updateCache, name=origThreadName +
+                                            ' :: [' + curProvider.name + ']'))
+            # start the thread we just created
+            threads[-1].start()
+
+        # wait for all threads to finish
+        for t in threads:
+            t.join()
+

 class ManualSearchQueueItem(generic_queue.QueueItem):
     def __init__(self, show, segment):

View file

@@ -3126,10 +3126,10 @@ class ManageSearches(Manage):
     def index(self, *args, **kwargs):
         t = PageTemplate(headers=self.request.headers, file='manage_manageSearches.tmpl')
         # t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
-        t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused()  # @UndefinedVariable
-        t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress()  # @UndefinedVariable
-        t.recentSearchStatus = sickbeard.recentSearchScheduler.action.amActive  # @UndefinedVariable
-        t.findPropersStatus = sickbeard.properFinderScheduler.action.amActive  # @UndefinedVariable
+        t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused()
+        t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress()
+        t.recentSearchStatus = sickbeard.searchQueueScheduler.action.is_recentsearch_in_progress()
+        t.findPropersStatus = sickbeard.properFinderScheduler.action.amActive
         t.queueLength = sickbeard.searchQueueScheduler.action.queue_length()
         t.submenu = self.ManageMenu()
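
is_recentsearch_in_progress() is called here but its body is not part of this commit. A hedged sketch of how such a check is typically written on SearchQueue, modelled on the existing is_backlog_in_progress() pattern; the repository's actual implementation may differ:

    # Assumed sketch, not taken from this commit: report whether a
    # RecentSearchQueueItem is queued or currently running.
    def is_recentsearch_in_progress(self):
        for cur_item in self.queue + [self.currentItem]:
            if isinstance(cur_item, RecentSearchQueueItem):
                return True
        return False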