Merge branch 'feature/ChangeSchedulers' into dev

JackDandy 2023-04-27 12:49:50 +01:00
commit 1579df04f1
29 changed files with 929 additions and 796 deletions


@@ -10,6 +10,8 @@
 * Update SimpleJSON 3.18.1 (c891b95) to 3.19.1 (aeb63ee)
 * Update Tornado Web Server 6.3.0 (7186b86) to 6.3.1 (419838b)
 * Update urllib3 1.26.14 (a06c05c) to 1.26.15 (25cca389)
+* Change add jobs to centralise scheduler activities
+* Change refactor scene_exceptions
 
 
 ### 3.28.0 (2023-04-12 13:05:00 UTC)


@@ -555,9 +555,9 @@ class SickGear(object):
             name_cache.build_name_cache()
 
         # load all ids from xem
-        sickgear.classes.loading_msg.message = 'Loading xem data'
-        startup_background_tasks = threading.Thread(name='XEMUPDATER', target=sickgear.scene_exceptions.get_xem_ids)
-        startup_background_tasks.start()
+        # sickgear.classes.loading_msg.message = 'Loading xem data'
+        # startup_background_tasks = threading.Thread(name='XEMUPDATER', target=sickgear.scene_exceptions.ReleaseMap().fetch_xem_ids)
+        # startup_background_tasks.start()
 
         sickgear.classes.loading_msg.message = 'Checking history'
         # check history snatched_proper update
@@ -624,7 +624,7 @@ class SickGear(object):
         if not switching and (self.force_update or sickgear.UPDATE_SHOWS_ON_START):
             sickgear.classes.loading_msg.message = 'Starting a forced show update'
             background_start_forced_show_update = threading.Thread(name='STARTUP-FORCE-SHOW-UPDATE',
-                                                                   target=sickgear.show_update_scheduler.action.run)
+                                                                   target=sickgear.update_show_scheduler.action.run)
             background_start_forced_show_update.start()
 
         sickgear.classes.loading_msg.message = 'Switching to default web server'


@@ -37,7 +37,7 @@ import zlib
 from . import classes, db, helpers, image_cache, indexermapper, logger, metadata, naming, people_queue, providers, \
     scene_exceptions, scene_numbering, scheduler, search_backlog, search_propers, search_queue, search_recent, \
     show_queue, show_updater, subtitles, trakt_helpers, version_checker, watchedstate_queue
-from . import auto_post_processer, properFinder  # must come after the above imports
+from . import auto_media_process, properFinder  # must come after the above imports
 from .common import SD, SKIPPED, USER_AGENT
 from .config import check_section, check_setting_int, check_setting_str, ConfigMigrator, minimax
 from .databases import cache_db, failed_db, mainDB
@@ -61,7 +61,7 @@ import sg_helpers
 # noinspection PyUnreachableCode
 if False:
-    from typing import AnyStr, Dict, List
+    from typing import AnyStr, Dict, List, Optional
     from adba import Connection
     from .event_queue import Events
     from .tv import TVShow
@@ -88,23 +88,25 @@ DATA_DIR = ''
 # noinspection PyTypeChecker
 events = None  # type: Events
-recent_search_scheduler = None
-backlog_search_scheduler = None
-show_update_scheduler = None
-people_queue_scheduler = None
-update_software_scheduler = None
-update_packages_scheduler = None
-show_queue_scheduler = None
-search_queue_scheduler = None
-proper_finder_scheduler = None
-media_process_scheduler = None
-subtitles_finder_scheduler = None
-# trakt_checker_scheduler = None
-emby_watched_state_scheduler = None
-plex_watched_state_scheduler = None
-watched_state_queue_scheduler = None
+show_queue_scheduler = None  # type: Optional[scheduler.Scheduler]
+search_queue_scheduler = None  # type: Optional[scheduler.Scheduler]
+people_queue_scheduler = None  # type: Optional[scheduler.Scheduler]
+watched_state_queue_scheduler = None  # type: Optional[scheduler.Scheduler]
+update_software_scheduler = None  # type: Optional[scheduler.Scheduler]
+update_packages_scheduler = None  # type: Optional[scheduler.Scheduler]
+update_show_scheduler = None  # type: Optional[scheduler.Scheduler]
+update_release_mappings_scheduler = None  # type: Optional[scheduler.Scheduler]
+search_recent_scheduler = None  # type: Optional[scheduler.Scheduler]
+search_backlog_scheduler = None  # type: Optional[search_backlog.BacklogSearchScheduler]
+search_propers_scheduler = None  # type: Optional[scheduler.Scheduler]
+search_subtitles_scheduler = None  # type: Optional[scheduler.Scheduler]
+emby_watched_state_scheduler = None  # type: Optional[scheduler.Scheduler]
+plex_watched_state_scheduler = None  # type: Optional[scheduler.Scheduler]
+process_media_scheduler = None  # type: Optional[scheduler.Scheduler]
 # noinspection PyTypeChecker
 background_mapping_task = None  # type: threading.Thread
+# deprecated
+# trakt_checker_scheduler = None
 
 provider_ping_thread_pool = {}
@@ -624,9 +626,11 @@ __INITIALIZED__ = False
 __INIT_STAGE__ = 0
 
 # don't reassign MEMCACHE var without reassigning sg_helpers.MEMCACHE
+# and scene_exceptions.MEMCACHE
 # as long as the pointer is the same (dict only modified) all is fine
 MEMCACHE = {}
 sg_helpers.MEMCACHE = MEMCACHE
+scene_exceptions.MEMCACHE = MEMCACHE
 MEMCACHE_FLAG_IMAGES = {}
@@ -1518,11 +1522,14 @@ def init_stage_2():
     global __INITIALIZED__, MEMCACHE, MEMCACHE_FLAG_IMAGES, RECENTSEARCH_STARTUP
     # Schedulers
     # global trakt_checker_scheduler
-    global recent_search_scheduler, backlog_search_scheduler, people_queue_scheduler, show_update_scheduler, \
-        update_software_scheduler, update_packages_scheduler, show_queue_scheduler, search_queue_scheduler, \
-        proper_finder_scheduler, media_process_scheduler, subtitles_finder_scheduler, \
-        background_mapping_task, \
-        watched_state_queue_scheduler, emby_watched_state_scheduler, plex_watched_state_scheduler
+    global update_software_scheduler, update_packages_scheduler, \
+        update_show_scheduler, update_release_mappings_scheduler, \
+        search_backlog_scheduler, search_propers_scheduler, \
+        search_recent_scheduler, search_subtitles_scheduler, \
+        search_queue_scheduler, show_queue_scheduler, people_queue_scheduler, \
+        watched_state_queue_scheduler, emby_watched_state_scheduler, plex_watched_state_scheduler, \
+        process_media_scheduler, background_mapping_task
     # Gen Config/Misc
     global SHOW_UPDATE_HOUR, UPDATE_INTERVAL, UPDATE_PACKAGES_INTERVAL
     # Search Settings/Episode
@ -1570,32 +1577,17 @@ def init_stage_2():
metadata_provider_dict[tmp_provider.name] = tmp_provider metadata_provider_dict[tmp_provider.name] = tmp_provider
# initialize schedulers # initialize schedulers
# updaters # /
update_now = datetime.timedelta(minutes=0) # queues must be first
update_software_scheduler = scheduler.Scheduler(
version_checker.SoftwareUpdater(),
cycle_time=datetime.timedelta(hours=UPDATE_INTERVAL),
thread_name='SOFTWAREUPDATER',
silent=False)
update_packages_scheduler = scheduler.Scheduler(
version_checker.PackagesUpdater(),
cycle_time=datetime.timedelta(hours=UPDATE_PACKAGES_INTERVAL),
# run_delay=datetime.timedelta(minutes=2),
thread_name='PACKAGESUPDATER',
silent=False)
show_queue_scheduler = scheduler.Scheduler( show_queue_scheduler = scheduler.Scheduler(
show_queue.ShowQueue(), show_queue.ShowQueue(),
cycle_time=datetime.timedelta(seconds=3), cycle_time=datetime.timedelta(seconds=3),
thread_name='SHOWQUEUE') thread_name='SHOWQUEUE')
show_update_scheduler = scheduler.Scheduler( search_queue_scheduler = scheduler.Scheduler(
show_updater.ShowUpdater(), search_queue.SearchQueue(),
cycle_time=datetime.timedelta(hours=1), cycle_time=datetime.timedelta(seconds=3),
start_time=datetime.time(hour=SHOW_UPDATE_HOUR), thread_name='SEARCHQUEUE')
thread_name='SHOWUPDATER',
prevent_cycle_run=show_queue_scheduler.action.is_show_update_running) # 3AM
people_queue_scheduler = scheduler.Scheduler( people_queue_scheduler = scheduler.Scheduler(
people_queue.PeopleQueue(), people_queue.PeopleQueue(),
@@ -1603,21 +1595,52 @@ def init_stage_2():
         thread_name='PEOPLEQUEUE'
     )
 
-    # searchers
-    search_queue_scheduler = scheduler.Scheduler(
-        search_queue.SearchQueue(),
+    watched_state_queue_scheduler = scheduler.Scheduler(
+        watchedstate_queue.WatchedStateQueue(),
         cycle_time=datetime.timedelta(seconds=3),
-        thread_name='SEARCHQUEUE')
+        thread_name='WATCHEDSTATEQUEUE')
+
+    # /
+    # updaters
+    update_software_scheduler = scheduler.Scheduler(
+        version_checker.SoftwareUpdater(),
+        cycle_time=datetime.timedelta(hours=UPDATE_INTERVAL),
+        thread_name='SOFTWAREUPDATE',
+        silent=False)
+
+    update_packages_scheduler = scheduler.Scheduler(
+        version_checker.PackagesUpdater(),
+        cycle_time=datetime.timedelta(hours=UPDATE_PACKAGES_INTERVAL),
+        # run_delay=datetime.timedelta(minutes=2),
+        thread_name='PACKAGESUPDATE',
+        silent=False)
+
+    update_show_scheduler = scheduler.Scheduler(
+        show_updater.ShowUpdater(),
+        cycle_time=datetime.timedelta(hours=1),
+        start_time=datetime.time(hour=SHOW_UPDATE_HOUR),
+        thread_name='SHOWDATAUPDATE',
+        prevent_cycle_run=show_queue_scheduler.action.is_show_update_running)  # 3AM
+
+    classes.loading_msg.message = 'Loading show maps'
+    update_release_mappings_scheduler = scheduler.Scheduler(
+        scene_exceptions.ReleaseMap(),
+        cycle_time=datetime.timedelta(hours=2),
+        thread_name='SHOWMAPSUPDATE',
+        silent=False)
 
+    # /
+    # searchers
     init_search_delay = int(os.environ.get('INIT_SEARCH_DELAY', 0))
 
     # enter 4499 (was 4489) for experimental internal provider intervals
     update_interval = datetime.timedelta(minutes=(RECENTSEARCH_INTERVAL, 1)[4499 == RECENTSEARCH_INTERVAL])
-    recent_search_scheduler = scheduler.Scheduler(
+    update_now = datetime.timedelta(minutes=0)
+    search_recent_scheduler = scheduler.Scheduler(
         search_recent.RecentSearcher(),
         cycle_time=update_interval,
         run_delay=update_now if RECENTSEARCH_STARTUP else datetime.timedelta(minutes=init_search_delay or 5),
-        thread_name='RECENTSEARCHER',
+        thread_name='RECENTSEARCH',
         prevent_cycle_run=search_queue_scheduler.action.is_recentsearch_in_progress)
 
     if [x for x in providers.sorted_sources()
@@ -1635,14 +1658,13 @@ def init_stage_2():
             backlogdelay = helpers.try_int((time_diff.total_seconds() / 60) + 10, 10)
     else:
         backlogdelay = 10
-    backlog_search_scheduler = search_backlog.BacklogSearchScheduler(
+    search_backlog_scheduler = search_backlog.BacklogSearchScheduler(
         search_backlog.BacklogSearcher(),
         cycle_time=datetime.timedelta(minutes=get_backlog_cycle_time()),
         run_delay=datetime.timedelta(minutes=init_search_delay or backlogdelay),
-        thread_name='BACKLOG',
+        thread_name='BACKLOGSEARCH',
         prevent_cycle_run=search_queue_scheduler.action.is_standard_backlog_in_progress)
 
-    propers_searcher = search_propers.ProperSearcher()
     last_proper_search = datetime.datetime.fromtimestamp(properFinder.get_last_proper_search())
     time_diff = datetime.timedelta(days=1) - (datetime.datetime.now() - last_proper_search)
     if time_diff < datetime.timedelta(seconds=0):
@@ -1650,34 +1672,21 @@ def init_stage_2():
     else:
         properdelay = helpers.try_int((time_diff.total_seconds() / 60) + 5, 20)
 
-    proper_finder_scheduler = scheduler.Scheduler(
-        propers_searcher,
+    search_propers_scheduler = scheduler.Scheduler(
+        search_propers.ProperSearcher(),
         cycle_time=datetime.timedelta(days=1),
         run_delay=datetime.timedelta(minutes=init_search_delay or properdelay),
-        thread_name='FINDPROPERS',
+        thread_name='PROPERSSEARCH',
         prevent_cycle_run=search_queue_scheduler.action.is_propersearch_in_progress)
 
-    # processors
-    media_process_scheduler = scheduler.Scheduler(
-        auto_post_processer.PostProcesser(),
-        cycle_time=datetime.timedelta(minutes=MEDIAPROCESS_INTERVAL),
-        thread_name='POSTPROCESSER',
-        silent=not PROCESS_AUTOMATICALLY)
-
-    subtitles_finder_scheduler = scheduler.Scheduler(
+    search_subtitles_scheduler = scheduler.Scheduler(
         subtitles.SubtitlesFinder(),
         cycle_time=datetime.timedelta(hours=SUBTITLES_FINDER_INTERVAL),
-        thread_name='FINDSUBTITLES',
+        thread_name='SUBTITLESEARCH',
         silent=not USE_SUBTITLES)
 
-    background_mapping_task = threading.Thread(name='MAPPINGSUPDATER', target=indexermapper.load_mapped_ids,
-                                               kwargs={'load_all': True})
-
-    watched_state_queue_scheduler = scheduler.Scheduler(
-        watchedstate_queue.WatchedStateQueue(),
-        cycle_time=datetime.timedelta(seconds=3),
-        thread_name='WATCHEDSTATEQUEUE')
-
+    # /
+    # others
     emby_watched_state_scheduler = scheduler.Scheduler(
         EmbyWatchedStateUpdater(),
         cycle_time=datetime.timedelta(minutes=EMBY_WATCHEDSTATE_INTERVAL),
@@ -1690,6 +1699,15 @@ def init_stage_2():
         run_delay=datetime.timedelta(minutes=5),
         thread_name='PLEXWATCHEDSTATE')
 
+    process_media_scheduler = scheduler.Scheduler(
+        auto_media_process.MediaProcess(),
+        cycle_time=datetime.timedelta(minutes=MEDIAPROCESS_INTERVAL),
+        thread_name='PROCESSMEDIA',
+        silent=not PROCESS_AUTOMATICALLY)
+
+    background_mapping_task = threading.Thread(name='MAPPINGUPDATES', target=indexermapper.load_mapped_ids,
+                                               kwargs={'load_all': True})
+
     MEMCACHE['history_tab_limit'] = 11
     MEMCACHE['history_tab'] = History.menu_tab(MEMCACHE['history_tab_limit'])
@@ -1707,11 +1725,15 @@ def init_stage_2():
 def enabled_schedulers(is_init=False):
     # ([], [trakt_checker_scheduler])[USE_TRAKT] + \
     return ([], [events])[is_init] \
-        + ([], [recent_search_scheduler, backlog_search_scheduler, show_update_scheduler, people_queue_scheduler,
-                update_software_scheduler, update_packages_scheduler,
-                show_queue_scheduler, search_queue_scheduler, proper_finder_scheduler,
-                media_process_scheduler, subtitles_finder_scheduler,
-                emby_watched_state_scheduler, plex_watched_state_scheduler, watched_state_queue_scheduler]
+        + ([], [update_software_scheduler, update_packages_scheduler,
+                update_show_scheduler, update_release_mappings_scheduler,
+                search_recent_scheduler, search_backlog_scheduler,
+                search_propers_scheduler, search_subtitles_scheduler,
+                show_queue_scheduler, search_queue_scheduler,
+                people_queue_scheduler, watched_state_queue_scheduler,
+                emby_watched_state_scheduler, plex_watched_state_scheduler,
+                process_media_scheduler
+                ]
           )[not MEMCACHE.get('update_restart')] \
        + ([events], [])[is_init]
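
Taken together, the hunks above construct every scheduler through one call shape. The following is a minimal, self-contained sketch of that shape; it is a stand-in for sickgear/scheduler.py, not the real implementation, and every body here is an illustrative assumption. It shows how the arguments used above (cycle_time, run_delay, thread_name, silent, and the prevent_cycle_run gate) would typically interact:

import datetime
import threading


class Scheduler(threading.Thread):
    # Illustrative stand-in only; parameter names mirror the calls above.
    def __init__(self, action, cycle_time, run_delay=datetime.timedelta(),
                 thread_name='JOB', silent=True, prevent_cycle_run=None):
        super(Scheduler, self).__init__(name=thread_name, daemon=True)
        self.action = action                        # object exposing run()
        self.cycle_time = cycle_time                # pause between cycles
        self.run_delay = run_delay                  # extra wait before the first cycle
        self.silent = silent                        # when True, suppress per-cycle logging
        self.prevent_cycle_run = prevent_cycle_run  # callable; skip a cycle while it returns True
        self._stopper = threading.Event()

    def run(self):
        self._stopper.wait(self.run_delay.total_seconds())
        while not self._stopper.is_set():
            if not (self.prevent_cycle_run and self.prevent_cycle_run()):
                self.action.run()
            self._stopper.wait(self.cycle_time.total_seconds())


class Action(object):
    @staticmethod
    def run():
        print('cycle ran')


# mirrors e.g. the update_show_scheduler call: a predicate can gate each cycle
sched = Scheduler(Action(), cycle_time=datetime.timedelta(seconds=3),
                  thread_name='DEMO', silent=False,
                  prevent_cycle_run=lambda: False)
sched.start()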


@@ -18,32 +18,31 @@ import os.path
 import sickgear
 from . import logger, processTV
+from .scheduler import Job
 
 
-class PostProcesser(object):
+class MediaProcess(Job):
     def __init__(self):
-        self.amActive = False
+        super(MediaProcess, self).__init__(self.job_run, kwargs={})
 
     @staticmethod
     def is_enabled():
         return sickgear.PROCESS_AUTOMATICALLY
 
-    def run(self):
+    def job_run(self):
         if self.is_enabled():
-            self.amActive = True
             self._main()
-            self.amActive = False
 
     @staticmethod
     def _main():
         if not os.path.isdir(sickgear.TV_DOWNLOAD_DIR):
-            logger.error('Automatic post-processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR)
+            logger.error('Automatic media processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR)
             return
 
         if not os.path.isabs(sickgear.TV_DOWNLOAD_DIR):
-            logger.error('Automatic post-processing attempted but dir %s is relative '
+            logger.error('Automatic media processing attempted but dir %s is relative '
                          '(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR)
             return
 
-        processTV.processDir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True)
+        processTV.process_dir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True)
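
The PostProcesser to MediaProcess rewrite above is the migration pattern repeated across this branch: a class with its own run() and amActive flag becomes a Job subclass that hands its worker method to the base class. A hedged sketch of that shape, with an assumed minimal Job base (SickGear's real base lives in sickgear/scheduler.py and differs in detail):

class Job(object):
    # assumed minimal base: store the worker callable, invoke it from run()
    def __init__(self, func, kwargs=None):
        self._func = func
        self._kwargs = {} if None is kwargs else kwargs

    def run(self):
        self._func(**self._kwargs)


class MediaProcessSketch(Job):
    def __init__(self):
        super(MediaProcessSketch, self).__init__(self.job_run, kwargs={})

    @staticmethod
    def is_enabled():
        return True  # stands in for sickgear.PROCESS_AUTOMATICALLY

    def job_run(self):
        if self.is_enabled():
            print('would process the TV download dir here')


MediaProcessSketch().run()  # a Scheduler calls run(), which dispatches to job_run()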


@@ -152,8 +152,8 @@ def schedule_mediaprocess(iv):
     if sickgear.MEDIAPROCESS_INTERVAL < sickgear.MIN_MEDIAPROCESS_INTERVAL:
         sickgear.MEDIAPROCESS_INTERVAL = sickgear.MIN_MEDIAPROCESS_INTERVAL
 
-    sickgear.media_process_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL)
-    sickgear.media_process_scheduler.set_paused_state()
+    sickgear.process_media_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL)
+    sickgear.process_media_scheduler.set_paused_state()
 
 
 def schedule_recentsearch(iv):
@@ -162,14 +162,14 @@ def schedule_recentsearch(iv):
     if sickgear.RECENTSEARCH_INTERVAL < sickgear.MIN_RECENTSEARCH_INTERVAL:
         sickgear.RECENTSEARCH_INTERVAL = sickgear.MIN_RECENTSEARCH_INTERVAL
 
-    sickgear.recent_search_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL)
+    sickgear.search_recent_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL)
 
 
 def schedule_backlog(iv):
     sickgear.BACKLOG_PERIOD = minimax(iv, sickgear.DEFAULT_BACKLOG_PERIOD,
                                       sickgear.MIN_BACKLOG_PERIOD, sickgear.MAX_BACKLOG_PERIOD)
-    sickgear.backlog_search_scheduler.action.cycle_time = sickgear.BACKLOG_PERIOD
+    sickgear.search_backlog_scheduler.action.cycle_time = sickgear.BACKLOG_PERIOD
 
 
 def schedule_update_software(iv):
@@ -220,7 +220,7 @@ def schedule_update_packages_notify(update_packages_notify):
 def schedule_download_propers(download_propers):
     if sickgear.DOWNLOAD_PROPERS != download_propers:
         sickgear.DOWNLOAD_PROPERS = download_propers
-        sickgear.proper_finder_scheduler.set_paused_state()
+        sickgear.search_propers_scheduler.set_paused_state()
 
 
 def schedule_trakt(use_trakt):
@@ -233,7 +233,7 @@ def schedule_trakt(use_trakt):
 def schedule_subtitles(use_subtitles):
     if sickgear.USE_SUBTITLES != use_subtitles:
         sickgear.USE_SUBTITLES = use_subtitles
-        sickgear.subtitles_finder_scheduler.set_paused_state()
+        sickgear.search_subtitles_scheduler.set_paused_state()
 
 
 def schedule_emby_watched(emby_watched_interval):


@@ -1,5 +1,4 @@
-from lib.six import moves
-
+import queue
 import threading
 
@@ -15,7 +14,7 @@ class Event(object):
 class Events(threading.Thread):
     def __init__(self, callback):
         super(Events, self).__init__()
-        self.queue = moves.queue.Queue()
+        self.queue = queue.Queue()
         self.daemon = True
         self.callback = callback
         self.name = 'EVENT-QUEUE'
@@ -31,24 +30,24 @@ class Events(threading.Thread):
         while not self._stopper.is_set():
             try:
                 # get event type
-                etype = self.queue.get(True, 1)
-            except moves.queue.Empty:
-                etype = 'Empty'
+                ev_type = self.queue.get(True, 1)
+            except queue.Empty:
+                ev_type = 'Empty'
             except(BaseException, Exception):
-                etype = None
+                ev_type = None
 
-            if etype in (self.SystemEvent.RESTART, self.SystemEvent.SHUTDOWN, None, 'Empty'):
-                if etype in ('Empty',):
+            if ev_type in (self.SystemEvent.RESTART, self.SystemEvent.SHUTDOWN, None, 'Empty'):
+                if ev_type in ('Empty',):
                     continue
                 from sickgear import logger
-                logger.debug(f'Callback {self.callback.__name__}(event type:{etype})')
+                logger.debug(f'Callback {self.callback.__name__}(event type:{ev_type})')
 
             try:
                 # perform callback if we got an event type
-                self.callback(etype)
+                self.callback(ev_type)
 
                 # event completed
                 self.queue.task_done()
-            except moves.queue.Empty:
+            except queue.Empty:
                 pass
 
         # exiting thread
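
The event_queue change above drops six.moves in favour of the Python 3 standard library. The behaviour the loop relies on is unchanged: queue.Queue.get(True, 1) blocks for at most one second and raises queue.Empty on timeout, for example:

import queue

q = queue.Queue()
q.put('RESTART')

try:
    ev_type = q.get(True, 1)  # block up to 1 second for an event
except queue.Empty:
    ev_type = 'Empty'         # timeout: nothing was queued

print(ev_type)  # -> RESTART
q.task_done()   # mark the fetched event as handled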


@@ -19,6 +19,7 @@ import datetime
 import threading
 
 from . import db, logger
+from .scheduler import Job
 from exceptions_helper import ex
 
 from six import integer_types
@@ -37,9 +38,10 @@ class QueuePriorities(object):
     VERYHIGH = 40
 
 
-class GenericQueue(object):
+class GenericQueue(Job):
     def __init__(self, cache_db_tables=None, main_db_tables=None):
         # type: (List[AnyStr], List[AnyStr]) -> None
+        super(GenericQueue, self).__init__(self.job_run, silent=True, kwargs={}, reentrant_lock=True)
 
         self.currentItem = None  # type: QueueItem or None
@@ -51,13 +53,41 @@ class GenericQueue(object):
         self.events = {}  # type: Dict[int, List[Callable]]
 
-        self.lock = threading.RLock()
-
         self.cache_db_tables = cache_db_tables or []  # type: List[AnyStr]
         self.main_db_tables = main_db_tables or []  # type: List[AnyStr]
 
         self._id_counter = self._load_init_id()  # type: integer_types
 
+    def job_run(self):
+        # only start a new task if one isn't already going
+        with self.lock:
+            if None is self.currentItem or not self.currentItem.is_alive():
+
+                # if the thread is dead then the current item should be finished
+                if self.currentItem:
+                    self.currentItem.finish()
+                    try:
+                        self.delete_item(self.currentItem, finished_run=True)
+                    except (BaseException, Exception):
+                        pass
+                    self.currentItem = None
+
+                # if there's something in the queue then run it in a thread and take it out of the queue
+                if 0 < len(self.queue):
+                    self.queue.sort(key=lambda y: (-y.priority, y.added))
+                    if self.queue[0].priority < self.min_priority:
+                        return
+
+                    # launch the queue item in a thread
+                    self.currentItem = self.queue.pop(0)
+                    if 'SEARCHQUEUE' != self.queue_name:
+                        self.currentItem.name = self.queue_name + '-' + self.currentItem.name
+                    self.currentItem.start()
+
+        self.check_events()
+
     def _load_init_id(self):
         # type: (...) -> integer_types
         """
@@ -216,7 +246,7 @@ class GenericQueue(object):
             self.min_priority = 999999999999
 
     def unpause(self):
-        logger.log('Unpausing queue')
+        logger.log('Un-pausing queue')
         with self.lock:
             self.min_priority = 0
 
@@ -269,36 +299,6 @@ class GenericQueue(object):
             except (BaseException, Exception) as e:
                 logger.error('Error executing Event: %s' % ex(e))
 
-    def run(self):
-        # only start a new task if one isn't already going
-        with self.lock:
-            if None is self.currentItem or not self.currentItem.is_alive():
-
-                # if the thread is dead then the current item should be finished
-                if self.currentItem:
-                    self.currentItem.finish()
-                    try:
-                        self.delete_item(self.currentItem, finished_run=True)
-                    except (BaseException, Exception):
-                        pass
-                    self.currentItem = None
-
-                # if there's something in the queue then run it in a thread and take it out of the queue
-                if 0 < len(self.queue):
-                    self.queue.sort(key=lambda y: (-y.priority, y.added))
-                    if self.queue[0].priority < self.min_priority:
-                        return
-
-                    # launch the queue item in a thread
-                    self.currentItem = self.queue.pop(0)
-                    if 'SEARCHQUEUE' != self.queue_name:
-                        self.currentItem.name = self.queue_name + '-' + self.currentItem.name
-                    self.currentItem.start()
-
-        self.check_events()
-
 
 class QueueItem(threading.Thread):
     def __init__(self, name, action_id=0, uid=None):
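
GenericQueue's polling body moves from run() into job_run(), and its own threading.RLock is replaced by one the Job base supplies (the reentrant_lock=True argument above). The lock must be reentrant because the thread holding it inside job_run() may call helpers that acquire it again. A small stdlib demonstration of that property; names here are illustrative, not SickGear's:

import threading

lock = threading.RLock()  # reentrant: the owning thread may re-acquire it


def check_events():
    with lock:  # second acquisition by the same thread; a plain Lock would deadlock
        print('events checked')


def job_run():
    with lock:
        print('queue polled')
        check_events()


job_run()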


@@ -14,6 +14,7 @@
 # You should have received a copy of the GNU General Public License
 # along with SickGear.  If not, see <http://www.gnu.org/licenses/>.
 
+from collections import defaultdict
 import threading
 
 import sickgear
@@ -21,6 +22,7 @@ from . import db
 from .helpers import full_sanitize_scene_name, try_int
 
 from six import iteritems
+from _23 import map_consume
 
 # noinspection PyUnreachableCode
 if False:
@@ -85,18 +87,19 @@ def build_name_cache(show_obj=None, update_only_scene=False):
         if show_obj:
             # search for only the requested show id and flush old show entries from namecache
             show_ids = {show_obj.tvid: [show_obj.prodid]}
             nameCache = dict([(k, v) for k, v in iteritems(nameCache)
                               if not (v[0] == show_obj.tvid and v[1] == show_obj.prodid)])
             sceneNameCache = dict([(k, v) for k, v in iteritems(sceneNameCache)
                                    if not (v[0] == show_obj.tvid and v[1] == show_obj.prodid)])
 
             # add standard indexer name to namecache
-            nameCache[full_sanitize_scene_name(show_obj.unique_name or show_obj.name)] = [show_obj.tvid, show_obj.prodid, -1]
+            nameCache[full_sanitize_scene_name(show_obj.unique_name or show_obj.name)] = \
+                [show_obj.tvid, show_obj.prodid, -1]
         else:
             # generate list of production ids to look up in cache.db
-            show_ids = {}
-            for cur_show_obj in sickgear.showList:
-                show_ids.setdefault(cur_show_obj.tvid, []).append(cur_show_obj.prodid)
+            show_ids = defaultdict(list)
+            map_consume(lambda _so: show_ids[_so.tvid].append(_so.prodid), sickgear.showList)
 
             # add all standard show indexer names to namecache
             nameCache = dict(
@@ -104,33 +107,32 @@ def build_name_cache(show_obj=None, update_only_scene=False):
                 for cur_so in sickgear.showList if cur_so])
             sceneNameCache = {}
 
-        cache_db = db.DBConnection()
-        cache_results = []
-
-        if update_only_scene:
-            # generate list of production ids to look up in cache.db
-            show_ids = {}
-            for cur_show_obj in sickgear.showList:
-                show_ids.setdefault(cur_show_obj.tvid, []).append(cur_show_obj.prodid)
-            tmp_scene_name_cache = {}
-        else:
+        if not update_only_scene:
             tmp_scene_name_cache = sceneNameCache.copy()
+        else:
+            # generate list of production ids to look up in cache.db
+            show_ids = defaultdict(list)
+            map_consume(lambda _so: show_ids[_so.tvid].append(_so.prodid), sickgear.showList)
+            tmp_scene_name_cache = {}
 
-        for t, s in iteritems(show_ids):
+        cache_results = []
+        cache_db = db.DBConnection()
+        for cur_tvid, cur_prodid_list in iteritems(show_ids):
             cache_results += cache_db.select(
-                'SELECT show_name, indexer AS tv_id, indexer_id AS prod_id, season'
-                ' FROM scene_exceptions'
-                ' WHERE indexer = %s AND indexer_id IN (%s)' % (t, ','.join(['%s' % i for i in s])))
+                f'SELECT show_name, indexer AS tv_id, indexer_id AS prod_id, season'
+                f' FROM scene_exceptions'
+                f' WHERE indexer = {cur_tvid} AND indexer_id IN ({",".join(map(str, cur_prodid_list))})')
 
         if cache_results:
-            for cache_result in cache_results:
-                tvid = int(cache_result['tv_id'])
-                prodid = int(cache_result['prod_id'])
-                season = try_int(cache_result['season'], -1)
-                name = full_sanitize_scene_name(cache_result['show_name'])
+            for cur_result in cache_results:
+                tvid = int(cur_result['tv_id'])
+                prodid = int(cur_result['prod_id'])
+                season = try_int(cur_result['season'], -1)
+                name = full_sanitize_scene_name(cur_result['show_name'])
                 tmp_scene_name_cache[name] = [tvid, prodid, season]
 
-            sceneNameCache = tmp_scene_name_cache
+            sceneNameCache = tmp_scene_name_cache.copy()
 
 
 def remove_from_namecache(tvid, prodid):
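
The name_cache rewrite swaps a setdefault loop for defaultdict(list) plus map_consume, a SickGear helper (from its _23 module) that exhausts an iterator purely for its side effects. With the standard library alone, the same grouping reads:

from collections import defaultdict

shows = [(1, 101), (1, 102), (2, 201)]  # sample (tvid, prodid) pairs

show_ids = defaultdict(list)
for tvid, prodid in shows:
    show_ids[tvid].append(prodid)  # defaultdict removes the setdefault call

print(dict(show_ids))  # -> {1: [101, 102], 2: [201]}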


@@ -407,7 +407,8 @@ class NameParser(object):
                     new_season_numbers.append(s)
 
             elif show_obj.is_anime and len(best_result.ab_episode_numbers) and not self.testing:
-                scene_season = scene_exceptions.get_scene_exception_by_name(best_result.series_name)[2]
+                scene_season = scene_exceptions.get_scene_exception_by_name(
+                    best_result.series_name)[2]
                 for epAbsNo in best_result.ab_episode_numbers:
                     a = epAbsNo


@@ -1141,8 +1141,7 @@ class ProcessTVShow(object):
             self._buffer(processor.log.strip('\n'))
 
 
-# backward compatibility prevents the case of this function name from being updated to PEP8
-def processDir(dir_name, nzb_name=None, process_method=None, force=False, force_replace=None,
+def process_dir(dir_name, nzb_name=None, process_method=None, force=False, force_replace=None,
               failed=False, pp_type='auto', cleanup=False, webhandler=None, show_obj=None, is_basedir=True,
               skip_failure_processing=False, client=None):
     """
@@ -1182,6 +1181,10 @@ def processDir(dir_name, nzb_name=None, process_method=None, force=False, force_
                                              pp_type, cleanup, show_obj)
 
 
+# backward compatibility
+processDir = process_dir
+
+
 def process_minimal(nzb_name, show_obj, failed, webhandler):
     # type: (AnyStr, TVShow, bool, Any) -> None
     ProcessTVShow(webhandler).process_minimal(nzb_name, show_obj, failed, webhandler)
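
Renaming processDir to the PEP 8 compliant process_dir while keeping the module-level assignment processDir = process_dir is a zero-cost compatibility alias: both names bind the same function object, so existing callers keep working. For example:

def process_dir(dir_name):
    return 'processed %s' % dir_name


# backward compatibility, as in the diff above
processDir = process_dir

assert processDir('/downloads') == process_dir('/downloads')
assert processDir is process_dir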


@@ -71,7 +71,7 @@ def search_propers(provider_proper_obj=None):
     if None is provider_proper_obj:
         _set_last_proper_search(datetime.datetime.now())
 
-        proper_sch = sickgear.proper_finder_scheduler
+        proper_sch = sickgear.search_propers_scheduler
         if None is proper_sch.start_time:
             run_in = proper_sch.last_run + proper_sch.cycle_time - datetime.datetime.now()
             run_at = ', next check '
@@ -696,7 +696,7 @@ def _set_last_proper_search(when):
 
 
 def next_proper_timeleft():
-    return sickgear.proper_finder_scheduler.time_left()
+    return sickgear.search_propers_scheduler.time_left()
 
 
 def get_last_proper_search():


@@ -37,7 +37,7 @@ from ..classes import NZBSearchResult, TorrentSearchResult, SearchResult
 from ..common import Quality, MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
 from ..helpers import maybe_plural, remove_file_perm
 from ..name_parser.parser import InvalidNameException, InvalidShowException, NameParser
-from ..scene_exceptions import has_season_exceptions
+from ..scene_exceptions import ReleaseMap
 from ..show_name_helpers import get_show_names_all_possible
 from ..sgdatetime import SGDatetime
 from ..tv import TVEpisode, TVShow
@@ -1743,7 +1743,8 @@ class TorrentProvider(GenericProvider):
             return []
 
         show_obj = ep_obj.show_obj
-        season = (-1, ep_obj.season)[has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
+        season = (-1, ep_obj.season)[ReleaseMap().has_season_exceptions(
+            ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
         ep_dict = self._ep_dict(ep_obj)
         sp_detail = (show_obj.air_by_date or show_obj.is_sports) and str(ep_obj.airdate).split('-')[0] or \
             (show_obj.is_anime and ep_obj.scene_absolute_number or
@@ -1779,7 +1780,8 @@ class TorrentProvider(GenericProvider):
             return []
 
         show_obj = ep_obj.show_obj
-        season = (-1, ep_obj.season)[has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
+        season = (-1, ep_obj.season)[ReleaseMap().has_season_exceptions(
+            ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
         if show_obj.air_by_date or show_obj.is_sports:
             ep_detail = [str(ep_obj.airdate).replace('-', sep_date)]\
                 if 'date_detail' not in kwargs else kwargs['date_detail'](ep_obj.airdate)


@@ -34,7 +34,7 @@ from ..network_timezones import SG_TIMEZONE
 from ..sgdatetime import SGDatetime
 from ..search import get_aired_in_season, get_wanted_qualities
 from ..show_name_helpers import get_show_names
-from ..scene_exceptions import has_season_exceptions
+from ..scene_exceptions import ReleaseMap
 from ..tv import TVEpisode, TVShow
 
 from lib.dateutil import parser
@@ -470,7 +470,7 @@ class NewznabProvider(generic.NZBProvider):
         # id search
         params = base_params.copy()
         use_id = False
-        if not has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
+        if not ReleaseMap().has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
             for i in sickgear.TVInfoAPI().all_sources:
                 if i in ep_obj.show_obj.ids and 0 < ep_obj.show_obj.ids[i]['id'] and i in self.caps:
                     params[self.caps[i]] = ep_obj.show_obj.ids[i]['id']
@@ -528,7 +528,7 @@ class NewznabProvider(generic.NZBProvider):
         # id search
         params = base_params.copy()
         use_id = False
-        if not has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
+        if not ReleaseMap().has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
             for i in sickgear.TVInfoAPI().all_sources:
                 if i in ep_obj.show_obj.ids and 0 < ep_obj.show_obj.ids[i]['id'] and i in self.caps:
                     params[self.caps[i]] = ep_obj.show_obj.ids[i]['id']


@ -16,12 +16,10 @@
from collections import defaultdict from collections import defaultdict
import datetime
import io import io
import os import os
import re import re
import sys import sys
import threading
import traceback import traceback
import sickgear import sickgear
@ -31,6 +29,7 @@ from . import db, helpers, logger, name_cache
from .anime import create_anidb_obj from .anime import create_anidb_obj
from .classes import OrderedDefaultdict from .classes import OrderedDefaultdict
from .indexers.indexer_config import TVINFO_TVDB from .indexers.indexer_config import TVINFO_TVDB
from .scheduler import Job
from .sgdatetime import SGDatetime from .sgdatetime import SGDatetime
import lib.rarfile.rarfile as rarfile import lib.rarfile.rarfile as rarfile
@ -41,215 +40,206 @@ from six import iteritems
# noinspection PyUnreachableCode # noinspection PyUnreachableCode
if False: if False:
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
from typing import AnyStr, List, Tuple, Union from typing import AnyStr, List, Tuple, Optional, Union
from .tv import TVShow
exception_dict = {} MEMCACHE = {}
anidb_exception_dict = {}
xem_exception_dict = {}
xem_ids_list = defaultdict(list)
exceptionsCache = {}
exceptionsSeasonCache = {}
exceptionLock = threading.Lock()
def should_refresh(name, max_refresh_age_secs=86400, remaining=False): class ReleaseMap(Job):
# type: (AnyStr, int, bool) -> Union[bool, int] def __init__(self):
super(ReleaseMap, self).__init__(self.job_run, thread_lock=True, kwargs={})
MEMCACHE.setdefault('release_map', {})
MEMCACHE.setdefault('release_map_season', {})
MEMCACHE.setdefault('release_map_xem', defaultdict(list))
def job_run(self):
# update xem id lists
self.fetch_xem_ids()
# update release exceptions
self.fetch_exceptions()
def fetch_xem_ids(self):
for cur_tvid, cur_name in iteritems(sickgear.TVInfoAPI().xem_supported_sources):
xem_ids = self._get_xem_ids(cur_name, sickgear.TVInfoAPI(cur_tvid).config['xem_origin'])
if len(xem_ids):
MEMCACHE['release_map_xem'][cur_tvid] = xem_ids
@staticmethod
def _get_xem_ids(infosrc_name, xem_origin):
# type: (AnyStr, AnyStr) -> List
""" """
:param name: name :param infosrc_name:
:param max_refresh_age_secs: :param xem_origin:
:param remaining: True to return remaining seconds
:return:
""" """
my_db = db.DBConnection() result = []
rows = my_db.select('SELECT last_refreshed FROM scene_exceptions_refresh WHERE list = ?', [name])
if rows:
last_refresh = int(rows[0]['last_refreshed'])
if remaining:
time_left = (last_refresh + max_refresh_age_secs - SGDatetime.timestamp_near())
return (0, time_left)[time_left > 0]
return SGDatetime.timestamp_near() > last_refresh + max_refresh_age_secs
return True
url = 'https://thexem.info/map/havemap?origin=%s' % xem_origin
def set_last_refresh(name): task = 'Fetching show ids with%s xem scene mapping%s for origin'
""" logger.log(f'{task % ("", "s")} {infosrc_name}')
parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
:param name: name if not isinstance(parsed_json, dict) or not parsed_json:
:type name: AnyStr logger.error(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
""" f' Unable to get URL: {url}')
my_db = db.DBConnection()
my_db.upsert('scene_exceptions_refresh',
{'last_refreshed': SGDatetime.timestamp_near()},
{'list': name})
def has_season_exceptions(tvid, prodid, season):
get_scene_exceptions(tvid, prodid, season=season)
if (tvid, prodid) in exceptionsCache and -1 < season and season in exceptionsCache[(tvid, prodid)]:
return True
return False
def get_scene_exceptions(tvid, prodid, season=-1):
"""
Given a indexer_id, return a list of all the scene exceptions.
:param tvid: tvid
:type tvid: int
:param prodid: prodid
:type prodid: int or long
:param season: season number
:type season: int
:return:
:rtype: List
"""
global exceptionsCache
exceptions_list = []
if (tvid, prodid) not in exceptionsCache or season not in exceptionsCache[(tvid, prodid)]:
my_db = db.DBConnection()
exceptions = my_db.select('SELECT show_name'
' FROM scene_exceptions'
' WHERE indexer = ? AND indexer_id = ?'
' AND season = ?',
[tvid, prodid, season])
if exceptions:
exceptions_list = list(set([cur_exception['show_name'] for cur_exception in exceptions]))
if (tvid, prodid) not in exceptionsCache:
exceptionsCache[(tvid, prodid)] = {}
exceptionsCache[(tvid, prodid)][season] = exceptions_list
else: else:
exceptions_list = exceptionsCache[(tvid, prodid)][season] if 'success' == parsed_json.get('result', '') and 'data' in parsed_json:
result = list(set(filter(lambda prodid: 0 < prodid,
map(lambda pid: helpers.try_int(pid), parsed_json['data']))))
if 0 == len(result):
logger.warning(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
f' no data items parsed from URL: {url}')
if 1 == season: # if we where looking for season 1 we can add generic names logger.log(f'Finished {task.lower() % (f" {len(result)}", helpers.maybe_plural(result))} {infosrc_name}')
exceptions_list += get_scene_exceptions(tvid, prodid, season=-1) return result
return exceptions_list def _xem_exceptions_fetcher(self):
result = {}
def get_all_scene_exceptions(tvid_prodid): xem_list = 'xem_us'
""" for cur_show_obj in sickgear.showList:
if cur_show_obj.is_anime and not cur_show_obj.paused:
xem_list = 'xem'
break
:param tvid_prodid: if self._should_refresh(xem_list):
:type tvid_prodid: AnyStr for cur_tvid in [_i for _i in sickgear.TVInfoAPI().sources
:return: if 'xem_origin' in sickgear.TVInfoAPI(_i).config]:
:rtype: OrderedDefaultdict logger.log(f'Checking for XEM scene exception updates for {sickgear.TVInfoAPI(cur_tvid).name}')
"""
exceptions_dict = OrderedDefaultdict(list)
from sickgear.tv import TVidProdid url = 'https://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\
% (sickgear.TVInfoAPI(cur_tvid).config['xem_origin'], ('&language=us', '')['xem' == xem_list])
my_db = db.DBConnection() parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
exceptions = my_db.select('SELECT show_name,season' if not parsed_json:
' FROM scene_exceptions' logger.error(f'Check scene exceptions update failed for {sickgear.TVInfoAPI(cur_tvid).name},'
' WHERE indexer = ? AND indexer_id = ?' f' Unable to get URL: {url}')
' ORDER BY season DESC, show_name DESC', continue
TVidProdid(tvid_prodid).list)
exceptions_seasons = [] if 'failure' == parsed_json['result']:
if exceptions: continue
for cur_exception in exceptions:
# order as, s*, and then season desc, show_name also desc (so years in names may fall newest on top)
if -1 == cur_exception['season']:
exceptions_dict[cur_exception['season']].append(cur_exception['show_name'])
else:
exceptions_seasons += [cur_exception]
for cur_exception in exceptions_seasons: for cur_prodid, cur_names in iteritems(parsed_json['data']):
exceptions_dict[cur_exception['season']].append(cur_exception['show_name'])
return exceptions_dict
def get_scene_seasons(tvid, prodid):
"""
return a list of season numbers that have scene exceptions
:param tvid: tvid
:type tvid: int
:param prodid: prodid
:type prodid: int or long
:return:
:rtype: List
"""
global exceptionsSeasonCache
exception_season_list = []
if (tvid, prodid) not in exceptionsSeasonCache:
my_db = db.DBConnection()
sql_result = my_db.select('SELECT DISTINCT(season) AS season'
' FROM scene_exceptions'
' WHERE indexer = ? AND indexer_id = ?',
[tvid, prodid])
if sql_result:
exception_season_list = list(set([int(x['season']) for x in sql_result]))
if (tvid, prodid) not in exceptionsSeasonCache:
exceptionsSeasonCache[(tvid, prodid)] = {}
exceptionsSeasonCache[(tvid, prodid)] = exception_season_list
else:
exception_season_list = exceptionsSeasonCache[(tvid, prodid)]
return exception_season_list
def get_scene_exception_by_name(show_name):
"""
:param show_name: show name
:type show_name: AnyStr
:return:
:rtype: Tuple[None, None, None] or Tuple[int, int or long, int]
"""
return get_scene_exception_by_name_multiple(show_name)[0]
def get_scene_exception_by_name_multiple(show_name):
"""
:param show_name: show name
:type show_name: AnyStr
:return: (tvid, prodid, season) of the exception, None if no exception is present.
:rtype: Tuple[None, None, None] or Tuple[int, int or long, int]
"""
try: try:
exception_result = name_cache.sceneNameCache[helpers.full_sanitize_scene_name(show_name)] result[(cur_tvid, int(cur_prodid))] = cur_names
return [exception_result]
except (BaseException, Exception): except (BaseException, Exception):
return [[None, None, None]] continue
self._set_last_refresh(xem_list)
def retrieve_exceptions(): return result
def _anidb_exceptions_fetcher(self):
result = {}
if self._should_refresh('anidb'):
logger.log('Checking for AniDB scene exception updates')
for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList):
try:
anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True)
except (BaseException, Exception):
continue
if anime.name and anime.name != cur_show_obj.name:
result[(cur_show_obj.tvid, cur_show_obj.prodid)] = [{anime.name: -1}]
self._set_last_refresh('anidb')
return result
def fetch_exceptions(self):
""" """
Looks up the exceptions on github, parses them into a dict, and inserts them into the Looks up release exceptions on GitHub, Xem, and Anidb, parses them into a dict, and inserts them into the
scene_exceptions table in cache.db. Also clears the scene name cache. scene_exceptions table in cache.db. Finally, clears the scene name cache.
""" """
global exception_dict, anidb_exception_dict, xem_exception_dict def _merge_exceptions(source, dest):
for cur_ex in source:
dest[cur_ex] = source[cur_ex] + ([] if cur_ex not in dest else dest[cur_ex])
exceptions = self._xem_exceptions_fetcher() # XEM scene exceptions
_merge_exceptions(self._anidb_exceptions_fetcher(), exceptions) # AniDB scene exceptions
_merge_exceptions(self._github_exceptions_fetcher(), exceptions) # GitHub stored release exceptions
exceptions_custom, count_updated_numbers, min_remain_iv = self._custom_exceptions_fetcher()
_merge_exceptions(exceptions_custom, exceptions) # Custom exceptions
is_changed_exceptions = False
# write all the exceptions we got off the net into the database
my_db = db.DBConnection()
cl = []
for cur_tvid_prodid in exceptions:
# get a list of the existing exceptions for this ID
existing_exceptions = [{_x['show_name']: _x['season']} for _x in
my_db.select('SELECT show_name, season'
' FROM [scene_exceptions]'
' WHERE indexer = ? AND indexer_id = ?',
list(cur_tvid_prodid))]
# if this exception isn't already in the DB then add it
for cur_ex_dict in filter(lambda e: e not in existing_exceptions, exceptions[cur_tvid_prodid]):
try:
exception, season = next(iteritems(cur_ex_dict))
except (BaseException, Exception):
logger.error('release exception error')
logger.error(traceback.format_exc())
continue
cl.append(['INSERT INTO [scene_exceptions]'
' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
list(cur_tvid_prodid) + [exception, season]])
is_changed_exceptions = True
if cl:
my_db.mass_action(cl)
name_cache.build_name_cache(update_only_scene=True)
# since this could invalidate the results of the cache we clear it out after updating
if is_changed_exceptions:
logger.log('Updated release exceptions')
else:
logger.log('No release exceptions update needed')
# cleanup
exceptions.clear()
return is_changed_exceptions, count_updated_numbers, min_remain_iv
def _github_exceptions_fetcher(self):
"""
Looks up the exceptions on GitHub
"""
# global exception_dict
result = {}
# exceptions are stored on GitHub pages # exceptions are stored on GitHub pages
for tvid in sickgear.TVInfoAPI().sources: for cur_tvid in sickgear.TVInfoAPI().sources:
if should_refresh(sickgear.TVInfoAPI(tvid).name): if self._should_refresh(sickgear.TVInfoAPI(cur_tvid).name):
logger.log(f'Checking for scene exception updates for {sickgear.TVInfoAPI(tvid).name}') url = sickgear.TVInfoAPI(cur_tvid).config.get('scene_url')
url = sickgear.TVInfoAPI(tvid).config.get('scene_url')
if not url: if not url:
continue continue
logger.log(f'Checking for release exception updates for {sickgear.TVInfoAPI(cur_tvid).name}')
url_data = helpers.get_url(url) url_data = helpers.get_url(url)
if None is url_data: if None is url_data:
# When None is urlData, trouble connecting to github # When None is urlData, trouble connecting to GitHub
logger.error(f'Check scene exceptions update failed. Unable to get URL: {url}') logger.error(f'Check release exceptions update failed. Unable to get URL: {url}')
continue continue
else: else:
set_last_refresh(sickgear.TVInfoAPI(tvid).name) self._set_last_refresh(sickgear.TVInfoAPI(cur_tvid).name)
# each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
for cur_line in url_data.splitlines(): for cur_line in url_data.splitlines():
cur_line = cur_line
prodid, sep, aliases = cur_line.partition(':') prodid, sep, aliases = cur_line.partition(':')
if not aliases: if not aliases:
@ -258,122 +248,15 @@ def retrieve_exceptions():
prodid = int(prodid) prodid = int(prodid)
# regex out the list of shows, taking \' into account # regex out the list of shows, taking \' into account
# alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)] alias_list = [{re.sub(r'\\(.)', r'\1', _x): -1} for _x in
alias_list = [{re.sub(r'\\(.)', r'\1', x): -1} for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)] re.findall(r"'(.*?)(?<!\\)',?", aliases)]
exception_dict[(tvid, prodid)] = alias_list result[(cur_tvid, prodid)] = alias_list
del alias_list del alias_list
del url_data del url_data
# XEM scene exceptions return result
_xem_exceptions_fetcher()
for xem_ex in xem_exception_dict:
if xem_ex in exception_dict:
exception_dict[xem_ex] = exception_dict[xem_ex] + xem_exception_dict[xem_ex]
else:
exception_dict[xem_ex] = xem_exception_dict[xem_ex]
# AniDB scene exceptions def _custom_exceptions_fetcher(self):
_anidb_exceptions_fetcher()
for anidb_ex in anidb_exception_dict:
if anidb_ex in exception_dict:
exception_dict[anidb_ex] = exception_dict[anidb_ex] + anidb_exception_dict[anidb_ex]
else:
exception_dict[anidb_ex] = anidb_exception_dict[anidb_ex]
# Custom exceptions
custom_exception_dict, cnt_updated_numbers, min_remain_iv = _custom_exceptions_fetcher()
for custom_ex in custom_exception_dict:
if custom_ex in exception_dict:
exception_dict[custom_ex] = exception_dict[custom_ex] + custom_exception_dict[custom_ex]
else:
exception_dict[custom_ex] = custom_exception_dict[custom_ex]
changed_exceptions = False
# write all the exceptions we got off the net into the database
my_db = db.DBConnection()
cl = []
for cur_tvid_prodid in exception_dict:
# get a list of the existing exceptions for this ID
existing_exceptions = [{x['show_name']: x['season']} for x in
my_db.select('SELECT show_name, season'
' FROM scene_exceptions'
' WHERE indexer = ? AND indexer_id = ?',
list(cur_tvid_prodid))]
# if this exception isn't already in the DB then add it
for cur_exception_dict in filter(lambda e: e not in existing_exceptions, exception_dict[cur_tvid_prodid]):
try:
cur_exception, cur_season = next(iteritems(cur_exception_dict))
except (BaseException, Exception):
logger.error('scene exception error')
logger.error(traceback.format_exc())
continue
cl.append(['INSERT INTO scene_exceptions'
' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
list(cur_tvid_prodid) + [cur_exception, cur_season]])
changed_exceptions = True
if cl:
my_db.mass_action(cl)
name_cache.build_name_cache(update_only_scene=True)
# since this could invalidate the results of the cache we clear it out after updating
if changed_exceptions:
logger.log('Updated scene exceptions')
else:
logger.log('No scene exceptions update needed')
# cleanup
exception_dict.clear()
anidb_exception_dict.clear()
xem_exception_dict.clear()
return changed_exceptions, cnt_updated_numbers, min_remain_iv
def update_scene_exceptions(tvid, prodid, scene_exceptions):
"""
Given a indexer_id, and a list of all show scene exceptions, update the db.
:param tvid: tvid
:type tvid: int
:param prodid: prodid
:type prodid: int or long
:param scene_exceptions:
:type scene_exceptions: List
"""
global exceptionsCache
my_db = db.DBConnection()
my_db.action('DELETE FROM scene_exceptions'
' WHERE indexer = ? AND indexer_id = ?',
[tvid, prodid])
# A change has been made to the scene exception list. Let's clear the cache, to make this visible
exceptionsCache[(tvid, prodid)] = defaultdict(list)
logger.log('Updating scene exceptions', logger.MESSAGE)
for exception in scene_exceptions:
cur_season, cur_exception = exception.split('|', 1)
try:
cur_season = int(cur_season)
except (BaseException, Exception):
logger.error('invalid scene exception: %s - %s:%s' % ('%s:%s' % (tvid, prodid), cur_season, cur_exception))
continue
exceptionsCache[(tvid, prodid)][cur_season].append(cur_exception)
my_db.action('INSERT INTO scene_exceptions'
' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
[tvid, prodid, cur_exception, cur_season])
sickgear.name_cache.build_name_cache(update_only_scene=True)
def _custom_exceptions_fetcher():
custom_exception_dict = {}
cnt_updated_numbers = 0
src_id = 'GHSG'
logger.log(f'Checking to update custom alternatives from {src_id}')
@ -383,7 +266,7 @@ def _custom_exceptions_fetcher():
file_rar = os.path.join(tmppath, 'alt.rar')
file_cache = os.path.join(dirpath, 'alt.json')
iv = 30 * 60 # min interval to fetch updates
refresh = should_refresh(src_id, iv)
refresh = self._should_refresh(src_id, iv)
fetch_data = not os.path.isfile(file_cache) or (not int(os.environ.get('NO_ALT_GET', 0)) and refresh)
if fetch_data:
if os.path.exists(tmppath):
@ -407,42 +290,56 @@ def _custom_exceptions_fetcher():
helpers.remove_file(tmppath, tree=True)
if refresh:
set_last_refresh(src_id)
self._set_last_refresh(src_id)
if not fetch_data and not os.path.isfile(file_cache):
logger.debug(f'Unable to fetch custom exceptions, skipped: {file_rar}')
return custom_exception_dict, cnt_updated_numbers, should_refresh(src_id, iv, remaining=True)
result = {}
count_updated_numbers = 0
if fetch_data or os.path.isfile(file_cache):
data = {}
try:
with io.open(file_cache) as fh:
data = json_load(fh)
result, count_updated_numbers = self._parse_custom_exceptions(data)
except(BaseException, Exception) as e:
logger.error(f'Failed to unpack json data: {file_rar} with error: {ex(e)}')
else:
logger.debug(f'Unable to fetch custom exceptions, skipped: {file_rar}')
return result, count_updated_numbers, self._should_refresh(src_id, iv, remaining=True)
@staticmethod
def _parse_custom_exceptions(data):
# type: (AnyStr) -> tuple
"""
:param data: json text
"""
# handle data
from .scene_numbering import find_scene_numbering, set_scene_numbering_helper
from .tv import TVidProdid
for tvid_prodid, season_data in iteritems(data):
show_obj = sickgear.helpers.find_show_by_id(tvid_prodid, no_mapped_ids=True)
result = {}
count_updated_numbers = 0
for cur_tvid_prodid, cur_season_data in iteritems(data):
show_obj = sickgear.helpers.find_show_by_id(cur_tvid_prodid, no_mapped_ids=True)
if not show_obj:
continue
used = set()
for for_season, data in iteritems(season_data):
for cur_for_season, cur_data in iteritems(cur_season_data):
for_season = helpers.try_int(for_season, None)
cur_for_season = helpers.try_int(cur_for_season, None)
tvid, prodid = TVidProdid(tvid_prodid).tuple
tvid, prodid = TVidProdid(cur_tvid_prodid).tuple
if data.get('n'): # alt names
if cur_data.get('n'): # alt names
custom_exception_dict.setdefault((tvid, prodid), [])
result.setdefault((tvid, prodid), [])
custom_exception_dict[(tvid, prodid)] += [{name: for_season} for name in data.get('n')]
result[(tvid, prodid)] += [{_name: cur_for_season} for _name in cur_data.get('n')]
for update in data.get('se') or []:
for cur_update in cur_data.get('se') or []:
for for_episode, se_range in iteritems(update): # scene episode alt numbers
for cur_for_episode, cur_se_range in iteritems(cur_update): # scene episode alt numbers
for_episode = helpers.try_int(for_episode, None)
cur_for_episode = helpers.try_int(cur_for_episode, None)
target_season, episode_range = se_range.split('x')
target_season, episode_range = cur_se_range.split('x')
scene_episodes = [int(x) for x in episode_range.split('-') if None is not helpers.try_int(x, None)]
scene_episodes = [int(_x) for _x in episode_range.split('-')
if None is not helpers.try_int(_x, None)]
if 2 == len(scene_episodes):
desc = scene_episodes[0] > scene_episodes[1]
@ -453,132 +350,223 @@ def _custom_exceptions_fetcher():
scene_episodes.reverse()
target_season = helpers.try_int(target_season, None)
for target_episode in scene_episodes:
for cur_target_episode in scene_episodes:
sn = find_scene_numbering(tvid, prodid, for_season, for_episode)
sn = find_scene_numbering(tvid, prodid, cur_for_season, cur_for_episode)
used.add((for_season, for_episode, target_season, target_episode))
used.add((cur_for_season, cur_for_episode, target_season, cur_target_episode))
if sn and ((for_season, for_episode) + sn) not in used \
if sn and ((cur_for_season, cur_for_episode) + sn) not in used \
and (for_season, for_episode) not in used:
and (cur_for_season, cur_for_episode) not in used:
logger.debug(f'Skipped setting "{show_obj.unique_name}" episode {for_season}x{for_episode}'
f' to target a release {target_season}x{target_episode}'
f' because set to {sn[0]}x{sn[1]}')
logger.debug(f'Skipped setting "{show_obj.unique_name}"'
f' episode {cur_for_season}x{cur_for_episode}'
f' to target a release {target_season}x{cur_target_episode}'
f' because set to {sn[0]}x{sn[1]}')
else:
used.add((for_season, for_episode))
used.add((cur_for_season, cur_for_episode))
if not sn or sn != (target_season, target_episode): # not already set
if not sn or sn != (target_season, cur_target_episode): # not already set
result = set_scene_numbering_helper(
tvid, prodid, for_season=for_season, for_episode=for_episode,
tvid, prodid, for_season=cur_for_season, for_episode=cur_for_episode,
scene_season=target_season, scene_episode=target_episode)
scene_season=target_season, scene_episode=cur_target_episode)
if result.get('success'):
cnt_updated_numbers += 1
count_updated_numbers += 1
for_episode = for_episode + 1
cur_for_episode += 1
return custom_exception_dict, cnt_updated_numbers, should_refresh(src_id, iv, remaining=True)
return result, count_updated_numbers
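For orientation, a minimal sketch of the json shape that _parse_custom_exceptions appears to consume, inferred from the loops above; the ids, names, and episode ranges here are hypothetical:

data = {
    '1:12345': {                     # 'tvid:prodid' key, unpacked via TVidProdid
        '2': {                       # the 'for' season
            'n': ['Alt Show Name'],  # alternative names recorded against that season
            'se': [{'3': '1x7-9'}],  # episode 3 targets releases 1x7 through 1x9
        },
    },
}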
def _anidb_exceptions_fetcher():
global anidb_exception_dict
if should_refresh('anidb'):
logger.log('Checking for AniDB scene exception updates')
for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList):
try:
anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True)
except (BaseException, Exception):
continue
if anime.name and anime.name != cur_show_obj.name:
anidb_exception_dict[(cur_show_obj.tvid, cur_show_obj.prodid)] = [{anime.name: -1}]
set_last_refresh('anidb')
return anidb_exception_dict
def _xem_exceptions_fetcher():
global xem_exception_dict
xem_list = 'xem_us'
for cur_show_obj in sickgear.showList:
if cur_show_obj.is_anime and not cur_show_obj.paused:
xem_list = 'xem'
break
if should_refresh(xem_list):
for tvid in [i for i in sickgear.TVInfoAPI().sources if 'xem_origin' in sickgear.TVInfoAPI(i).config]:
logger.log(f'Checking for XEM scene exception updates for {sickgear.TVInfoAPI(tvid).name}')
url = 'https://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\
% (sickgear.TVInfoAPI(tvid).config['xem_origin'], ('&language=us', '')['xem' == xem_list])
parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
if not parsed_json:
logger.error(f'Check scene exceptions update failed for {sickgear.TVInfoAPI(tvid).name},'
f' Unable to get URL: {url}')
continue
if 'failure' == parsed_json['result']:
continue
for prodid, names in iteritems(parsed_json['data']):
try:
xem_exception_dict[(tvid, int(prodid))] = names
except (BaseException, Exception):
continue
set_last_refresh(xem_list)
return xem_exception_dict
def _xem_get_ids(infosrc_name, xem_origin):
"""
:param infosrc_name:
:type infosrc_name: AnyStr
:param xem_origin:
:type xem_origin: AnyStr
:return:
:rtype: List
"""
xem_ids = []
@staticmethod
def _should_refresh(name, max_refresh_age_secs=86400, remaining=False):
# type: (AnyStr, int, bool) -> Union[bool, int]
"""
:param name: name
:param max_refresh_age_secs:
:param remaining: True to return remaining seconds
:return:
"""
my_db = db.DBConnection()
rows = my_db.select('SELECT last_refreshed FROM [scene_exceptions_refresh] WHERE list = ?', [name])
if rows:
last_refresh = int(rows[0]['last_refreshed'])
if remaining:
time_left = (last_refresh + max_refresh_age_secs - SGDatetime.timestamp_near())
return (0, time_left)[time_left > 0]
return SGDatetime.timestamp_near() > last_refresh + max_refresh_age_secs
return True
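A rough usage sketch of the refresh bookkeeping above; the list name and interval are hypothetical:

rm = ReleaseMap()
due = rm._should_refresh('xem', max_refresh_age_secs=15 * 60)  # True once 15 minutes have elapsed, or if never refreshed
wait = rm._should_refresh('xem', max_refresh_age_secs=15 * 60, remaining=True)  # seconds until the next refresh is due, else 0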
url = 'https://thexem.info/map/havemap?origin=%s' % xem_origin
task = 'Fetching show ids with%s xem scene mapping%s for origin'
logger.log(f'{task % ("", "s")} {infosrc_name}')
parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
if not isinstance(parsed_json, dict) or not parsed_json:
logger.error(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
f' Unable to get URL: {url}')
@staticmethod
def _set_last_refresh(name):
# type: (AnyStr) -> None
"""
:param name: name
:type name: AnyStr
"""
my_db = db.DBConnection()
my_db.upsert('scene_exceptions_refresh',
{'last_refreshed': SGDatetime.timestamp_near()},
{'list': name})
@staticmethod
def update_exceptions(show_obj, release_exceptions):
# type: (TVShow, list) -> None
"""
Given a show object and a list of alternative names,
update MEMCACHE['release_map'], the db, and rebuild name_cache.
"""
logger.log(f'Updating release exceptions for {show_obj.unique_name or show_obj.name}')
my_db = db.DBConnection()
my_db.action('DELETE FROM [scene_exceptions]'
' WHERE indexer = ? AND indexer_id = ?',
[show_obj.tvid, show_obj.prodid])
# A change has been made to the scene exception list. Clear the cache, to make this visible
MEMCACHE['release_map'][(show_obj.tvid, show_obj.prodid)] = defaultdict(list)
for cur_ex in release_exceptions:
season, alt_name = cur_ex.split('|', 1)
try:
season = int(season)
except (BaseException, Exception):
logger.error(f'invalid season for release exception: {show_obj.tvid_prodid} - {season}:{alt_name}')
continue
MEMCACHE['release_map'][(show_obj.tvid, show_obj.prodid)][season].append(alt_name)
my_db.action('INSERT INTO [scene_exceptions]'
' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
[show_obj.tvid, show_obj.prodid, alt_name, season])
sickgear.name_cache.build_name_cache(update_only_scene=True)
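A usage sketch for update_exceptions: each entry is a '<season>|<name>' string, mirroring the split('|', 1) above, with season -1 holding names valid for any season (example values hypothetical):

ReleaseMap().update_exceptions(show_obj, ['-1|An Alternate Title', '2|A Season Two Title'])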
def has_season_exceptions(self, tvid, prodid, season):
# type: (int, int, int) -> bool
self.get_alt_names(tvid, prodid, season)
return (-1 < season) and season in MEMCACHE['release_map'].get((tvid, prodid), {})
def get_alt_names(self, tvid, prodid, season=-1):
# type: (int, int, Optional[int]) -> List
"""
Return a list of alternative show names from the db, updating MEMCACHE['release_map'],
for all seasons or for a specific show season.
:param tvid: show tvid
:param prodid: show prodid
:param season: optional season number
"""
alt_names = MEMCACHE['release_map'].get((tvid, prodid), {}).get(season, [])
if not alt_names:
my_db = db.DBConnection()
exceptions = my_db.select('SELECT show_name'
' FROM [scene_exceptions]'
' WHERE indexer = ? AND indexer_id = ?'
' AND season = ?',
[tvid, prodid, season])
if exceptions:
alt_names = list(set([_ex['show_name'] for _ex in exceptions]))
if (tvid, prodid) not in MEMCACHE['release_map']:
MEMCACHE['release_map'][(tvid, prodid)] = {}
MEMCACHE['release_map'][(tvid, prodid)][season] = alt_names
if 1 == season: # if we were looking for season 1 we can add generic names
alt_names += self.get_alt_names(tvid, prodid)
return alt_names
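A usage sketch of the lookup above (the tvid/prodid pair follows the test case later in this changeset); asking for season 1 also appends the generic set:

rm = ReleaseMap()
generic_names = rm.get_alt_names(1, 79604)       # names stored against season -1
s1_names = rm.get_alt_names(1, 79604, season=1)  # season 1 names plus the generic names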
@staticmethod
def get_show_exceptions(tvid_prodid):
# type: (AnyStr) -> OrderedDefaultdict
"""
return a scene exceptions dict for a show
:param tvid_prodid: a show tvid:prodid
"""
exceptions_dict = OrderedDefaultdict(list)
from .tv import TVidProdid
my_db = db.DBConnection()
exceptions = my_db.select('SELECT show_name, season'
' FROM [scene_exceptions]'
' WHERE indexer = ? AND indexer_id = ?'
' ORDER BY season DESC, show_name DESC',
TVidProdid(tvid_prodid).list)
exceptions_seasons = []
if exceptions:
for cur_ex in exceptions:
# order as: s* (season -1) first, then season desc, show_name also desc (so years in names fall newest on top)
if -1 == cur_ex['season']:
exceptions_dict[-1].append(cur_ex['show_name'])
else:
if 'success' == parsed_json.get('result', '') and 'data' in parsed_json:
xem_ids = list(set(filter(lambda prodid: 0 < prodid,
map(lambda pid: helpers.try_int(pid), parsed_json['data']))))
if 0 == len(xem_ids):
logger.warning(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
f' no data items parsed from URL: {url}')
logger.log(f'Finished {task.lower() % (f" {len(xem_ids)}", helpers.maybe_plural(xem_ids))} {infosrc_name}')
return xem_ids
else:
exceptions_seasons += [cur_ex]
for cur_ex in exceptions_seasons:
exceptions_dict[cur_ex['season']].append(cur_ex['show_name'])
return exceptions_dict
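For reference, an assumed result shape given the ordering above, season -1 entries first, then seasons descending (names hypothetical):

exceptions = ReleaseMap().get_show_exceptions('1:79604')
# OrderedDefaultdict({-1: ['Generic Alt Name'], 3: ['Alt Name 2022'], 1: ['Alt Name']})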
@staticmethod
def get_exception_seasons(tvid, prodid):
# type: (int, int) -> List[int]
"""
return a list of season numbers that have alternative names
:param tvid: show tvid
:param prodid: show prodid
"""
exception_seasons = MEMCACHE['release_map_season'].get((tvid, prodid), [])
if not exception_seasons:
my_db = db.DBConnection()
sql_result = my_db.select('SELECT DISTINCT(season) AS season'
' FROM [scene_exceptions]'
' WHERE indexer = ? AND indexer_id = ?',
[tvid, prodid])
if sql_result:
exception_seasons = list(set([int(_x['season']) for _x in sql_result]))
if (tvid, prodid) not in MEMCACHE['release_map_season']:
MEMCACHE['release_map_season'][(tvid, prodid)] = {}
MEMCACHE['release_map_season'][(tvid, prodid)] = exception_seasons
return exception_seasons
def get_xem_ids():
global xem_ids_list
for tvid, name in iteritems(sickgear.TVInfoAPI().xem_supported_sources):
xem_ids = _xem_get_ids(name, sickgear.TVInfoAPI(tvid).config['xem_origin'])
if len(xem_ids):
xem_ids_list[tvid] = xem_ids
def get_scene_exception_by_name(show_name):
# type: (AnyStr) -> List[None, None, None] or List[int, int, int]
"""
:param show_name: show name
"""
return _get_scene_exception_by_name_multiple(show_name)[0]
def _get_scene_exception_by_name_multiple(show_name):
# type: (AnyStr) -> List[List[None, None, None] or List[int, int, int]]
"""
:param show_name: show name
:return: (tvid, prodid, season) of the exception, None if no exception is present.
"""
try:
exception_result = name_cache.sceneNameCache[helpers.full_sanitize_scene_name(show_name)]
except (BaseException, Exception):
return [[None, None, None]]
return [exception_result]
def has_abs_episodes(ep_obj=None, name=None):
# type: (Optional[sickgear.tv.TVEpisode], Optional[AnyStr]) -> bool
"""
:param ep_obj: episode object
:type ep_obj: sickgear.tv.TVEpisode or None
:param name: name
:type name: AnyStr
:return:
:rtype: bool
"""
return any((name or ep_obj.show_obj.name or '').lower().startswith(x.lower()) for x in [
return any((name or ep_obj.show_obj.name or '').lower().startswith(_x.lower()) for _x in [
'The Eighties', 'The Making of the Mob', 'The Night Of', 'Roots 2016', 'Trepalium'
])

View file

@ -19,17 +19,14 @@
# @copyright: Dermot Buckley
#
import datetime
import traceback
from sqlite3 import Row
from exceptions_helper import ex
import sickgear
from . import db, logger
from .helpers import try_int
from .scene_exceptions import xem_ids_list
from .sgdatetime import SGDatetime
# noinspection PyUnreachableCode
@ -774,7 +771,8 @@ def xem_refresh(tvid, prodid, force=False):
tvid, prodid = int(tvid), int(prodid)
tvinfo = sickgear.TVInfoAPI(tvid)
if 'xem_origin' not in tvinfo.config or prodid not in xem_ids_list.get(tvid, []):
if 'xem_origin' not in tvinfo.config \
or prodid not in sickgear.scene_exceptions.MEMCACHE['release_map_xem'].get(tvid, []):
return
xem_origin = tvinfo.config['xem_origin']

View file

@ -24,10 +24,12 @@ import traceback
from . import logger
from exceptions_helper import ex
import sickgear
class Scheduler(threading.Thread):
def __init__(self, action, cycle_time=datetime.timedelta(minutes=10), run_delay=datetime.timedelta(minutes=0),
start_time=None, thread_name="ScheduledThread", silent=True, prevent_cycle_run=None, paused=False):
start_time=None, thread_name='ScheduledThread', silent=True, prevent_cycle_run=None, paused=False):
super(Scheduler, self).__init__()
self.last_run = datetime.datetime.now() + run_delay - cycle_time
@ -41,10 +43,18 @@ class Scheduler(threading.Thread):
self._stopper = threading.Event()
self._unpause = threading.Event()
if not paused:
self._unpause.set()
self.unpause()
self.lock = threading.Lock()
self.force = False
@property
def is_running_job(self):
# type: (...) -> bool
"""
Return running state of the scheduled action
"""
return self.action.amActive
def pause(self):
self._unpause.clear()
@ -69,7 +79,7 @@ class Scheduler(threading.Thread):
return self.cycle_time - (datetime.datetime.now() - self.last_run)
def force_run(self):
if not self.action.amActive:
if not self.is_running_job:
self.force = True
return True
return False
@ -139,3 +149,72 @@ class Scheduler(threading.Thread):
# exiting thread
self._stopper.clear()
self._unpause.clear()
@staticmethod
def blocking_jobs():
# type: (...) -> bool
"""
Return description of running jobs, or False if none are running
These jobs should prevent a restart/shutdown while running.
"""
job_report = []
if sickgear.process_media_scheduler.is_running_job:
job_report.append('Media processing')
if sickgear.update_show_scheduler.is_running_job:
job_report.append(f'{("U", "u")[len(job_report)]}pdating shows data')
# this idea is not ready for production. issues identified...
# 1. many are just the queue filling process, so this doesn't actually prevent restart/shutdown during those
# 2. in case something goes wrong or there is a bad bug in the code, the restart would be prevented
# (meaning no auto- / manual update via ui, user is forced to kill the process, manually update and restart)
# 3. just by bad timing, the autoupdate process may run at the same time as another blocking thread = updates but
# never restarts
# therefore, with these issues, the next two lines cannot allow this feature to be brought into service :(
# if len(job_report):
# return '%s %s running' % (' and '.join(job_report), ('are', 'is')[1 == len(job_report)])
return False
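Although the report is disabled for now, call sites already treat the return value as a guard, as the version_checker and webserve hunks below show; the pattern is:

response = Scheduler.blocking_jobs()
if response:
    logger.log(f'Update skipped because {response}', logger.DEBUG)  # then bail out or defer the action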
class Job(object):
"""
The job class centralises tasks with states
"""
def __init__(self, func, silent=False, thread_lock=False, reentrant_lock=False, args=(), kwargs=None):
self.amActive = False
self._func = func
self._silent = silent
self._args = args
self._kwargs = (kwargs, {})[None is kwargs]
if thread_lock:
self.lock = threading.Lock()
elif reentrant_lock:
self.lock = threading.RLock()
def run(self):
if self.amActive and self.__class__.__name__ in ('BacklogSearcher', 'MediaProcess'):
logger.log(u'%s is still running, not starting it again' % self.__class__.__name__)
return
if self._func:
result, re_raise = None, False
try:
self.amActive = True
result = self._func(*self._args, **self._kwargs)
except(BaseException, Exception) as e:
re_raise = e
finally:
self.amActive = False
not self._silent and logger.log(u'%s(%s) completed' % (self.__class__.__name__, self._func.__name__))
if re_raise:
raise re_raise
return result
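A minimal sketch of the conversion pattern applied in the files below (the class name is hypothetical): a subclass hands its work callable to Job, which toggles amActive around the call and re-raises any error:

class ExampleSearcher(Job):
    def __init__(self):
        super(ExampleSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)

    def job_run(self):
        pass  # do the actual work; Job.run() manages amActive and completion logging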

View file

@ -17,13 +17,13 @@
from __future__ import with_statement, division
import datetime
import threading
from math import ceil
import sickgear
from . import db, logger, scheduler, search_queue, ui
from .helpers import find_show_by_id
from .providers.generic import GenericProvider
from .scheduler import Job
from .search import wanted_episodes
from .sgdatetime import SGDatetime
from .tv import TVidProdid, TVEpisode, TVShow
@ -74,13 +74,12 @@ class BacklogSearchScheduler(scheduler.Scheduler):
return self.action.nextBacklog - now if self.action.nextBacklog > now else datetime.timedelta(seconds=0)
class BacklogSearcher(object):
class BacklogSearcher(Job):
def __init__(self):
super(BacklogSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
self.last_backlog = self._get_last_backlog()
self.cycle_time = sickgear.BACKLOG_PERIOD
self.lock = threading.Lock()
self.amActive = False # type: bool
self.amPaused = False # type: bool
self.amWaiting = False # type: bool
self.forcetype = NORMAL_BACKLOG # type: int
@ -196,9 +195,6 @@ class BacklogSearcher(object):
:return: nothing
:rtype: None
"""
if self.amActive and not which_shows:
logger.debug('Backlog is still running, not starting it again')
return
if which_shows:
show_list = which_shows
@ -225,7 +221,6 @@ class BacklogSearcher(object):
return
self._get_last_backlog()
self.amActive = True
self.amPaused = False
cur_date = datetime.date.today().toordinal()
@ -328,7 +323,6 @@ class BacklogSearcher(object):
if standard_backlog and not any_torrent_enabled:
self._set_last_runtime(now)
self.amActive = False
self._reset_progress_indicator()
@staticmethod
@ -401,7 +395,7 @@ class BacklogSearcher(object):
# noinspection SqlConstantCondition
my_db.action('UPDATE info SET last_backlog=%s WHERE 1=1' % when)
def run(self):
def job_run(self):
try:
force_type = self.forcetype
force = self.force
@ -409,5 +403,4 @@ class BacklogSearcher(object):
self.force = False
self.search_backlog(force_type=force_type, force=force)
except (BaseException, Exception):
self.amActive = False
raise

View file

@ -16,24 +16,21 @@
from __future__ import with_statement
import threading
import sickgear
from .scheduler import Job
class ProperSearcher(object):
class ProperSearcher(Job):
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
super(ProperSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
@staticmethod
def is_enabled():
# type: (...) -> bool
return sickgear.DOWNLOAD_PROPERS
def run(self):
self.amActive = True
@staticmethod
def job_run():
propersearch_queue_item = sickgear.search_queue.ProperSearchQueueItem()
sickgear.search_queue_scheduler.action.add_item(propersearch_queue_item)
self.amActive = False

View file

@ -16,19 +16,15 @@
from __future__ import with_statement
import threading
import sickgear
from .scheduler import Job
class RecentSearcher(object):
class RecentSearcher(Job):
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
super(RecentSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
def run(self):
def job_run(self):
self.amActive = True
recentsearch_queue_item = sickgear.search_queue.RecentSearchQueueItem()
sickgear.search_queue_scheduler.action.add_item(recentsearch_queue_item)
self.amActive = False

View file

@ -25,7 +25,7 @@ import sickgear
from . import common, db, logger
from .helpers import sanitize_scene_name
from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from .scene_exceptions import get_scene_exceptions
from .scene_exceptions import ReleaseMap
from sg_helpers import scantree
from _23 import quote_plus
@ -384,10 +384,10 @@ def all_possible_show_names(show_obj, season=-1, force_anime=False):
:return: a list of all the possible show names
"""
show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:]
show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid, season)[:]
if not show_names: # if we don't have any season specific exceptions fallback to generic exceptions
if -1 != season and not show_names: # fallback to generic exceptions if no season specific exceptions
season = -1
show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:]
show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid)[:]
if -1 == season:
show_names.append(show_obj.name)

View file

@ -70,7 +70,7 @@ class ShowQueue(generic_queue.GenericQueue):
def check_events(self):
if self.daily_update_running and \
not (self.is_show_update_running() or sickgear.show_update_scheduler.action.amActive):
not (self.is_show_update_running() or sickgear.update_show_scheduler.is_running_job):
self.execute_events(DAILY_SHOW_UPDATE_FINISHED_EVENT)
self.daily_update_running = False
@ -1139,7 +1139,7 @@ class QueueItemAdd(ShowQueueItem):
self.show_obj.tvid, self.show_obj.prodid)
# if "scene" numbering is disabled during add show, output availability to log
if None is not self.scene and not self.show_obj.scene and \
self.show_obj.prodid in sickgear.scene_exceptions.xem_ids_list[self.show_obj.tvid]:
self.show_obj.prodid in sickgear.scene_exceptions.MEMCACHE['release_map_xem'][self.show_obj.tvid]:
logger.log('No scene number mappings found at TheXEM. Therefore, episode scene numbering disabled, '
'edit show and enable it to manually add custom numbers for search and media processing.')
try:
@ -1179,7 +1179,7 @@ class QueueItemAdd(ShowQueueItem):
# if started with WANTED eps then run the backlog
if WANTED == self.default_status or items_wanted:
logger.log('Launching backlog for this show since episodes are WANTED')
sickgear.backlog_search_scheduler.action.search_backlog([self.show_obj], prevent_same=True)
sickgear.search_backlog_scheduler.action.search_backlog([self.show_obj], prevent_same=True)
ui.notifications.message('Show added/search', 'Adding and searching for episodes of' + msg)
else:
ui.notifications.message('Show added', 'Adding' + msg)
@ -1253,7 +1253,7 @@ class QueueItemRefresh(ShowQueueItem):
self.show_obj.populate_cache(self.force_image_cache)
# Load XEM data to DB for show
if self.show_obj.prodid in sickgear.scene_exceptions.xem_ids_list[self.show_obj.tvid]:
if self.show_obj.prodid in sickgear.scene_exceptions.MEMCACHE['release_map_xem'][self.show_obj.tvid]:
sickgear.scene_numbering.xem_refresh(self.show_obj.tvid, self.show_obj.prodid)
if 'pausestatus_after' in self.kwargs and None is not self.kwargs['pausestatus_after']:

View file

@ -23,6 +23,7 @@ from exceptions_helper import ex
import sickgear
from . import db, logger, network_timezones, properFinder, ui
from .scheduler import Job
# noinspection PyUnreachableCode
if False:
@ -54,13 +55,12 @@ def clean_ignore_require_words():
pass
class ShowUpdater(object):
class ShowUpdater(Job):
def __init__(self):
self.amActive = False
super(ShowUpdater, self).__init__(self.job_run, kwargs={})
def run(self):
self.amActive = True
@staticmethod
def job_run():
try:
update_datetime = datetime.datetime.now()
@ -89,14 +89,14 @@ class ShowUpdater(object):
# update xem id lists
try:
sickgear.scene_exceptions.get_xem_ids()
sickgear.scene_exceptions.ReleaseMap().fetch_xem_ids()
except (BaseException, Exception):
logger.error('xem id list update error')
logger.error(traceback.format_exc())
# update scene exceptions
try:
sickgear.scene_exceptions.retrieve_exceptions()
sickgear.scene_exceptions.ReleaseMap().fetch_exceptions()
except (BaseException, Exception):
logger.error('scene exceptions update error')
logger.error(traceback.format_exc())
@ -147,7 +147,7 @@ class ShowUpdater(object):
import threading
try:
sickgear.background_mapping_task = threading.Thread(
name='MAPPINGSUPDATER', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True})
name='MAPPINGUPDATES', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True})
sickgear.background_mapping_task.start()
except (BaseException, Exception):
logger.error('missing mapped ids update error')
@ -224,8 +224,8 @@ class ShowUpdater(object):
logger.log('Added all shows to show queue for full update')
finally:
self.amActive = False
except(BaseException, Exception):
pass
def __del__(self):
pass

View file

@ -19,6 +19,7 @@ import datetime
from . import db, helpers, logger
from .common import *
from .scheduler import Job
import sickgear
@ -103,24 +104,22 @@ def subtitle_language_filter():
return [language for language in subliminal.language.LANGUAGES if language[2] != ""]
class SubtitlesFinder(object):
class SubtitlesFinder(Job):
"""
The SubtitlesFinder will be executed every hour but will not necessarily search
and download subtitles. Only if the defined rule is true
"""
def __init__(self):
self.amActive = False
super(SubtitlesFinder, self).__init__(self.job_run, kwargs={}, thread_lock=True)
@staticmethod
def is_enabled():
return sickgear.USE_SUBTITLES
def run(self):
def job_run(self):
if self.is_enabled():
self.amActive = True
self._main()
self.amActive = False
def _main(self):
if 1 > len(sickgear.subtitles.get_enabled_service_list()):

View file

@ -29,6 +29,7 @@ from exceptions_helper import ex
import sickgear
from . import logger, notifiers, ui
from .scheduler import (Scheduler, Job)
from .piper import check_pip_outdated
from sg_helpers import cmdline_runner, get_url
@ -41,12 +42,14 @@ if False:
from typing import Tuple
class PackagesUpdater(object):
class PackagesUpdater(Job):
def __init__(self):
super(PackagesUpdater, self).__init__(self.job_run, kwargs={})
self.install_type = 'Python package updates'
def run(self, force=False):
def job_run(self, force=False):
if not sickgear.EXT_UPDATES \
and self.check_for_new_version(force) \
and sickgear.UPDATE_PACKAGES_AUTO:
@ -64,6 +67,11 @@ class PackagesUpdater(object):
:returns: True when package install/updates are available
:rtype: bool
"""
response = Scheduler.blocking_jobs()
if response:
logger.log(f'Update skipped because {response}', logger.DEBUG)
return False
if force and not sickgear.UPDATE_PACKAGES_MENU:
logger.log('Checking not enabled from menu action for %s' % self.install_type)
return False
@ -100,12 +108,14 @@ class PackagesUpdater(object):
return True
class SoftwareUpdater(object):
class SoftwareUpdater(Job):
"""
Version check class meant to run as a thread object with the sg scheduler.
"""
def __init__(self):
super(SoftwareUpdater, self).__init__(self.job_run, kwargs={})
self._min_python = (100, 0) # set default to absurdly high to prevent update
self.install_type = self.find_install_type()
@ -150,7 +160,7 @@ class SoftwareUpdater(object):
except (BaseException, Exception):
pass
def run(self, force=False):
def job_run(self, force=False):
# set current branch version
sickgear.BRANCH = self.get_branch()
@ -219,6 +229,11 @@ class SoftwareUpdater(object):
# update branch with current config branch value
self.updater.branch = sickgear.BRANCH
response = Scheduler.blocking_jobs()
if response:
logger.log(f'Update skipped because {response}', logger.DEBUG)
return False
if not self.is_updatable:
self._log_cannot_update()
return False

View file

@ -14,17 +14,15 @@
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import threading
import sickgear
from .scheduler import Job
from sickgear import watchedstate_queue
class WatchedStateUpdater(object):
class WatchedStateUpdater(Job):
def __init__(self, name, queue_item):
self.amActive = False
self.lock = threading.Lock()
super(WatchedStateUpdater, self).__init__(self.job_run, silent=True, kwargs={}, thread_lock=True)
self.name = name
self.queue_item = queue_item
@ -32,13 +30,15 @@ class WatchedStateUpdater(object):
def prevent_run(self):
return sickgear.watched_state_queue_scheduler.action.is_in_queue(self.queue_item)
def run(self):
@staticmethod
def is_enabled():
return True
def job_run(self):
# noinspection PyUnresolvedReferences
if self.is_enabled():
self.amActive = True
new_item = self.queue_item()
sickgear.watched_state_queue_scheduler.action.add_item(new_item)
self.amActive = False
class EmbyWatchedStateUpdater(WatchedStateUpdater):

View file

@ -48,6 +48,7 @@ from .indexers import indexer_api, indexer_config
from .indexers.indexer_config import *
from lib.tvinfo_base.exceptions import *
from .scene_numbering import set_scene_numbering_helper
from .scheduler import Scheduler
from .search_backlog import FORCED_BACKLOG
from .show_updater import clean_ignore_require_words
from .sgdatetime import SGDatetime
@ -1915,7 +1916,7 @@ class CMD_SickGearPostProcess(ApiCall):
if not self.type:
self.type = 'manual'
data = processTV.processDir(self.path, process_method=self.process_method, force=self.force_replace,
data = processTV.process_dir(self.path, process_method=self.process_method, force=self.force_replace,
force_replace=self.is_priority, failed=self.failed, pp_type=self.type,
client=self.client)
@ -2074,7 +2075,7 @@ class CMD_SickGearCheckScheduler(ApiCall):
backlogPaused = sickgear.search_queue_scheduler.action.is_backlog_paused()
backlogRunning = sickgear.search_queue_scheduler.action.is_backlog_in_progress()
nextBacklog = sickgear.backlog_search_scheduler.next_run().strftime(dateFormat)
nextBacklog = sickgear.search_backlog_scheduler.next_run().strftime(dateFormat)
data = {"backlog_is_paused": int(backlogPaused), "backlog_is_running": int(backlogRunning),
"last_backlog": (0 < len(sql_result) and _ordinal_to_dateForm(sql_result[0]["last_backlog"])) or '',
@ -2177,15 +2178,15 @@ class CMD_SickGearForceSearch(ApiCall):
""" force the specified search type to run """
result = None
if 'recent' == self.searchtype and not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress() \
and not sickgear.recent_search_scheduler.action.amActive:
and not sickgear.search_recent_scheduler.is_running_job:
result = sickgear.recent_search_scheduler.force_run()
result = sickgear.search_recent_scheduler.force_run()
elif 'backlog' == self.searchtype and not sickgear.search_queue_scheduler.action.is_backlog_in_progress() \
and not sickgear.backlog_search_scheduler.action.amActive:
and not sickgear.search_backlog_scheduler.is_running_job:
sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG)
sickgear.search_backlog_scheduler.force_search(force_type=FORCED_BACKLOG)
result = True
elif 'proper' == self.searchtype and not sickgear.search_queue_scheduler.action.is_propersearch_in_progress() \
and not sickgear.proper_finder_scheduler.action.amActive:
and not sickgear.search_propers_scheduler.is_running_job:
result = sickgear.proper_finder_scheduler.force_run()
result = sickgear.search_propers_scheduler.force_run()
if result:
return _responds(RESULT_SUCCESS, msg='%s search successfully forced' % self.searchtype)
return _responds(RESULT_FAILURE,
@ -2499,6 +2500,13 @@ class CMD_SickGearRestart(ApiCall):
def run(self):
""" restart sickgear """
response = Scheduler.blocking_jobs()
if response:
msg = f'Restart aborted from API because {response.lower()}'
logger.log(msg, logger.DEBUG)
return _responds(RESULT_FAILURE, msg=msg)
sickgear.restart(soft=False)
return _responds(RESULT_SUCCESS, msg="SickGear is restarting...")
@ -2817,6 +2825,13 @@ class CMD_SickGearShutdown(ApiCall):
def run(self):
""" shutdown sickgear """
response = Scheduler.blocking_jobs()
if response:
msg = f'Shutdown aborted from API because {response.lower()}'
logger.log(msg, logger.DEBUG)
return _responds(RESULT_FAILURE, msg=msg)
sickgear.events.put(sickgear.events.SystemEvent.SHUTDOWN)
return _responds(RESULT_SUCCESS, msg="SickGear is shutting down...")
@ -4668,10 +4683,10 @@ class CMD_SickGearShowsForceUpdate(ApiCall):
def run(self):
""" force the daily show update now """
if sickgear.show_queue_scheduler.action.is_show_update_running() \
or sickgear.show_update_scheduler.action.amActive:
or sickgear.update_show_scheduler.is_running_job:
return _responds(RESULT_FAILURE, msg="show update already running.")
result = sickgear.show_update_scheduler.force_run()
result = sickgear.update_show_scheduler.force_run()
if result:
return _responds(RESULT_SUCCESS, msg="daily show update started")
return _responds(RESULT_FAILURE, msg="can't start show update currently")

View file

@ -65,6 +65,7 @@ from .name_parser.parser import InvalidNameException, InvalidShowException, Name
from .providers import newznab, rsstorrent from .providers import newznab, rsstorrent
from .scene_numbering import get_scene_absolute_numbering_for_show, get_scene_numbering_for_show, \ from .scene_numbering import get_scene_absolute_numbering_for_show, get_scene_numbering_for_show, \
get_xem_absolute_numbering_for_show, get_xem_numbering_for_show, set_scene_numbering_helper get_xem_absolute_numbering_for_show, get_xem_numbering_for_show, set_scene_numbering_helper
from .scheduler import Scheduler
from .search_backlog import FORCED_BACKLOG from .search_backlog import FORCED_BACKLOG
from .sgdatetime import SGDatetime from .sgdatetime import SGDatetime
from .show_name_helpers import abbr_showname from .show_name_helpers import abbr_showname
@ -1327,8 +1328,8 @@ class MainHandler(WebHandler):
now = datetime.datetime.now() now = datetime.datetime.now()
events = [ events = [
('recent', sickgear.recent_search_scheduler.time_left), ('recent', sickgear.search_recent_scheduler.time_left),
('backlog', sickgear.backlog_search_scheduler.next_backlog_timeleft), ('backlog', sickgear.search_backlog_scheduler.next_backlog_timeleft),
] ]
if sickgear.DOWNLOAD_PROPERS: if sickgear.DOWNLOAD_PROPERS:
@ -2070,6 +2071,9 @@ class Home(MainHandler):
if str(pid) != str(sickgear.PID): if str(pid) != str(sickgear.PID):
return self.redirect('/home/') return self.redirect('/home/')
if self.maybe_ignore('Shutdown'):
return
t = PageTemplate(web_handler=self, file='restart.tmpl') t = PageTemplate(web_handler=self, file='restart.tmpl')
t.shutdown = True t.shutdown = True
@ -2082,6 +2086,9 @@ class Home(MainHandler):
if str(pid) != str(sickgear.PID): if str(pid) != str(sickgear.PID):
return self.redirect('/home/') return self.redirect('/home/')
if self.maybe_ignore('Restart'):
return
t = PageTemplate(web_handler=self, file='restart.tmpl') t = PageTemplate(web_handler=self, file='restart.tmpl')
t.shutdown = False t.shutdown = False
@ -2089,6 +2096,17 @@ class Home(MainHandler):
return t.respond() return t.respond()
def maybe_ignore(self, task):
response = Scheduler.blocking_jobs()
if response:
task and logger.log('%s aborted because %s' % (task, response.lower()), logger.DEBUG)
self.redirect(self.request.headers['Referer'])
if task:
ui.notifications.message(u'Fail %s because %s, please try later' % (task.lower(), response.lower()))
return True
return False
def update(self, pid=None): def update(self, pid=None):
if str(pid) != str(sickgear.PID): if str(pid) != str(sickgear.PID):
@ -2326,15 +2344,15 @@ class Home(MainHandler):
t.season_min = ([], [1])[2 < t.latest_season] + [t.latest_season] t.season_min = ([], [1])[2 < t.latest_season] + [t.latest_season]
t.other_seasons = (list(set(all_seasons) - set(t.season_min)), [])[display_show_minimum] t.other_seasons = (list(set(all_seasons) - set(t.season_min)), [])[display_show_minimum]
t.seasons = [] t.seasons = []
for x in all_seasons: for cur_season in all_seasons:
t.seasons += [(x, [None] if x not in (t.season_min + t.other_seasons) else my_db.select( t.seasons += [(cur_season, [None] if cur_season not in (t.season_min + t.other_seasons) else my_db.select(
'SELECT *' 'SELECT *'
' FROM tv_episodes' ' FROM tv_episodes'
' WHERE indexer = ? AND showid = ?' ' WHERE indexer = ? AND showid = ?'
' AND season = ?' ' AND season = ?'
' ORDER BY episode DESC', ' ORDER BY episode DESC',
[show_obj.tvid, show_obj.prodid, x] [show_obj.tvid, show_obj.prodid, cur_season]
), scene_exceptions.has_season_exceptions(show_obj.tvid, show_obj.prodid, x))] ), scene_exceptions.ReleaseMap().has_season_exceptions(show_obj.tvid, show_obj.prodid, cur_season))]
for row in my_db.select('SELECT season, episode, status' for row in my_db.select('SELECT season, episode, status'
' FROM tv_episodes' ' FROM tv_episodes'
@ -2423,7 +2441,7 @@ class Home(MainHandler):
t.clean_show_name = quote_plus(sickgear.indexermapper.clean_show_name(show_obj.name)) t.clean_show_name = quote_plus(sickgear.indexermapper.clean_show_name(show_obj.name))
t.min_initial = Quality.get_quality_ui(min(Quality.split_quality(show_obj.quality)[0])) t.min_initial = Quality.get_quality_ui(min(Quality.split_quality(show_obj.quality)[0]))
t.show_obj.exceptions = scene_exceptions.get_scene_exceptions(show_obj.tvid, show_obj.prodid) t.show_obj.exceptions = scene_exceptions.ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid)
# noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences
t.all_scene_exceptions = show_obj.exceptions # normally Unresolved as not a class attribute, force set above t.all_scene_exceptions = show_obj.exceptions # normally Unresolved as not a class attribute, force set above
t.scene_numbering = get_scene_numbering_for_show(show_obj.tvid, show_obj.prodid) t.scene_numbering = get_scene_numbering_for_show(show_obj.tvid, show_obj.prodid)
@ -2562,7 +2580,7 @@ class Home(MainHandler):
@staticmethod @staticmethod
def scene_exceptions(tvid_prodid, wanted_season=None): def scene_exceptions(tvid_prodid, wanted_season=None):
exceptions_list = sickgear.scene_exceptions.get_all_scene_exceptions(tvid_prodid) exceptions_list = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
wanted_season = helpers.try_int(wanted_season, None) wanted_season = helpers.try_int(wanted_season, None)
wanted_not_found = None is not wanted_season and wanted_season not in exceptions_list wanted_not_found = None is not wanted_season and wanted_season not in exceptions_list
if not exceptions_list or wanted_not_found: if not exceptions_list or wanted_not_found:
@ -2754,7 +2772,7 @@ class Home(MainHandler):
return [err_string] return [err_string]
return self._generic_message('Error', err_string) return self._generic_message('Error', err_string)
show_obj.exceptions = scene_exceptions.get_all_scene_exceptions(tvid_prodid) show_obj.exceptions = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
if None is not quality_preset and int(quality_preset): if None is not quality_preset and int(quality_preset):
best_qualities = [] best_qualities = []
@ -2971,7 +2989,7 @@ class Home(MainHandler):
if do_update_exceptions: if do_update_exceptions:
try: try:
scene_exceptions.update_scene_exceptions(show_obj.tvid, show_obj.prodid, exceptions_list) scene_exceptions.ReleaseMap().update_exceptions(show_obj, exceptions_list)
helpers.cpu_sleep() helpers.cpu_sleep()
except exceptions_helper.CantUpdateException: except exceptions_helper.CantUpdateException:
errors.append('Unable to force an update on scene exceptions of the show.') errors.append('Unable to force an update on scene exceptions of the show.')
@ -3912,7 +3930,7 @@ class HomeProcessMedia(Home):
m = sickgear.NZBGET_MAP.split('=') m = sickgear.NZBGET_MAP.split('=')
dir_name, not_used = helpers.path_mapper(m[0], m[1], dir_name) dir_name, not_used = helpers.path_mapper(m[0], m[1], dir_name)
result = processTV.processDir(dir_name if dir_name else None, result = processTV.process_dir(dir_name if dir_name else None,
None if not nzb_name else decode_str(nzb_name), None if not nzb_name else decode_str(nzb_name),
process_method=process_method, pp_type=process_type, process_method=process_method, pp_type=process_type,
cleanup=cleanup, cleanup=cleanup,
@ -4448,7 +4466,7 @@ class AddShows(Home):
t.blocklist = [] t.blocklist = []
t.groups = [] t.groups = []
t.show_scene_maps = list(itervalues(sickgear.scene_exceptions.xem_ids_list)) t.show_scene_maps = list(itervalues(scene_exceptions.MEMCACHE['release_map_xem']))
has_shows = len(sickgear.showList) has_shows = len(sickgear.showList)
t.try_id = [] # [dict try_tip: try_term] t.try_id = [] # [dict try_tip: try_term]
@ -6616,7 +6634,7 @@ class Manage(MainHandler):
show_obj = helpers.find_show_by_id(tvid_prodid) show_obj = helpers.find_show_by_id(tvid_prodid)
if show_obj: if show_obj:
sickgear.backlog_search_scheduler.action.search_backlog([show_obj]) sickgear.search_backlog_scheduler.action.search_backlog([show_obj])
self.redirect('/manage/backlog-overview/') self.redirect('/manage/backlog-overview/')
@ -7116,11 +7134,11 @@ class ManageSearch(Manage):
def index(self): def index(self):
t = PageTemplate(web_handler=self, file='manage_manageSearches.tmpl') t = PageTemplate(web_handler=self, file='manage_manageSearches.tmpl')
# t.backlog_pi = sickgear.backlog_search_scheduler.action.get_progress_indicator() # t.backlog_pi = sickgear.search_backlog_scheduler.action.get_progress_indicator()
t.backlog_paused = sickgear.search_queue_scheduler.action.is_backlog_paused() t.backlog_paused = sickgear.search_queue_scheduler.action.is_backlog_paused()
t.scheduled_backlog_active_providers = sickgear.search_backlog.BacklogSearcher.providers_active(scheduled=True) t.scheduled_backlog_active_providers = sickgear.search_backlog.BacklogSearcher.providers_active(scheduled=True)
t.backlog_running = sickgear.search_queue_scheduler.action.is_backlog_in_progress() t.backlog_running = sickgear.search_queue_scheduler.action.is_backlog_in_progress()
t.backlog_is_active = sickgear.backlog_search_scheduler.action.am_running() t.backlog_is_active = sickgear.search_backlog_scheduler.action.am_running()
t.standard_backlog_running = sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress() t.standard_backlog_running = sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress()
t.backlog_running_type = sickgear.search_queue_scheduler.action.type_of_backlog_in_progress() t.backlog_running_type = sickgear.search_queue_scheduler.action.type_of_backlog_in_progress()
t.recent_search_status = sickgear.search_queue_scheduler.action.is_recentsearch_in_progress() t.recent_search_status = sickgear.search_queue_scheduler.action.is_recentsearch_in_progress()
@ -7161,7 +7179,7 @@ class ManageSearch(Manage):
def force_backlog(self): def force_backlog(self):
# force it to run the next time it looks # force it to run the next time it looks
if not sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress(): if not sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress():
sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG) sickgear.search_backlog_scheduler.force_search(force_type=FORCED_BACKLOG)
logger.log('Backlog search forced') logger.log('Backlog search forced')
ui.notifications.message('Backlog search started') ui.notifications.message('Backlog search started')
@ -7172,7 +7190,7 @@ class ManageSearch(Manage):
# force it to run the next time it looks # force it to run the next time it looks
if not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress(): if not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress():
result = sickgear.recent_search_scheduler.force_run() result = sickgear.search_recent_scheduler.force_run()
if result: if result:
logger.log('Recent search forced') logger.log('Recent search forced')
ui.notifications.message('Recent search started') ui.notifications.message('Recent search started')
@ -7183,7 +7201,7 @@ class ManageSearch(Manage):
def force_find_propers(self): def force_find_propers(self):
# force it to run the next time it looks # force it to run the next time it looks
result = sickgear.proper_finder_scheduler.force_run() result = sickgear.search_propers_scheduler.force_run()
if result: if result:
logger.log('Find propers search forced') logger.log('Find propers search forced')
ui.notifications.message('Find propers search started') ui.notifications.message('Find propers search started')
@ -7207,10 +7225,10 @@ class ShowTasks(Manage):
t = PageTemplate(web_handler=self, file='manage_showProcesses.tmpl') t = PageTemplate(web_handler=self, file='manage_showProcesses.tmpl')
t.queue_length = sickgear.show_queue_scheduler.action.queue_length() t.queue_length = sickgear.show_queue_scheduler.action.queue_length()
t.people_queue = sickgear.people_queue_scheduler.action.queue_data() t.people_queue = sickgear.people_queue_scheduler.action.queue_data()
t.next_run = sickgear.show_update_scheduler.last_run.replace( t.next_run = sickgear.update_show_scheduler.last_run.replace(
hour=sickgear.show_update_scheduler.start_time.hour) hour=sickgear.update_show_scheduler.start_time.hour)
t.show_update_running = sickgear.show_queue_scheduler.action.is_show_update_running() \ t.show_update_running = sickgear.show_queue_scheduler.action.is_show_update_running() \
or sickgear.show_update_scheduler.action.amActive or sickgear.update_show_scheduler.is_running_job
my_db = db.DBConnection(row_type='dict') my_db = db.DBConnection(row_type='dict')
sql_result = my_db.select('SELECT n.indexer || ? || n.indexer_id AS tvid_prodid,' sql_result = my_db.select('SELECT n.indexer || ? || n.indexer_id AS tvid_prodid,'
@ -7293,7 +7311,7 @@ class ShowTasks(Manage):
def force_show_update(self): def force_show_update(self):
result = sickgear.show_update_scheduler.force_run() result = sickgear.update_show_scheduler.force_run()
if result: if result:
logger.log('Show Update forced') logger.log('Show Update forced')
ui.notifications.message('Forced Show Update started') ui.notifications.message('Forced Show Update started')
@@ -7982,7 +8000,7 @@ class ConfigGeneral(Config):
     def update_alt():
         """ Load scene exceptions """
-        changed_exceptions, cnt_updated_numbers, min_remain_iv = scene_exceptions.retrieve_exceptions()
+        changed_exceptions, cnt_updated_numbers, min_remain_iv = scene_exceptions.ReleaseMap().fetch_exceptions()

         return json_dumps(dict(names=int(changed_exceptions), numbers=cnt_updated_numbers, min_remain_iv=min_remain_iv))
@@ -7991,7 +8009,7 @@ class ConfigGeneral(Config):
         """ Return alternative release names and numbering as json text"""
         # alternative release names and numbers
-        alt_names = scene_exceptions.get_all_scene_exceptions(tvid_prodid)
+        alt_names = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
         alt_numbers = get_scene_numbering_for_show(*TVidProdid(tvid_prodid).tuple)  # arbitrary order

         ui_output = 'No alternative names or numbers to export'
@@ -8180,8 +8198,8 @@ class ConfigGeneral(Config):
         sickgear.UPDATE_SHOWS_ON_START = config.checkbox_to_value(update_shows_on_start)
         sickgear.SHOW_UPDATE_HOUR = config.minimax(show_update_hour, 3, 0, 23)
         try:
-            with sickgear.show_update_scheduler.lock:
-                sickgear.show_update_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR)
+            with sickgear.update_show_scheduler.lock:
+                sickgear.update_show_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR)
         except (BaseException, Exception) as e:
             logger.error('Could not change Show Update Scheduler time: %s' % ex(e))
         sickgear.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show)
View file
@@ -74,14 +74,15 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
             sickgear.showList.append(s)
             sickgear.showDict[s.sid_int] = s
         sickgear.webserve.Home.make_showlist_unique_names()
-        scene_exceptions.retrieve_exceptions()
+        scene_exceptions.ReleaseMap().fetch_exceptions()
         name_cache.build_name_cache()

     def test_sceneExceptionsEmpty(self):
-        self.assertEqual(scene_exceptions.get_scene_exceptions(0, 0), [])
+        self.assertEqual(scene_exceptions.ReleaseMap().get_alt_names(0, 0), [])

     def test_sceneExceptionsBlack_Lagoon(self):
-        self.assertEqual(sorted(scene_exceptions.get_scene_exceptions(1, 79604)), ['Black-Lagoon'])
+        self.assertEqual(sorted(
+            scene_exceptions.ReleaseMap().get_alt_names(1, 79604)), ['Black-Lagoon'])

     def test_sceneExceptionByName(self):
         self.assertEqual(scene_exceptions.get_scene_exception_by_name(
@@ -98,14 +99,18 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
             s.anime = 1
             sickgear.showList.append(s)
             sickgear.showDict[s.sid_int] = s
-        scene_exceptions.retrieve_exceptions()
+        scene_exceptions.ReleaseMap().fetch_exceptions()
         name_cache.build_name_cache()
-        self.assertEqual(scene_exceptions.get_scene_exception_by_name('ブラック・ラグーン'), [1, 79604, -1])
-        self.assertEqual(scene_exceptions.get_scene_exception_by_name('Burakku Ragūn'), [1, 79604, -1])
-        self.assertEqual(scene_exceptions.get_scene_exception_by_name('Rokka no Yuusha'), [1, 295243, -1])
+        self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+            'ブラック・ラグーン'), [1, 79604, -1])
+        self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+            'Burakku Ragūn'), [1, 79604, -1])
+        self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+            'Rokka no Yuusha'), [1, 295243, -1])

     def test_sceneExceptionByNameEmpty(self):
-        self.assertEqual(scene_exceptions.get_scene_exception_by_name('nothing useful'), [None, None, None])
+        self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+            'nothing useful'), [None, None, None])

     def test_sceneExceptionsResetNameCache(self):
         # clear the exceptions
@@ -117,7 +122,7 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
         name_cache.add_name_to_cache('Cached Name', prodid=0)

         # updating should not clear the cache this time since our exceptions didn't change
-        scene_exceptions.retrieve_exceptions()
+        scene_exceptions.ReleaseMap().fetch_exceptions()
         self.assertEqual(name_cache.retrieve_name_from_cache('Cached Name'), (0, 0))
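Taken together, these test changes outline the refactored surface: the module-level helpers retrieve_exceptions/get_scene_exceptions/get_all_scene_exceptions become methods on a ReleaseMap instance. A usage sketch inferred only from the call sites in this diff; the argument formats are assumptions, and it presumes an initialised SickGear environment:

from sickgear import scene_exceptions

release_map = scene_exceptions.ReleaseMap()

# was scene_exceptions.retrieve_exceptions()
release_map.fetch_exceptions()

# was get_scene_exceptions(tvid, prodid); returns a list of alternative names
alt_names = release_map.get_alt_names(1, 79604)

# was get_all_scene_exceptions(tvid_prodid); per-show lookup (string format assumed)
per_show = release_map.get_show_exceptions('1:79604')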
View file
@@ -180,7 +180,7 @@ class WebAPICase(test.SickbeardTestDBCase):
             search_queue.SearchQueue(),
             cycle_time=datetime.timedelta(seconds=3),
             thread_name='SEARCHQUEUE')
-        sickgear.backlog_search_scheduler = search_backlog.BacklogSearchScheduler(
+        sickgear.search_backlog_scheduler = search_backlog.BacklogSearchScheduler(
             search_backlog.BacklogSearcher(),
             cycle_time=datetime.timedelta(minutes=60),
             run_delay=datetime.timedelta(minutes=60),
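The fixture keeps the constructor shape while renaming the module attribute to search_backlog_scheduler. As a rough model of how cycle_time and run_delay combine, here is a sketch under the assumption that run_delay defers only the first run after start-up while cycle_time spaces the runs that follow (an assumption about intent, not sickgear's actual scheduler code):

import datetime

def next_run_times(started, cycle_time, run_delay, count=3):
    # hypothetical model: first run at started + run_delay,
    # each subsequent run one cycle_time later
    runs, when = [], started + run_delay
    for _ in range(count):
        runs.append(when)
        when += cycle_time
    return runs


for when in next_run_times(datetime.datetime(2023, 4, 27, 12, 0),
                           cycle_time=datetime.timedelta(minutes=60),
                           run_delay=datetime.timedelta(minutes=60)):
    print(when)  # 13:00, 14:00, 15:00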