diff --git a/CHANGES.md b/CHANGES.md
index 0bcb07a3..a1c15055 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -10,6 +10,8 @@
* Update SimpleJSON 3.18.1 (c891b95) to 3.19.1 (aeb63ee)
* Update Tornado Web Server 6.3.0 (7186b86) to 6.3.1 (419838b)
* Update urllib3 1.26.14 (a06c05c) to 1.26.15 (25cca389)
+* Change add jobs to centralise scheduler activities
+* Change refactor scene_exceptions
### 3.28.0 (2023-04-12 13:05:00 UTC)
diff --git a/sickgear.py b/sickgear.py
index 056fcd3b..b664f58f 100755
--- a/sickgear.py
+++ b/sickgear.py
@@ -555,9 +555,9 @@ class SickGear(object):
name_cache.build_name_cache()
# load all ids from xem
- sickgear.classes.loading_msg.message = 'Loading xem data'
- startup_background_tasks = threading.Thread(name='XEMUPDATER', target=sickgear.scene_exceptions.get_xem_ids)
- startup_background_tasks.start()
+# sickgear.classes.loading_msg.message = 'Loading xem data'
+# startup_background_tasks = threading.Thread(name='XEMUPDATER', target=sickgear.scene_exceptions.ReleaseMap().fetch_xem_ids)
+# startup_background_tasks.start()
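+# (xem id fetching now runs via the update_release_mappings_scheduler job below)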
sickgear.classes.loading_msg.message = 'Checking history'
# check history snatched_proper update
@@ -624,7 +624,7 @@ class SickGear(object):
if not switching and (self.force_update or sickgear.UPDATE_SHOWS_ON_START):
sickgear.classes.loading_msg.message = 'Starting a forced show update'
background_start_forced_show_update = threading.Thread(name='STARTUP-FORCE-SHOW-UPDATE',
- target=sickgear.show_update_scheduler.action.run)
+ target=sickgear.update_show_scheduler.action.run)
background_start_forced_show_update.start()
sickgear.classes.loading_msg.message = 'Switching to default web server'
diff --git a/sickgear/__init__.py b/sickgear/__init__.py
index 305952a2..1accc16d 100644
--- a/sickgear/__init__.py
+++ b/sickgear/__init__.py
@@ -37,7 +37,7 @@ import zlib
from . import classes, db, helpers, image_cache, indexermapper, logger, metadata, naming, people_queue, providers, \
scene_exceptions, scene_numbering, scheduler, search_backlog, search_propers, search_queue, search_recent, \
show_queue, show_updater, subtitles, trakt_helpers, version_checker, watchedstate_queue
-from . import auto_post_processer, properFinder # must come after the above imports
+from . import auto_media_process, properFinder # must come after the above imports
from .common import SD, SKIPPED, USER_AGENT
from .config import check_section, check_setting_int, check_setting_str, ConfigMigrator, minimax
from .databases import cache_db, failed_db, mainDB
@@ -61,7 +61,7 @@ import sg_helpers
# noinspection PyUnreachableCode
if False:
- from typing import AnyStr, Dict, List
+ from typing import AnyStr, Dict, List, Optional
from adba import Connection
from .event_queue import Events
from .tv import TVShow
@@ -88,23 +88,25 @@ DATA_DIR = ''
# noinspection PyTypeChecker
events = None # type: Events
-recent_search_scheduler = None
-backlog_search_scheduler = None
-show_update_scheduler = None
-people_queue_scheduler = None
-update_software_scheduler = None
-update_packages_scheduler = None
-show_queue_scheduler = None
-search_queue_scheduler = None
-proper_finder_scheduler = None
-media_process_scheduler = None
-subtitles_finder_scheduler = None
-# trakt_checker_scheduler = None
-emby_watched_state_scheduler = None
-plex_watched_state_scheduler = None
-watched_state_queue_scheduler = None
+show_queue_scheduler = None # type: Optional[scheduler.Scheduler]
+search_queue_scheduler = None # type: Optional[scheduler.Scheduler]
+people_queue_scheduler = None # type: Optional[scheduler.Scheduler]
+watched_state_queue_scheduler = None # type: Optional[scheduler.Scheduler]
+update_software_scheduler = None # type: Optional[scheduler.Scheduler]
+update_packages_scheduler = None # type: Optional[scheduler.Scheduler]
+update_show_scheduler = None # type: Optional[scheduler.Scheduler]
+update_release_mappings_scheduler = None # type: Optional[scheduler.Scheduler]
+search_recent_scheduler = None # type: Optional[scheduler.Scheduler]
+search_backlog_scheduler = None # type: Optional[search_backlog.BacklogSearchScheduler]
+search_propers_scheduler = None # type: Optional[scheduler.Scheduler]
+search_subtitles_scheduler = None # type: Optional[scheduler.Scheduler]
+emby_watched_state_scheduler = None # type: Optional[scheduler.Scheduler]
+plex_watched_state_scheduler = None # type: Optional[scheduler.Scheduler]
+process_media_scheduler = None # type: Optional[scheduler.Scheduler]
# noinspection PyTypeChecker
background_mapping_task = None # type: threading.Thread
+# deprecated
+# trakt_checker_scheduler = None
provider_ping_thread_pool = {}
@@ -624,9 +626,11 @@ __INITIALIZED__ = False
__INIT_STAGE__ = 0
# don't reassign MEMCACHE var without reassigning sg_helpers.MEMCACHE
+# and scene_exceptions.MEMCACHE
# as long as the pointer is the same (dict only modified) all is fine
MEMCACHE = {}
sg_helpers.MEMCACHE = MEMCACHE
+scene_exceptions.MEMCACHE = MEMCACHE
MEMCACHE_FLAG_IMAGES = {}
@@ -1518,11 +1522,14 @@ def init_stage_2():
global __INITIALIZED__, MEMCACHE, MEMCACHE_FLAG_IMAGES, RECENTSEARCH_STARTUP
# Schedulers
# global trakt_checker_scheduler
- global recent_search_scheduler, backlog_search_scheduler, people_queue_scheduler, show_update_scheduler, \
- update_software_scheduler, update_packages_scheduler, show_queue_scheduler, search_queue_scheduler, \
- proper_finder_scheduler, media_process_scheduler, subtitles_finder_scheduler, \
- background_mapping_task, \
- watched_state_queue_scheduler, emby_watched_state_scheduler, plex_watched_state_scheduler
+ global update_software_scheduler, update_packages_scheduler, \
+ update_show_scheduler, update_release_mappings_scheduler, \
+ search_backlog_scheduler, search_propers_scheduler, \
+ search_recent_scheduler, search_subtitles_scheduler, \
+ search_queue_scheduler, show_queue_scheduler, people_queue_scheduler, \
+ watched_state_queue_scheduler, emby_watched_state_scheduler, plex_watched_state_scheduler, \
+ process_media_scheduler, background_mapping_task
+
# Gen Config/Misc
global SHOW_UPDATE_HOUR, UPDATE_INTERVAL, UPDATE_PACKAGES_INTERVAL
# Search Settings/Episode
@@ -1570,32 +1577,17 @@ def init_stage_2():
metadata_provider_dict[tmp_provider.name] = tmp_provider
# initialize schedulers
- # updaters
- update_now = datetime.timedelta(minutes=0)
- update_software_scheduler = scheduler.Scheduler(
- version_checker.SoftwareUpdater(),
- cycle_time=datetime.timedelta(hours=UPDATE_INTERVAL),
- thread_name='SOFTWAREUPDATER',
- silent=False)
-
- update_packages_scheduler = scheduler.Scheduler(
- version_checker.PackagesUpdater(),
- cycle_time=datetime.timedelta(hours=UPDATE_PACKAGES_INTERVAL),
- # run_delay=datetime.timedelta(minutes=2),
- thread_name='PACKAGESUPDATER',
- silent=False)
-
+ # /
+ # queues must be first
show_queue_scheduler = scheduler.Scheduler(
show_queue.ShowQueue(),
cycle_time=datetime.timedelta(seconds=3),
thread_name='SHOWQUEUE')
- show_update_scheduler = scheduler.Scheduler(
- show_updater.ShowUpdater(),
- cycle_time=datetime.timedelta(hours=1),
- start_time=datetime.time(hour=SHOW_UPDATE_HOUR),
- thread_name='SHOWUPDATER',
- prevent_cycle_run=show_queue_scheduler.action.is_show_update_running) # 3AM
+ search_queue_scheduler = scheduler.Scheduler(
+ search_queue.SearchQueue(),
+ cycle_time=datetime.timedelta(seconds=3),
+ thread_name='SEARCHQUEUE')
people_queue_scheduler = scheduler.Scheduler(
people_queue.PeopleQueue(),
@@ -1603,21 +1595,52 @@ def init_stage_2():
thread_name='PEOPLEQUEUE'
)
- # searchers
- search_queue_scheduler = scheduler.Scheduler(
- search_queue.SearchQueue(),
+ watched_state_queue_scheduler = scheduler.Scheduler(
+ watchedstate_queue.WatchedStateQueue(),
cycle_time=datetime.timedelta(seconds=3),
- thread_name='SEARCHQUEUE')
+ thread_name='WATCHEDSTATEQUEUE')
+ # /
+ # updaters
+ update_software_scheduler = scheduler.Scheduler(
+ version_checker.SoftwareUpdater(),
+ cycle_time=datetime.timedelta(hours=UPDATE_INTERVAL),
+ thread_name='SOFTWAREUPDATE',
+ silent=False)
+
+ update_packages_scheduler = scheduler.Scheduler(
+ version_checker.PackagesUpdater(),
+ cycle_time=datetime.timedelta(hours=UPDATE_PACKAGES_INTERVAL),
+ # run_delay=datetime.timedelta(minutes=2),
+ thread_name='PACKAGESUPDATE',
+ silent=False)
+
+ update_show_scheduler = scheduler.Scheduler(
+ show_updater.ShowUpdater(),
+ cycle_time=datetime.timedelta(hours=1),
+ start_time=datetime.time(hour=SHOW_UPDATE_HOUR),
+ thread_name='SHOWDATAUPDATE',
+ prevent_cycle_run=show_queue_scheduler.action.is_show_update_running) # 3AM
+
+ classes.loading_msg.message = 'Loading show maps'
+ update_release_mappings_scheduler = scheduler.Scheduler(
+ scene_exceptions.ReleaseMap(),
+ cycle_time=datetime.timedelta(hours=2),
+ thread_name='SHOWMAPSUPDATE',
+ silent=False)
+
+ # /
+ # searchers
init_search_delay = int(os.environ.get('INIT_SEARCH_DELAY', 0))
# enter 4499 (was 4489) for experimental internal provider intervals
update_interval = datetime.timedelta(minutes=(RECENTSEARCH_INTERVAL, 1)[4499 == RECENTSEARCH_INTERVAL])
- recent_search_scheduler = scheduler.Scheduler(
+ update_now = datetime.timedelta(minutes=0)
+ search_recent_scheduler = scheduler.Scheduler(
search_recent.RecentSearcher(),
cycle_time=update_interval,
run_delay=update_now if RECENTSEARCH_STARTUP else datetime.timedelta(minutes=init_search_delay or 5),
- thread_name='RECENTSEARCHER',
+ thread_name='RECENTSEARCH',
prevent_cycle_run=search_queue_scheduler.action.is_recentsearch_in_progress)
if [x for x in providers.sorted_sources()
@@ -1635,14 +1658,13 @@ def init_stage_2():
backlogdelay = helpers.try_int((time_diff.total_seconds() / 60) + 10, 10)
else:
backlogdelay = 10
- backlog_search_scheduler = search_backlog.BacklogSearchScheduler(
+ search_backlog_scheduler = search_backlog.BacklogSearchScheduler(
search_backlog.BacklogSearcher(),
cycle_time=datetime.timedelta(minutes=get_backlog_cycle_time()),
run_delay=datetime.timedelta(minutes=init_search_delay or backlogdelay),
- thread_name='BACKLOG',
+ thread_name='BACKLOGSEARCH',
prevent_cycle_run=search_queue_scheduler.action.is_standard_backlog_in_progress)
- propers_searcher = search_propers.ProperSearcher()
last_proper_search = datetime.datetime.fromtimestamp(properFinder.get_last_proper_search())
time_diff = datetime.timedelta(days=1) - (datetime.datetime.now() - last_proper_search)
if time_diff < datetime.timedelta(seconds=0):
@@ -1650,34 +1672,21 @@ def init_stage_2():
else:
properdelay = helpers.try_int((time_diff.total_seconds() / 60) + 5, 20)
- proper_finder_scheduler = scheduler.Scheduler(
- propers_searcher,
+ search_propers_scheduler = scheduler.Scheduler(
+ search_propers.ProperSearcher(),
cycle_time=datetime.timedelta(days=1),
run_delay=datetime.timedelta(minutes=init_search_delay or properdelay),
- thread_name='FINDPROPERS',
+ thread_name='PROPERSSEARCH',
prevent_cycle_run=search_queue_scheduler.action.is_propersearch_in_progress)
- # processors
- media_process_scheduler = scheduler.Scheduler(
- auto_post_processer.PostProcesser(),
- cycle_time=datetime.timedelta(minutes=MEDIAPROCESS_INTERVAL),
- thread_name='POSTPROCESSER',
- silent=not PROCESS_AUTOMATICALLY)
-
- subtitles_finder_scheduler = scheduler.Scheduler(
+ search_subtitles_scheduler = scheduler.Scheduler(
subtitles.SubtitlesFinder(),
cycle_time=datetime.timedelta(hours=SUBTITLES_FINDER_INTERVAL),
- thread_name='FINDSUBTITLES',
+ thread_name='SUBTITLESEARCH',
silent=not USE_SUBTITLES)
- background_mapping_task = threading.Thread(name='MAPPINGSUPDATER', target=indexermapper.load_mapped_ids,
- kwargs={'load_all': True})
-
- watched_state_queue_scheduler = scheduler.Scheduler(
- watchedstate_queue.WatchedStateQueue(),
- cycle_time=datetime.timedelta(seconds=3),
- thread_name='WATCHEDSTATEQUEUE')
-
+ # /
+ # others
emby_watched_state_scheduler = scheduler.Scheduler(
EmbyWatchedStateUpdater(),
cycle_time=datetime.timedelta(minutes=EMBY_WATCHEDSTATE_INTERVAL),
@@ -1690,6 +1699,15 @@ def init_stage_2():
run_delay=datetime.timedelta(minutes=5),
thread_name='PLEXWATCHEDSTATE')
+ process_media_scheduler = scheduler.Scheduler(
+ auto_media_process.MediaProcess(),
+ cycle_time=datetime.timedelta(minutes=MEDIAPROCESS_INTERVAL),
+ thread_name='PROCESSMEDIA',
+ silent=not PROCESS_AUTOMATICALLY)
+
+ background_mapping_task = threading.Thread(name='MAPPINGUPDATES', target=indexermapper.load_mapped_ids,
+ kwargs={'load_all': True})
+
MEMCACHE['history_tab_limit'] = 11
MEMCACHE['history_tab'] = History.menu_tab(MEMCACHE['history_tab_limit'])
@@ -1707,11 +1725,15 @@ def init_stage_2():
def enabled_schedulers(is_init=False):
# ([], [trakt_checker_scheduler])[USE_TRAKT] + \
return ([], [events])[is_init] \
- + ([], [recent_search_scheduler, backlog_search_scheduler, show_update_scheduler, people_queue_scheduler,
- update_software_scheduler, update_packages_scheduler,
- show_queue_scheduler, search_queue_scheduler, proper_finder_scheduler,
- media_process_scheduler, subtitles_finder_scheduler,
- emby_watched_state_scheduler, plex_watched_state_scheduler, watched_state_queue_scheduler]
+ + ([], [update_software_scheduler, update_packages_scheduler,
+ update_show_scheduler, update_release_mappings_scheduler,
+ search_recent_scheduler, search_backlog_scheduler,
+ search_propers_scheduler, search_subtitles_scheduler,
+ show_queue_scheduler, search_queue_scheduler,
+ people_queue_scheduler, watched_state_queue_scheduler,
+ emby_watched_state_scheduler, plex_watched_state_scheduler,
+ process_media_scheduler
+ ]
)[not MEMCACHE.get('update_restart')] \
+ ([events], [])[is_init]
diff --git a/sickgear/auto_post_processer.py b/sickgear/auto_media_process.py
similarity index 73%
rename from sickgear/auto_post_processer.py
rename to sickgear/auto_media_process.py
index 124e8b4a..fd707037 100644
--- a/sickgear/auto_post_processer.py
+++ b/sickgear/auto_media_process.py
@@ -18,32 +18,31 @@ import os.path
import sickgear
from . import logger, processTV
+from .scheduler import Job
-class PostProcesser(object):
+class MediaProcess(Job):
def __init__(self):
- self.amActive = False
+ super(MediaProcess, self).__init__(self.job_run, kwargs={})
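+ # running state (amActive) is now tracked by the scheduler Job base class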
@staticmethod
def is_enabled():
return sickgear.PROCESS_AUTOMATICALLY
- def run(self):
+ def job_run(self):
if self.is_enabled():
- self.amActive = True
self._main()
- self.amActive = False
@staticmethod
def _main():
if not os.path.isdir(sickgear.TV_DOWNLOAD_DIR):
- logger.error('Automatic post-processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR)
+ logger.error('Automatic media processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR)
return
if not os.path.isabs(sickgear.TV_DOWNLOAD_DIR):
- logger.error('Automatic post-processing attempted but dir %s is relative '
+ logger.error('Automatic media processing attempted but dir %s is relative '
'(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR)
return
- processTV.processDir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True)
+ processTV.process_dir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True)
diff --git a/sickgear/config.py b/sickgear/config.py
index c7adc21a..92ed1db8 100644
--- a/sickgear/config.py
+++ b/sickgear/config.py
@@ -152,8 +152,8 @@ def schedule_mediaprocess(iv):
if sickgear.MEDIAPROCESS_INTERVAL < sickgear.MIN_MEDIAPROCESS_INTERVAL:
sickgear.MEDIAPROCESS_INTERVAL = sickgear.MIN_MEDIAPROCESS_INTERVAL
- sickgear.media_process_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL)
- sickgear.media_process_scheduler.set_paused_state()
+ sickgear.process_media_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL)
+ sickgear.process_media_scheduler.set_paused_state()
def schedule_recentsearch(iv):
@@ -162,14 +162,14 @@ def schedule_recentsearch(iv):
if sickgear.RECENTSEARCH_INTERVAL < sickgear.MIN_RECENTSEARCH_INTERVAL:
sickgear.RECENTSEARCH_INTERVAL = sickgear.MIN_RECENTSEARCH_INTERVAL
- sickgear.recent_search_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL)
+ sickgear.search_recent_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL)
def schedule_backlog(iv):
sickgear.BACKLOG_PERIOD = minimax(iv, sickgear.DEFAULT_BACKLOG_PERIOD,
sickgear.MIN_BACKLOG_PERIOD, sickgear.MAX_BACKLOG_PERIOD)
- sickgear.backlog_search_scheduler.action.cycle_time = sickgear.BACKLOG_PERIOD
+ sickgear.search_backlog_scheduler.action.cycle_time = sickgear.BACKLOG_PERIOD
def schedule_update_software(iv):
@@ -220,7 +220,7 @@ def schedule_update_packages_notify(update_packages_notify):
def schedule_download_propers(download_propers):
if sickgear.DOWNLOAD_PROPERS != download_propers:
sickgear.DOWNLOAD_PROPERS = download_propers
- sickgear.proper_finder_scheduler.set_paused_state()
+ sickgear.search_propers_scheduler.set_paused_state()
def schedule_trakt(use_trakt):
@@ -233,7 +233,7 @@ def schedule_trakt(use_trakt):
def schedule_subtitles(use_subtitles):
if sickgear.USE_SUBTITLES != use_subtitles:
sickgear.USE_SUBTITLES = use_subtitles
- sickgear.subtitles_finder_scheduler.set_paused_state()
+ sickgear.search_subtitles_scheduler.set_paused_state()
def schedule_emby_watched(emby_watched_interval):
diff --git a/sickgear/event_queue.py b/sickgear/event_queue.py
index 0c894e00..1d028d84 100644
--- a/sickgear/event_queue.py
+++ b/sickgear/event_queue.py
@@ -1,5 +1,4 @@
-from lib.six import moves
-
+import queue
import threading
@@ -15,7 +14,7 @@ class Event(object):
class Events(threading.Thread):
def __init__(self, callback):
super(Events, self).__init__()
- self.queue = moves.queue.Queue()
+ self.queue = queue.Queue()
self.daemon = True
self.callback = callback
self.name = 'EVENT-QUEUE'
@@ -31,24 +30,24 @@ class Events(threading.Thread):
while not self._stopper.is_set():
try:
# get event type
- etype = self.queue.get(True, 1)
- except moves.queue.Empty:
- etype = 'Empty'
+ ev_type = self.queue.get(True, 1)
+ except queue.Empty:
+ ev_type = 'Empty'
except(BaseException, Exception):
- etype = None
- if etype in (self.SystemEvent.RESTART, self.SystemEvent.SHUTDOWN, None, 'Empty'):
- if etype in ('Empty',):
+ ev_type = None
+ if ev_type in (self.SystemEvent.RESTART, self.SystemEvent.SHUTDOWN, None, 'Empty'):
+ if ev_type in ('Empty',):
continue
from sickgear import logger
- logger.debug(f'Callback {self.callback.__name__}(event type:{etype})')
+ logger.debug(f'Callback {self.callback.__name__}(event type:{ev_type})')
try:
# perform callback if we got an event type
- self.callback(etype)
+ self.callback(ev_type)
# event completed
self.queue.task_done()
- except moves.queue.Empty:
+ except queue.Empty:
pass
# exiting thread
diff --git a/sickgear/generic_queue.py b/sickgear/generic_queue.py
index 11c77bbe..7f6b0d4d 100644
--- a/sickgear/generic_queue.py
+++ b/sickgear/generic_queue.py
@@ -19,6 +19,7 @@ import datetime
import threading
from . import db, logger
+from .scheduler import Job
from exceptions_helper import ex
from six import integer_types
@@ -37,9 +38,10 @@ class QueuePriorities(object):
VERYHIGH = 40
-class GenericQueue(object):
+class GenericQueue(Job):
def __init__(self, cache_db_tables=None, main_db_tables=None):
# type: (List[AnyStr], List[AnyStr]) -> None
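+ # the queue runs as a silent scheduler job; reentrant_lock allows the same thread to re-acquire self.lock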
+ super(GenericQueue, self).__init__(self.job_run, silent=True, kwargs={}, reentrant_lock=True)
self.currentItem = None # type: QueueItem or None
@@ -51,13 +53,41 @@ class GenericQueue(object):
self.events = {} # type: Dict[int, List[Callable]]
- self.lock = threading.RLock()
-
self.cache_db_tables = cache_db_tables or [] # type: List[AnyStr]
self.main_db_tables = main_db_tables or [] # type: List[AnyStr]
self._id_counter = self._load_init_id() # type: integer_types
+ def job_run(self):
+
+ # only start a new task if one isn't already going
+ with self.lock:
+ if None is self.currentItem or not self.currentItem.is_alive():
+
+ # if the thread is dead then the current item should be finished
+ if self.currentItem:
+ self.currentItem.finish()
+ try:
+ self.delete_item(self.currentItem, finished_run=True)
+ except (BaseException, Exception):
+ pass
+ self.currentItem = None
+
+ # if there's something in the queue then run it in a thread and take it out of the queue
+ if 0 < len(self.queue):
+
+ self.queue.sort(key=lambda y: (-y.priority, y.added))
+ if self.queue[0].priority < self.min_priority:
+ return
+
+ # launch the queue item in a thread
+ self.currentItem = self.queue.pop(0)
+ if 'SEARCHQUEUE' != self.queue_name:
+ self.currentItem.name = self.queue_name + '-' + self.currentItem.name
+ self.currentItem.start()
+
+ self.check_events()
+
def _load_init_id(self):
# type: (...) -> integer_types
"""
@@ -216,7 +246,7 @@ class GenericQueue(object):
self.min_priority = 999999999999
def unpause(self):
- logger.log('Unpausing queue')
+ logger.log('Un-pausing queue')
with self.lock:
self.min_priority = 0
@@ -269,36 +299,6 @@ class GenericQueue(object):
except (BaseException, Exception) as e:
logger.error('Error executing Event: %s' % ex(e))
- def run(self):
-
- # only start a new task if one isn't already going
- with self.lock:
- if None is self.currentItem or not self.currentItem.is_alive():
-
- # if the thread is dead then the current item should be finished
- if self.currentItem:
- self.currentItem.finish()
- try:
- self.delete_item(self.currentItem, finished_run=True)
- except (BaseException, Exception):
- pass
- self.currentItem = None
-
- # if there's something in the queue then run it in a thread and take it out of the queue
- if 0 < len(self.queue):
-
- self.queue.sort(key=lambda y: (-y.priority, y.added))
- if self.queue[0].priority < self.min_priority:
- return
-
- # launch the queue item in a thread
- self.currentItem = self.queue.pop(0)
- if 'SEARCHQUEUE' != self.queue_name:
- self.currentItem.name = self.queue_name + '-' + self.currentItem.name
- self.currentItem.start()
-
- self.check_events()
-
class QueueItem(threading.Thread):
def __init__(self, name, action_id=0, uid=None):
diff --git a/sickgear/name_cache.py b/sickgear/name_cache.py
index c7225a27..d2ebedad 100644
--- a/sickgear/name_cache.py
+++ b/sickgear/name_cache.py
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
+from collections import defaultdict
import threading
import sickgear
@@ -21,6 +22,7 @@ from . import db
from .helpers import full_sanitize_scene_name, try_int
from six import iteritems
+from _23 import map_consume
# noinspection PyUnreachableCode
if False:
@@ -85,18 +87,19 @@ def build_name_cache(show_obj=None, update_only_scene=False):
if show_obj:
# search for only the requested show id and flush old show entries from namecache
show_ids = {show_obj.tvid: [show_obj.prodid]}
+
nameCache = dict([(k, v) for k, v in iteritems(nameCache)
if not (v[0] == show_obj.tvid and v[1] == show_obj.prodid)])
sceneNameCache = dict([(k, v) for k, v in iteritems(sceneNameCache)
if not (v[0] == show_obj.tvid and v[1] == show_obj.prodid)])
# add standard indexer name to namecache
- nameCache[full_sanitize_scene_name(show_obj.unique_name or show_obj.name)] = [show_obj.tvid, show_obj.prodid, -1]
+ nameCache[full_sanitize_scene_name(show_obj.unique_name or show_obj.name)] = \
+ [show_obj.tvid, show_obj.prodid, -1]
else:
# generate list of production ids to look up in cache.db
- show_ids = {}
- for cur_show_obj in sickgear.showList:
- show_ids.setdefault(cur_show_obj.tvid, []).append(cur_show_obj.prodid)
+ show_ids = defaultdict(list)
+ map_consume(lambda _so: show_ids[_so.tvid].append(_so.prodid), sickgear.showList)
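+ # map_consume iterates purely for the side effect of filling show_ids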
# add all standard show indexer names to namecache
nameCache = dict(
@@ -104,33 +107,32 @@ def build_name_cache(show_obj=None, update_only_scene=False):
for cur_so in sickgear.showList if cur_so])
sceneNameCache = {}
- cache_db = db.DBConnection()
-
- cache_results = []
- if update_only_scene:
- # generate list of production ids to look up in cache.db
- show_ids = {}
- for cur_show_obj in sickgear.showList:
- show_ids.setdefault(cur_show_obj.tvid, []).append(cur_show_obj.prodid)
- tmp_scene_name_cache = {}
- else:
tmp_scene_name_cache = sceneNameCache.copy()
- for t, s in iteritems(show_ids):
+ else:
+ # generate list of production ids to look up in cache.db
+ show_ids = defaultdict(list)
+ map_consume(lambda _so: show_ids[_so.tvid].append(_so.prodid), sickgear.showList)
+
+ tmp_scene_name_cache = {}
+
+ cache_results = []
+ cache_db = db.DBConnection()
+ for cur_tvid, cur_prodid_list in iteritems(show_ids):
cache_results += cache_db.select(
- 'SELECT show_name, indexer AS tv_id, indexer_id AS prod_id, season'
- ' FROM scene_exceptions'
- ' WHERE indexer = %s AND indexer_id IN (%s)' % (t, ','.join(['%s' % i for i in s])))
+ f'SELECT show_name, indexer AS tv_id, indexer_id AS prod_id, season'
+ f' FROM scene_exceptions'
+ f' WHERE indexer = {cur_tvid} AND indexer_id IN ({",".join(map(str, cur_prodid_list))})')
if cache_results:
- for cache_result in cache_results:
- tvid = int(cache_result['tv_id'])
- prodid = int(cache_result['prod_id'])
- season = try_int(cache_result['season'], -1)
- name = full_sanitize_scene_name(cache_result['show_name'])
+ for cur_result in cache_results:
+ tvid = int(cur_result['tv_id'])
+ prodid = int(cur_result['prod_id'])
+ season = try_int(cur_result['season'], -1)
+ name = full_sanitize_scene_name(cur_result['show_name'])
tmp_scene_name_cache[name] = [tvid, prodid, season]
- sceneNameCache = tmp_scene_name_cache
+ sceneNameCache = tmp_scene_name_cache.copy()
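+ # assign a copy so the module-level cache stays independent of the working dict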
def remove_from_namecache(tvid, prodid):
diff --git a/sickgear/name_parser/parser.py b/sickgear/name_parser/parser.py
index 0132056c..9c0bd936 100644
--- a/sickgear/name_parser/parser.py
+++ b/sickgear/name_parser/parser.py
@@ -407,7 +407,8 @@ class NameParser(object):
new_season_numbers.append(s)
elif show_obj.is_anime and len(best_result.ab_episode_numbers) and not self.testing:
- scene_season = scene_exceptions.get_scene_exception_by_name(best_result.series_name)[2]
+ scene_season = scene_exceptions.get_scene_exception_by_name(
+ best_result.series_name)[2]
for epAbsNo in best_result.ab_episode_numbers:
a = epAbsNo
diff --git a/sickgear/processTV.py b/sickgear/processTV.py
index 16326af2..08c53072 100644
--- a/sickgear/processTV.py
+++ b/sickgear/processTV.py
@@ -1141,10 +1141,9 @@ class ProcessTVShow(object):
self._buffer(processor.log.strip('\n'))
-# backward compatibility prevents the case of this function name from being updated to PEP8
-def processDir(dir_name, nzb_name=None, process_method=None, force=False, force_replace=None,
- failed=False, pp_type='auto', cleanup=False, webhandler=None, show_obj=None, is_basedir=True,
- skip_failure_processing=False, client=None):
+def process_dir(dir_name, nzb_name=None, process_method=None, force=False, force_replace=None,
+ failed=False, pp_type='auto', cleanup=False, webhandler=None, show_obj=None, is_basedir=True,
+ skip_failure_processing=False, client=None):
"""
:param dir_name: dir name
@@ -1182,6 +1181,10 @@ def processDir(dir_name, nzb_name=None, process_method=None, force=False, force_
pp_type, cleanup, show_obj)
+# backward compatibility
+processDir = process_dir
+
+
def process_minimal(nzb_name, show_obj, failed, webhandler):
# type: (AnyStr, TVShow, bool, Any) -> None
ProcessTVShow(webhandler).process_minimal(nzb_name, show_obj, failed, webhandler)
diff --git a/sickgear/properFinder.py b/sickgear/properFinder.py
index 1397e06a..1a7a6339 100644
--- a/sickgear/properFinder.py
+++ b/sickgear/properFinder.py
@@ -71,7 +71,7 @@ def search_propers(provider_proper_obj=None):
if None is provider_proper_obj:
_set_last_proper_search(datetime.datetime.now())
- proper_sch = sickgear.proper_finder_scheduler
+ proper_sch = sickgear.search_propers_scheduler
if None is proper_sch.start_time:
run_in = proper_sch.last_run + proper_sch.cycle_time - datetime.datetime.now()
run_at = ', next check '
@@ -696,7 +696,7 @@ def _set_last_proper_search(when):
def next_proper_timeleft():
- return sickgear.proper_finder_scheduler.time_left()
+ return sickgear.search_propers_scheduler.time_left()
def get_last_proper_search():
diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py
index dab1fc5c..e5ccb797 100644
--- a/sickgear/providers/generic.py
+++ b/sickgear/providers/generic.py
@@ -37,7 +37,7 @@ from ..classes import NZBSearchResult, TorrentSearchResult, SearchResult
from ..common import Quality, MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
from ..helpers import maybe_plural, remove_file_perm
from ..name_parser.parser import InvalidNameException, InvalidShowException, NameParser
-from ..scene_exceptions import has_season_exceptions
+from ..scene_exceptions import ReleaseMap
from ..show_name_helpers import get_show_names_all_possible
from ..sgdatetime import SGDatetime
from ..tv import TVEpisode, TVShow
@@ -1743,7 +1743,8 @@ class TorrentProvider(GenericProvider):
return []
show_obj = ep_obj.show_obj
- season = (-1, ep_obj.season)[has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
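+ # bool-indexed pair: use the real season number when that season has named exceptions, else -1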
+ season = (-1, ep_obj.season)[ReleaseMap().has_season_exceptions(
+ ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
ep_dict = self._ep_dict(ep_obj)
sp_detail = (show_obj.air_by_date or show_obj.is_sports) and str(ep_obj.airdate).split('-')[0] or \
(show_obj.is_anime and ep_obj.scene_absolute_number or
@@ -1779,7 +1780,8 @@ class TorrentProvider(GenericProvider):
return []
show_obj = ep_obj.show_obj
- season = (-1, ep_obj.season)[has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
+ season = (-1, ep_obj.season)[ReleaseMap().has_season_exceptions(
+ ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
if show_obj.air_by_date or show_obj.is_sports:
ep_detail = [str(ep_obj.airdate).replace('-', sep_date)]\
if 'date_detail' not in kwargs else kwargs['date_detail'](ep_obj.airdate)
diff --git a/sickgear/providers/newznab.py b/sickgear/providers/newznab.py
index b9cac5e2..244b92e2 100644
--- a/sickgear/providers/newznab.py
+++ b/sickgear/providers/newznab.py
@@ -34,7 +34,7 @@ from ..network_timezones import SG_TIMEZONE
from ..sgdatetime import SGDatetime
from ..search import get_aired_in_season, get_wanted_qualities
from ..show_name_helpers import get_show_names
-from ..scene_exceptions import has_season_exceptions
+from ..scene_exceptions import ReleaseMap
from ..tv import TVEpisode, TVShow
from lib.dateutil import parser
@@ -470,7 +470,7 @@ class NewznabProvider(generic.NZBProvider):
# id search
params = base_params.copy()
use_id = False
- if not has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
+ if not ReleaseMap().has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
for i in sickgear.TVInfoAPI().all_sources:
if i in ep_obj.show_obj.ids and 0 < ep_obj.show_obj.ids[i]['id'] and i in self.caps:
params[self.caps[i]] = ep_obj.show_obj.ids[i]['id']
@@ -528,7 +528,7 @@ class NewznabProvider(generic.NZBProvider):
# id search
params = base_params.copy()
use_id = False
- if not has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
+ if not ReleaseMap().has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
for i in sickgear.TVInfoAPI().all_sources:
if i in ep_obj.show_obj.ids and 0 < ep_obj.show_obj.ids[i]['id'] and i in self.caps:
params[self.caps[i]] = ep_obj.show_obj.ids[i]['id']
diff --git a/sickgear/scene_exceptions.py b/sickgear/scene_exceptions.py
index 148285cc..f89abe76 100644
--- a/sickgear/scene_exceptions.py
+++ b/sickgear/scene_exceptions.py
@@ -16,12 +16,10 @@
from collections import defaultdict
-import datetime
import io
import os
import re
import sys
-import threading
import traceback
import sickgear
@@ -31,6 +29,7 @@ from . import db, helpers, logger, name_cache
from .anime import create_anidb_obj
from .classes import OrderedDefaultdict
from .indexers.indexer_config import TVINFO_TVDB
+from .scheduler import Job
from .sgdatetime import SGDatetime
import lib.rarfile.rarfile as rarfile
@@ -41,544 +40,533 @@ from six import iteritems
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
- from typing import AnyStr, List, Tuple, Union
+ from typing import AnyStr, List, Tuple, Optional, Union
+ from .tv import TVShow
-exception_dict = {}
-anidb_exception_dict = {}
-xem_exception_dict = {}
-xem_ids_list = defaultdict(list)
-
-exceptionsCache = {}
-exceptionsSeasonCache = {}
-
-exceptionLock = threading.Lock()
+MEMCACHE = {}
-def should_refresh(name, max_refresh_age_secs=86400, remaining=False):
- # type: (AnyStr, int, bool) -> Union[bool, int]
- """
+class ReleaseMap(Job):
+ def __init__(self):
+ super(ReleaseMap, self).__init__(self.job_run, thread_lock=True, kwargs={})
- :param name: name
- :param max_refresh_age_secs:
- :param remaining: True to return remaining seconds
- :return:
- """
- my_db = db.DBConnection()
- rows = my_db.select('SELECT last_refreshed FROM scene_exceptions_refresh WHERE list = ?', [name])
- if rows:
- last_refresh = int(rows[0]['last_refreshed'])
- if remaining:
- time_left = (last_refresh + max_refresh_age_secs - SGDatetime.timestamp_near())
- return (0, time_left)[time_left > 0]
- return SGDatetime.timestamp_near() > last_refresh + max_refresh_age_secs
- return True
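+ # MEMCACHE keys: alt names per show and season, seasons having alt names, xem-mapped show ids per tvid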
+ MEMCACHE.setdefault('release_map', {})
+ MEMCACHE.setdefault('release_map_season', {})
+ MEMCACHE.setdefault('release_map_xem', defaultdict(list))
+ def job_run(self):
-def set_last_refresh(name):
- """
+ # update xem id lists
+ self.fetch_xem_ids()
- :param name: name
- :type name: AnyStr
- """
- my_db = db.DBConnection()
- my_db.upsert('scene_exceptions_refresh',
- {'last_refreshed': SGDatetime.timestamp_near()},
- {'list': name})
+ # update release exceptions
+ self.fetch_exceptions()
+ def fetch_xem_ids(self):
-def has_season_exceptions(tvid, prodid, season):
- get_scene_exceptions(tvid, prodid, season=season)
- if (tvid, prodid) in exceptionsCache and -1 < season and season in exceptionsCache[(tvid, prodid)]:
+ for cur_tvid, cur_name in iteritems(sickgear.TVInfoAPI().xem_supported_sources):
+ xem_ids = self._get_xem_ids(cur_name, sickgear.TVInfoAPI(cur_tvid).config['xem_origin'])
+ if len(xem_ids):
+ MEMCACHE['release_map_xem'][cur_tvid] = xem_ids
+
+ @staticmethod
+ def _get_xem_ids(infosrc_name, xem_origin):
+ # type: (AnyStr, AnyStr) -> List
+ """
+
+ :param infosrc_name:
+ :param xem_origin:
+ """
+ result = []
+
+ url = 'https://thexem.info/map/havemap?origin=%s' % xem_origin
+
+ task = 'Fetching show ids with%s xem scene mapping%s for origin'
+ logger.log(f'{task % ("", "s")} {infosrc_name}')
+ parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
+ if not isinstance(parsed_json, dict) or not parsed_json:
+ logger.error(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
+ f' Unable to get URL: {url}')
+ else:
+ if 'success' == parsed_json.get('result', '') and 'data' in parsed_json:
+ result = list(set(filter(lambda prodid: 0 < prodid,
+ map(lambda pid: helpers.try_int(pid), parsed_json['data']))))
+ if 0 == len(result):
+ logger.warning(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
+ f' no data items parsed from URL: {url}')
+
+ logger.log(f'Finished {task.lower() % (f" {len(result)}", helpers.maybe_plural(result))} {infosrc_name}')
+ return result
+
+ def _xem_exceptions_fetcher(self):
+
+ result = {}
+
+ xem_list = 'xem_us'
+ for cur_show_obj in sickgear.showList:
+ if cur_show_obj.is_anime and not cur_show_obj.paused:
+ xem_list = 'xem'
+ break
+
+ if self._should_refresh(xem_list):
+ for cur_tvid in [_i for _i in sickgear.TVInfoAPI().sources
+ if 'xem_origin' in sickgear.TVInfoAPI(_i).config]:
+ logger.log(f'Checking for XEM scene exception updates for {sickgear.TVInfoAPI(cur_tvid).name}')
+
+ url = 'https://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\
+ % (sickgear.TVInfoAPI(cur_tvid).config['xem_origin'], ('&language=us', '')['xem' == xem_list])
+
+ parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
+ if not parsed_json:
+ logger.error(f'Check scene exceptions update failed for {sickgear.TVInfoAPI(cur_tvid).name},'
+ f' Unable to get URL: {url}')
+ continue
+
+ if 'failure' == parsed_json['result']:
+ continue
+
+ for cur_prodid, cur_names in iteritems(parsed_json['data']):
+ try:
+ result[(cur_tvid, int(cur_prodid))] = cur_names
+ except (BaseException, Exception):
+ continue
+
+ self._set_last_refresh(xem_list)
+
+ return result
+
+ def _anidb_exceptions_fetcher(self):
+
+ result = {}
+
+ if self._should_refresh('anidb'):
+ logger.log('Checking for AniDB scene exception updates')
+ for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList):
+ try:
+ anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True)
+ except (BaseException, Exception):
+ continue
+ if anime.name and anime.name != cur_show_obj.name:
+ result[(cur_show_obj.tvid, cur_show_obj.prodid)] = [{anime.name: -1}]
+
+ self._set_last_refresh('anidb')
+
+ return result
+
+ def fetch_exceptions(self):
+ """
+ Looks up release exceptions on GitHub, XEM, and AniDB, parses them into a dict, and inserts them into the
+ scene_exceptions table in cache.db. Finally, clears the scene name cache.
+ """
+ def _merge_exceptions(source, dest):
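+ # names from source are placed ahead of any already gathered in dest for the same show key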
+ for cur_ex in source:
+ dest[cur_ex] = source[cur_ex] + ([] if cur_ex not in dest else dest[cur_ex])
+
+ exceptions = self._xem_exceptions_fetcher() # XEM scene exceptions
+ _merge_exceptions(self._anidb_exceptions_fetcher(), exceptions) # AniDB scene exceptions
+ _merge_exceptions(self._github_exceptions_fetcher(), exceptions) # GitHub stored release exceptions
+
+ exceptions_custom, count_updated_numbers, min_remain_iv = self._custom_exceptions_fetcher()
+ _merge_exceptions(exceptions_custom, exceptions) # Custom exceptions
+
+ is_changed_exceptions = False
+
+ # write all the exceptions we got off the net into the database
+ my_db = db.DBConnection()
+ cl = []
+ for cur_tvid_prodid in exceptions:
+
+ # get a list of the existing exceptions for this ID
+ existing_exceptions = [{_x['show_name']: _x['season']} for _x in
+ my_db.select('SELECT show_name, season'
+ ' FROM [scene_exceptions]'
+ ' WHERE indexer = ? AND indexer_id = ?',
+ list(cur_tvid_prodid))]
+
+ # if this exception isn't already in the DB then add it
+ for cur_ex_dict in filter(lambda e: e not in existing_exceptions, exceptions[cur_tvid_prodid]):
+ try:
+ exception, season = next(iteritems(cur_ex_dict))
+ except (BaseException, Exception):
+ logger.error('release exception error')
+ logger.error(traceback.format_exc())
+ continue
+
+ cl.append(['INSERT INTO [scene_exceptions]'
+ ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
+ list(cur_tvid_prodid) + [exception, season]])
+ is_changed_exceptions = True
+
+ if cl:
+ my_db.mass_action(cl)
+ name_cache.build_name_cache(update_only_scene=True)
+
+ # since this could invalidate the results of the cache we clear it out after updating
+ if is_changed_exceptions:
+ logger.log('Updated release exceptions')
+ else:
+ logger.log('No release exceptions update needed')
+
+ # cleanup
+ exceptions.clear()
+
+ return is_changed_exceptions, count_updated_numbers, min_remain_iv
+
+ def _github_exceptions_fetcher(self):
+ """
+ Looks up the exceptions on GitHub
+ """
+
+ # global exception_dict
+ result = {}
+
+ # exceptions are stored on GitHub pages
+ for cur_tvid in sickgear.TVInfoAPI().sources:
+ if self._should_refresh(sickgear.TVInfoAPI(cur_tvid).name):
+ url = sickgear.TVInfoAPI(cur_tvid).config.get('scene_url')
+ if not url:
+ continue
+
+ logger.log(f'Checking for release exception updates for {sickgear.TVInfoAPI(cur_tvid).name}')
+
+ url_data = helpers.get_url(url)
+ if None is url_data:
+ # When None is urlData, trouble connecting to GitHub
+ logger.error(f'Check release exceptions update failed. Unable to get URL: {url}')
+ continue
+
+ else:
+ self._set_last_refresh(sickgear.TVInfoAPI(cur_tvid).name)
+
+ # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
+ for cur_line in url_data.splitlines():
+ prodid, sep, aliases = cur_line.partition(':')
+
+ if not aliases:
+ continue
+
+ prodid = int(prodid)
+
+ # regex out the list of shows, taking \' into account
+ alias_list = [{re.sub(r'\\(.)', r'\1', _x): -1} for _x in
+ re.findall(r"'(.*?)(?<!\\)',?", aliases)]
+ result[(cur_tvid, prodid)] = alias_list
+
+ return result
+
+ def _parse_exceptions(self, data):
+ # type: (AnyStr) -> tuple
+ """
+
+ :param data: json text
+ """
+ # handle data
+ from .scene_numbering import find_scene_numbering, set_scene_numbering_helper
+ from .tv import TVidProdid
+
+ result = {}
+ count_updated_numbers = 0
+ for cur_tvid_prodid, cur_season_data in iteritems(data):
+ show_obj = sickgear.helpers.find_show_by_id(cur_tvid_prodid, no_mapped_ids=True)
+ if not show_obj:
+ continue
+
+ used = set()
+ for cur_for_season, cur_data in iteritems(cur_season_data):
+ cur_for_season = helpers.try_int(cur_for_season, None)
+ tvid, prodid = TVidProdid(cur_tvid_prodid).tuple
+ if cur_data.get('n'): # alt names
+ result.setdefault((tvid, prodid), [])
+ result[(tvid, prodid)] += [{_name: cur_for_season} for _name in cur_data.get('n')]
+
+ for cur_update in cur_data.get('se') or []:
+ for cur_for_episode, cur_se_range in iteritems(cur_update): # scene episode alt numbers
+ cur_for_episode = helpers.try_int(cur_for_episode, None)
+
+ target_season, episode_range = cur_se_range.split('x')
+ scene_episodes = [int(_x) for _x in episode_range.split('-')
+ if None is not helpers.try_int(_x, None)]
+
+ if 2 == len(scene_episodes):
+ desc = scene_episodes[0] > scene_episodes[1]
+ if desc: # handle a descending range case
+ scene_episodes.reverse()
+ scene_episodes = list_range(*[scene_episodes[0], scene_episodes[1] + 1])
+ if desc:
+ scene_episodes.reverse()
+
+ target_season = helpers.try_int(target_season, None)
+ for cur_target_episode in scene_episodes:
+ sn = find_scene_numbering(tvid, prodid, cur_for_season, cur_for_episode)
+ used.add((cur_for_season, cur_for_episode, target_season, cur_target_episode))
+ if sn and ((cur_for_season, cur_for_episode) + sn) not in used \
+ and (cur_for_season, cur_for_episode) not in used:
+ logger.debug(f'Skipped setting "{show_obj.unique_name}"'
+ f' episode {cur_for_season}x{cur_for_episode}'
+ f' to target a release {target_season}x{cur_target_episode}'
+ f' because set to {sn[0]}x{sn[1]}')
+ else:
+ used.add((cur_for_season, cur_for_episode))
+ if not sn or sn != (target_season, cur_target_episode): # not already set
+ result = set_scene_numbering_helper(
+ tvid, prodid, for_season=cur_for_season, for_episode=cur_for_episode,
+ scene_season=target_season, scene_episode=cur_target_episode)
+ if result.get('success'):
+ count_updated_numbers += 1
+
+ cur_for_episode += 1
+
+ return result, count_updated_numbers
+
+ @staticmethod
+ def _should_refresh(name, max_refresh_age_secs=86400, remaining=False):
+ # type: (AnyStr, int, bool) -> Union[bool, int]
+ """
+
+ :param name: name
+ :param max_refresh_age_secs:
+ :param remaining: True to return remaining seconds
+ :return:
+ """
+ my_db = db.DBConnection()
+ rows = my_db.select('SELECT last_refreshed FROM [scene_exceptions_refresh] WHERE list = ?', [name])
+ if rows:
+ last_refresh = int(rows[0]['last_refreshed'])
+ if remaining:
+ time_left = (last_refresh + max_refresh_age_secs - SGDatetime.timestamp_near())
+ return (0, time_left)[time_left > 0]
+ return SGDatetime.timestamp_near() > last_refresh + max_refresh_age_secs
return True
- return False
+ @staticmethod
+ def _set_last_refresh(name):
+ # type: (AnyStr) -> None
+ """
-def get_scene_exceptions(tvid, prodid, season=-1):
- """
- Given a indexer_id, return a list of all the scene exceptions.
- :param tvid: tvid
- :type tvid: int
- :param prodid: prodid
- :type prodid: int or long
- :param season: season number
- :type season: int
- :return:
- :rtype: List
- """
- global exceptionsCache
- exceptions_list = []
-
- if (tvid, prodid) not in exceptionsCache or season not in exceptionsCache[(tvid, prodid)]:
+ :param name: name
+ :type name: AnyStr
+ """
my_db = db.DBConnection()
- exceptions = my_db.select('SELECT show_name'
- ' FROM scene_exceptions'
+ my_db.upsert('scene_exceptions_refresh',
+ {'last_refreshed': SGDatetime.timestamp_near()},
+ {'list': name})
+
+ @staticmethod
+ def update_exceptions(show_obj, release_exceptions):
+ # type: (TVShow, list) -> None
+ """
+ Given a show object and a list of alternative names,
+ update MEMCACHE['release_map'], the db, and rebuild name_cache.
+ """
+ logger.log(f'Updating release exceptions for {show_obj.unique_name or show_obj.name}')
+
+ my_db = db.DBConnection()
+ my_db.action('DELETE FROM [scene_exceptions]'
+ ' WHERE indexer = ? AND indexer_id = ?',
+ [show_obj.tvid, show_obj.prodid])
+
+ # A change has been made to the scene exception list. Clear the cache to make this visible
+ MEMCACHE['release_map'][(show_obj.tvid, show_obj.prodid)] = defaultdict(list)
+
+ for cur_ex in release_exceptions:
+
+ season, alt_name = cur_ex.split('|', 1)
+ try:
+ season = int(season)
+ except (BaseException, Exception):
+ logger.error(f'invalid season for release exception: {show_obj.tvid_prodid} - {season}:{alt_name}')
+ continue
+
+ MEMCACHE['release_map'][(show_obj.tvid, show_obj.prodid)][season].append(alt_name)
+
+ my_db.action('INSERT INTO [scene_exceptions]'
+ ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
+ [show_obj.tvid, show_obj.prodid, alt_name, season])
+
+ sickgear.name_cache.build_name_cache(update_only_scene=True)
+
+ def has_season_exceptions(self, tvid, prodid, season):
+ # type: (int, int, int) -> bool
+
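+ # prime the cache for this show and season, then check for a recorded exception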
+ self.get_alt_names(tvid, prodid, season)
+ return (-1 < season) and season in MEMCACHE['release_map'].get((tvid, prodid), {})
+
+ def get_alt_names(self, tvid, prodid, season=-1):
+ # type: (int, int, Optional[int]) -> List
+ """
+ Return a list of alternative show names from the db for all seasons, or for a specific season,
+ updating MEMCACHE['release_map'] as a side effect.
+
+ :param tvid: show tvid
+ :param prodid: show prodid
+ :param season: optional season number
+ """
+ alt_names = MEMCACHE['release_map'].get((tvid, prodid), {}).get(season, [])
+
+ if not alt_names:
+ my_db = db.DBConnection()
+ exceptions = my_db.select('SELECT show_name'
+ ' FROM [scene_exceptions]'
+ ' WHERE indexer = ? AND indexer_id = ?'
+ ' AND season = ?',
+ [tvid, prodid, season])
+ if exceptions:
+ alt_names = list(set([_ex['show_name'] for _ex in exceptions]))
+
+ if (tvid, prodid) not in MEMCACHE['release_map']:
+ MEMCACHE['release_map'][(tvid, prodid)] = {}
+ MEMCACHE['release_map'][(tvid, prodid)][season] = alt_names
+
+ if 1 == season: # if we were looking for season 1 we can add generic names
+ alt_names += self.get_alt_names(tvid, prodid)
+
+ return alt_names
+
+ @staticmethod
+ def get_show_exceptions(tvid_prodid):
+ # type: (AnyStr) -> OrderedDefaultdict
+ """
+ return a scene exceptions dict for a show
+
+ :param tvid_prodid: a show tvid:prodid
+ """
+ exceptions_dict = OrderedDefaultdict(list)
+
+ from .tv import TVidProdid
+
+ my_db = db.DBConnection()
+ exceptions = my_db.select('SELECT show_name, season'
+ ' FROM [scene_exceptions]'
' WHERE indexer = ? AND indexer_id = ?'
- ' AND season = ?',
- [tvid, prodid, season])
+ ' ORDER BY season DESC, show_name DESC',
+ TVidProdid(tvid_prodid).list)
+
+ exceptions_seasons = []
if exceptions:
- exceptions_list = list(set([cur_exception['show_name'] for cur_exception in exceptions]))
+ for cur_ex in exceptions:
+ # order as: s* first, then season desc, then show_name desc (so years in names fall newest on top)
+ if -1 == cur_ex['season']:
+ exceptions_dict[-1].append(cur_ex['show_name'])
+ else:
+ exceptions_seasons += [cur_ex]
- if (tvid, prodid) not in exceptionsCache:
- exceptionsCache[(tvid, prodid)] = {}
- exceptionsCache[(tvid, prodid)][season] = exceptions_list
- else:
- exceptions_list = exceptionsCache[(tvid, prodid)][season]
+ for cur_ex in exceptions_seasons:
+ exceptions_dict[cur_ex['season']].append(cur_ex['show_name'])
- if 1 == season: # if we where looking for season 1 we can add generic names
- exceptions_list += get_scene_exceptions(tvid, prodid, season=-1)
+ return exceptions_dict
- return exceptions_list
+ @staticmethod
+ def get_exception_seasons(tvid, prodid):
+ # type: (int, int) -> List[int]
+ """
+ return a list of season numbers that have alternative names
+ :param tvid: show tvid
+ :param prodid: show prodid
+ """
+ exception_seasons = MEMCACHE['release_map_season'].get((tvid, prodid), [])
+ if not exception_seasons:
+ my_db = db.DBConnection()
+ sql_result = my_db.select('SELECT DISTINCT(season) AS season'
+ ' FROM [scene_exceptions]'
+ ' WHERE indexer = ? AND indexer_id = ?',
+ [tvid, prodid])
+ if sql_result:
+ exception_seasons = list(set([int(_x['season']) for _x in sql_result]))
-def get_all_scene_exceptions(tvid_prodid):
- """
+ if (tvid, prodid) not in MEMCACHE['release_map_season']:
+ MEMCACHE['release_map_season'][(tvid, prodid)] = {}
- :param tvid_prodid:
- :type tvid_prodid: AnyStr
- :return:
- :rtype: OrderedDefaultdict
- """
- exceptions_dict = OrderedDefaultdict(list)
+ MEMCACHE['release_map_season'][(tvid, prodid)] = exception_seasons
- from sickgear.tv import TVidProdid
-
- my_db = db.DBConnection()
- exceptions = my_db.select('SELECT show_name,season'
- ' FROM scene_exceptions'
- ' WHERE indexer = ? AND indexer_id = ?'
- ' ORDER BY season DESC, show_name DESC',
- TVidProdid(tvid_prodid).list)
-
- exceptions_seasons = []
- if exceptions:
- for cur_exception in exceptions:
- # order as, s*, and then season desc, show_name also desc (so years in names may fall newest on top)
- if -1 == cur_exception['season']:
- exceptions_dict[cur_exception['season']].append(cur_exception['show_name'])
- else:
- exceptions_seasons += [cur_exception]
-
- for cur_exception in exceptions_seasons:
- exceptions_dict[cur_exception['season']].append(cur_exception['show_name'])
-
- return exceptions_dict
-
-
-def get_scene_seasons(tvid, prodid):
- """
- return a list of season numbers that have scene exceptions
- :param tvid: tvid
- :type tvid: int
- :param prodid: prodid
- :type prodid: int or long
- :return:
- :rtype: List
- """
- global exceptionsSeasonCache
- exception_season_list = []
-
- if (tvid, prodid) not in exceptionsSeasonCache:
- my_db = db.DBConnection()
- sql_result = my_db.select('SELECT DISTINCT(season) AS season'
- ' FROM scene_exceptions'
- ' WHERE indexer = ? AND indexer_id = ?',
- [tvid, prodid])
- if sql_result:
- exception_season_list = list(set([int(x['season']) for x in sql_result]))
-
- if (tvid, prodid) not in exceptionsSeasonCache:
- exceptionsSeasonCache[(tvid, prodid)] = {}
-
- exceptionsSeasonCache[(tvid, prodid)] = exception_season_list
- else:
- exception_season_list = exceptionsSeasonCache[(tvid, prodid)]
-
- return exception_season_list
+ return exception_seasons
def get_scene_exception_by_name(show_name):
+ # type: (AnyStr) -> List[None, None, None] or List[int, int, int]
"""
:param show_name: show name
- :type show_name: AnyStr
- :return:
- :rtype: Tuple[None, None, None] or Tuple[int, int or long, int]
"""
- return get_scene_exception_by_name_multiple(show_name)[0]
+ return _get_scene_exception_by_name_multiple(show_name)[0]
-def get_scene_exception_by_name_multiple(show_name):
+def _get_scene_exception_by_name_multiple(show_name):
+ # type: (AnyStr) -> List[List[None, None, None] or List[int, int, int]]
"""
:param show_name: show name
- :type show_name: AnyStr
:return: (tvid, prodid, season) of the exception, None if no exception is present.
- :rtype: Tuple[None, None, None] or Tuple[int, int or long, int]
"""
try:
exception_result = name_cache.sceneNameCache[helpers.full_sanitize_scene_name(show_name)]
- return [exception_result]
except (BaseException, Exception):
return [[None, None, None]]
-
-def retrieve_exceptions():
- """
- Looks up the exceptions on github, parses them into a dict, and inserts them into the
- scene_exceptions table in cache.db. Also clears the scene name cache.
- """
- global exception_dict, anidb_exception_dict, xem_exception_dict
-
- # exceptions are stored on GitHub pages
- for tvid in sickgear.TVInfoAPI().sources:
- if should_refresh(sickgear.TVInfoAPI(tvid).name):
- logger.log(f'Checking for scene exception updates for {sickgear.TVInfoAPI(tvid).name}')
-
- url = sickgear.TVInfoAPI(tvid).config.get('scene_url')
- if not url:
- continue
-
- url_data = helpers.get_url(url)
- if None is url_data:
- # When None is urlData, trouble connecting to github
- logger.error(f'Check scene exceptions update failed. Unable to get URL: {url}')
- continue
-
- else:
- set_last_refresh(sickgear.TVInfoAPI(tvid).name)
-
- # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
- for cur_line in url_data.splitlines():
- cur_line = cur_line
- prodid, sep, aliases = cur_line.partition(':')
-
- if not aliases:
- continue
-
- prodid = int(prodid)
-
- # regex out the list of shows, taking \' into account
- # alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
- desc = scene_episodes[0] > scene_episodes[1]
- if desc: # handle a descending range case
- scene_episodes.reverse()
- scene_episodes = list_range(*[scene_episodes[0], scene_episodes[1] + 1])
- if desc:
- scene_episodes.reverse()
-
- target_season = helpers.try_int(target_season, None)
- for target_episode in scene_episodes:
- sn = find_scene_numbering(tvid, prodid, for_season, for_episode)
- used.add((for_season, for_episode, target_season, target_episode))
- if sn and ((for_season, for_episode) + sn) not in used \
- and (for_season, for_episode) not in used:
- logger.debug(f'Skipped setting "{show_obj.unique_name}" episode {for_season}x{for_episode}'
- f' to target a release {target_season}x{target_episode}'
- f' because set to {sn[0]}x{sn[1]}')
- else:
- used.add((for_season, for_episode))
- if not sn or sn != (target_season, target_episode): # not already set
- result = set_scene_numbering_helper(
- tvid, prodid, for_season=for_season, for_episode=for_episode,
- scene_season=target_season, scene_episode=target_episode)
- if result.get('success'):
- cnt_updated_numbers += 1
-
- for_episode = for_episode + 1
-
- return custom_exception_dict, cnt_updated_numbers, should_refresh(src_id, iv, remaining=True)
-
-
-def _anidb_exceptions_fetcher():
- global anidb_exception_dict
-
- if should_refresh('anidb'):
- logger.log('Checking for AniDB scene exception updates')
- for cur_show_obj in filter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickgear.showList):
- try:
- anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True)
- except (BaseException, Exception):
- continue
- if anime.name and anime.name != cur_show_obj.name:
- anidb_exception_dict[(cur_show_obj.tvid, cur_show_obj.prodid)] = [{anime.name: -1}]
-
- set_last_refresh('anidb')
- return anidb_exception_dict
-
-
-def _xem_exceptions_fetcher():
- global xem_exception_dict
-
- xem_list = 'xem_us'
- for cur_show_obj in sickgear.showList:
- if cur_show_obj.is_anime and not cur_show_obj.paused:
- xem_list = 'xem'
- break
-
- if should_refresh(xem_list):
- for tvid in [i for i in sickgear.TVInfoAPI().sources if 'xem_origin' in sickgear.TVInfoAPI(i).config]:
- logger.log(f'Checking for XEM scene exception updates for {sickgear.TVInfoAPI(tvid).name}')
-
- url = 'https://thexem.info/map/allNames?origin=%s%s&seasonNumbers=1'\
- % (sickgear.TVInfoAPI(tvid).config['xem_origin'], ('&language=us', '')['xem' == xem_list])
-
- parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
- if not parsed_json:
- logger.error(f'Check scene exceptions update failed for {sickgear.TVInfoAPI(tvid).name},'
- f' Unable to get URL: {url}')
- continue
-
- if 'failure' == parsed_json['result']:
- continue
-
- for prodid, names in iteritems(parsed_json['data']):
- try:
- xem_exception_dict[(tvid, int(prodid))] = names
- except (BaseException, Exception):
- continue
-
- set_last_refresh(xem_list)
-
- return xem_exception_dict
-
-
-def _xem_get_ids(infosrc_name, xem_origin):
- """
-
- :param infosrc_name:
- :type infosrc_name: AnyStr
- :param xem_origin:
- :type xem_origin: AnyStr
- :return:
- :rtype: List
- """
- xem_ids = []
-
- url = 'https://thexem.info/map/havemap?origin=%s' % xem_origin
-
- task = 'Fetching show ids with%s xem scene mapping%s for origin'
- logger.log(f'{task % ("", "s")} {infosrc_name}')
- parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
- if not isinstance(parsed_json, dict) or not parsed_json:
- logger.error(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
- f' Unable to get URL: {url}')
- else:
- if 'success' == parsed_json.get('result', '') and 'data' in parsed_json:
- xem_ids = list(set(filter(lambda prodid: 0 < prodid,
- map(lambda pid: helpers.try_int(pid), parsed_json['data']))))
- if 0 == len(xem_ids):
- logger.warning(f'Failed {task.lower() % ("", "s")} {infosrc_name},'
- f' no data items parsed from URL: {url}')
-
- logger.log(f'Finished {task.lower() % (f" {len(xem_ids)}", helpers.maybe_plural(xem_ids))} {infosrc_name}')
- return xem_ids
-
-
-def get_xem_ids():
- global xem_ids_list
-
- for tvid, name in iteritems(sickgear.TVInfoAPI().xem_supported_sources):
- xem_ids = _xem_get_ids(name, sickgear.TVInfoAPI(tvid).config['xem_origin'])
- if len(xem_ids):
- xem_ids_list[tvid] = xem_ids
+ return [exception_result]
def has_abs_episodes(ep_obj=None, name=None):
+ # type: (Optional[sickgear.tv.TVEpisode], Optional[AnyStr]) -> bool
"""
:param ep_obj: episode object
- :type ep_obj: sickgear.tv.TVEpisode or None
:param name: name
- :type name: AnyStr
- :return:
- :rtype: bool
"""
- return any((name or ep_obj.show_obj.name or '').lower().startswith(x.lower()) for x in [
+ return any((name or ep_obj.show_obj.name or '').lower().startswith(_x.lower()) for _x in [
'The Eighties', 'The Making of the Mob', 'The Night Of', 'Roots 2016', 'Trepalium'
])
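
The removals above complete the move of scene_exceptions from module-level functions to the new ReleaseMap class. A sketch of the mapping, with method signatures inferred only from call sites elsewhere in this diff (the class definition itself is not shown here, so this is not authoritative):

    # inferred old-API -> new-API mapping, from call sites in this patch
    get_xem_ids()                                -> ReleaseMap().fetch_xem_ids()
    retrieve_exceptions()                        -> ReleaseMap().fetch_exceptions()
    get_scene_exceptions(tvid, prodid, season)   -> ReleaseMap().get_alt_names(tvid, prodid, season)
    get_all_scene_exceptions(tvid_prodid)        -> ReleaseMap().get_show_exceptions(tvid_prodid)
    has_season_exceptions(tvid, prodid, season)  -> ReleaseMap().has_season_exceptions(tvid, prodid, season)
    update_scene_exceptions(tvid, prodid, names) -> ReleaseMap().update_exceptions(show_obj, exceptions_list)
    xem_ids_list (module global)                 -> scene_exceptions.MEMCACHE['release_map_xem']
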
diff --git a/sickgear/scene_numbering.py b/sickgear/scene_numbering.py
index 2afc2914..b2e01223 100644
--- a/sickgear/scene_numbering.py
+++ b/sickgear/scene_numbering.py
@@ -19,17 +19,14 @@
# @copyright: Dermot Buckley
#
-import datetime
import traceback
from sqlite3 import Row
from exceptions_helper import ex
-
import sickgear
from . import db, logger
from .helpers import try_int
-from .scene_exceptions import xem_ids_list
from .sgdatetime import SGDatetime
# noinspection PyUnreachableCode
@@ -774,7 +771,8 @@ def xem_refresh(tvid, prodid, force=False):
tvid, prodid = int(tvid), int(prodid)
tvinfo = sickgear.TVInfoAPI(tvid)
- if 'xem_origin' not in tvinfo.config or prodid not in xem_ids_list.get(tvid, []):
+ if 'xem_origin' not in tvinfo.config \
+ or prodid not in sickgear.scene_exceptions.MEMCACHE['release_map_xem'].get(tvid, []):
return
xem_origin = tvinfo.config['xem_origin']
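
The guard above implies MEMCACHE['release_map_xem'] keeps the shape of the removed xem_ids_list global: a dict keyed by TV info source id, each value a list of product ids known to TheXEM. A minimal sketch (the ids are illustrative only):

    # assumed shape, mirroring the old xem_ids_list global
    MEMCACHE['release_map_xem'] = {
        1: [79604, 295243],  # tvid -> prodids that have XEM scene mappings
    }
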
diff --git a/sickgear/scheduler.py b/sickgear/scheduler.py
index 88bc2976..130e5771 100644
--- a/sickgear/scheduler.py
+++ b/sickgear/scheduler.py
@@ -24,10 +24,12 @@ import traceback
from . import logger
from exceptions_helper import ex
+import sickgear
+
class Scheduler(threading.Thread):
def __init__(self, action, cycle_time=datetime.timedelta(minutes=10), run_delay=datetime.timedelta(minutes=0),
- start_time=None, thread_name="ScheduledThread", silent=True, prevent_cycle_run=None, paused=False):
+ start_time=None, thread_name='ScheduledThread', silent=True, prevent_cycle_run=None, paused=False):
super(Scheduler, self).__init__()
self.last_run = datetime.datetime.now() + run_delay - cycle_time
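
A note on the back-dated last_run above: time_left() later computes cycle_time - (now - last_run), so seeding last_run at now + run_delay - cycle_time makes the first run come due run_delay after the thread starts. For example:

    import datetime

    cycle_time = datetime.timedelta(minutes=10)
    run_delay = datetime.timedelta(minutes=2)
    # back-dated 8 minutes, so time_left() = 10 - 8 = 2 minutes:
    # the first run fires run_delay after the scheduler starts
    last_run = datetime.datetime.now() + run_delay - cycle_time
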
@@ -41,10 +43,18 @@ class Scheduler(threading.Thread):
self._stopper = threading.Event()
self._unpause = threading.Event()
if not paused:
- self._unpause.set()
+ self.unpause()
self.lock = threading.Lock()
self.force = False
+ @property
+ def is_running_job(self):
+ # type: (...) -> bool
+ """
+ Return running state of the scheduled action
+ """
+ return self.action.amActive
+
def pause(self):
self._unpause.clear()
@@ -69,7 +79,7 @@ class Scheduler(threading.Thread):
return self.cycle_time - (datetime.datetime.now() - self.last_run)
def force_run(self):
- if not self.action.amActive:
+ if not self.is_running_job:
self.force = True
return True
return False
@@ -139,3 +149,72 @@ class Scheduler(threading.Thread):
# exiting thread
self._stopper.clear()
self._unpause.clear()
+
+ @staticmethod
+ def blocking_jobs():
+        # type: (...) -> bool
+ """
+ Return description of running jobs, or False if none are running
+
+ These jobs should prevent a restart/shutdown while running.
+ """
+
+ job_report = []
+ if sickgear.process_media_scheduler.is_running_job:
+ job_report.append('Media processing')
+
+ if sickgear.update_show_scheduler.is_running_job:
+ job_report.append(f'{("U", "u")[len(job_report)]}pdating shows data')
+
+        # this idea is not ready for production. issues identified...
+        # 1. many of these jobs just fill a queue, so blocking on them doesn't actually prevent a
+        #    restart/shutdown while the real work runs
+        # 2. if something goes wrong or there is a bad bug in the code, the restart would be prevented
+        #    (meaning no auto/manual update via the UI; the user is forced to kill the process,
+        #    update manually, and restart)
+        # 3. by bad timing alone, the auto-update process may coincide with another blocking thread,
+        #    so it updates but never restarts
+        # therefore, with these issues, the next two lines cannot allow this feature to be brought into service :(
+
+ # if len(job_report):
+ # return '%s %s running' % (' and '.join(job_report), ('are', 'is')[1 == len(job_report)])
+
+ return False
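
For reference, if the commented lines were enabled, the casing trick above would produce messages like these (sketched from the code; as shipped, the method always returns False):

    # job_report                                  -> blocking_jobs()
    # ['Media processing']                        -> 'Media processing is running'
    # ['Updating shows data']                     -> 'Updating shows data is running'
    # ['Media processing', 'updating shows data'] -> 'Media processing and updating shows data are running'
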
+
+
+class Job(object):
+ """
+    The Job class centralises scheduled tasks and their running state
+ """
+ def __init__(self, func, silent=False, thread_lock=False, reentrant_lock=False, args=(), kwargs=None):
+
+ self.amActive = False
+
+ self._func = func
+ self._silent = silent
+ self._args = args
+ self._kwargs = (kwargs, {})[None is kwargs]
+
+ if thread_lock:
+ self.lock = threading.Lock()
+ elif reentrant_lock:
+ self.lock = threading.RLock()
+
+ def run(self):
+
+ if self.amActive and self.__class__.__name__ in ('BacklogSearcher', 'MediaProcess'):
+ logger.log(u'%s is still running, not starting it again' % self.__class__.__name__)
+ return
+
+ if self._func:
+ result, re_raise = None, False
+ try:
+ self.amActive = True
+ result = self._func(*self._args, **self._kwargs)
+            except (BaseException, Exception) as e:
+ re_raise = e
+ finally:
+ self.amActive = False
+ not self._silent and logger.log(u'%s(%s) completed' % (self.__class__.__name__, self._func.__name__))
+ if re_raise:
+ raise re_raise
+
+ return result
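
The remaining files in this patch follow one conversion pattern: a task subclasses Job, hands its worker to the base initialiser, and drops its hand-rolled amActive/lock bookkeeping. A minimal conforming task could look like this (hypothetical class, not part of the patch):

    from sickgear.scheduler import Job

    class ExampleSearcher(Job):
        def __init__(self):
            # Job.run() sets amActive around the call, logs completion unless
            # silent=True, and re-raises any exception after resetting amActive
            super(ExampleSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)

        @staticmethod
        def is_enabled():
            return True

        def job_run(self):
            pass  # the actual work; invoked via Scheduler through action.run()
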
diff --git a/sickgear/search_backlog.py b/sickgear/search_backlog.py
index ecd9b369..1181581b 100644
--- a/sickgear/search_backlog.py
+++ b/sickgear/search_backlog.py
@@ -17,13 +17,13 @@
from __future__ import with_statement, division
import datetime
-import threading
from math import ceil
import sickgear
from . import db, logger, scheduler, search_queue, ui
from .helpers import find_show_by_id
from .providers.generic import GenericProvider
+from .scheduler import Job
from .search import wanted_episodes
from .sgdatetime import SGDatetime
from .tv import TVidProdid, TVEpisode, TVShow
@@ -74,13 +74,12 @@ class BacklogSearchScheduler(scheduler.Scheduler):
return self.action.nextBacklog - now if self.action.nextBacklog > now else datetime.timedelta(seconds=0)
-class BacklogSearcher(object):
+class BacklogSearcher(Job):
def __init__(self):
+ super(BacklogSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
self.last_backlog = self._get_last_backlog()
self.cycle_time = sickgear.BACKLOG_PERIOD
- self.lock = threading.Lock()
- self.amActive = False # type: bool
self.amPaused = False # type: bool
self.amWaiting = False # type: bool
self.forcetype = NORMAL_BACKLOG # type: int
@@ -196,9 +195,6 @@ class BacklogSearcher(object):
:return: nothing
:rtype: None
"""
- if self.amActive and not which_shows:
- logger.debug('Backlog is still running, not starting it again')
- return
if which_shows:
show_list = which_shows
@@ -225,7 +221,6 @@ class BacklogSearcher(object):
return
self._get_last_backlog()
- self.amActive = True
self.amPaused = False
cur_date = datetime.date.today().toordinal()
@@ -328,7 +323,6 @@ class BacklogSearcher(object):
if standard_backlog and not any_torrent_enabled:
self._set_last_runtime(now)
- self.amActive = False
self._reset_progress_indicator()
@staticmethod
@@ -401,7 +395,7 @@ class BacklogSearcher(object):
# noinspection SqlConstantCondition
my_db.action('UPDATE info SET last_backlog=%s WHERE 1=1' % when)
- def run(self):
+ def job_run(self):
try:
force_type = self.forcetype
force = self.force
@@ -409,5 +403,4 @@ class BacklogSearcher(object):
self.force = False
self.search_backlog(force_type=force_type, force=force)
except (BaseException, Exception):
- self.amActive = False
raise
diff --git a/sickgear/search_propers.py b/sickgear/search_propers.py
index 602c5125..87c7ced9 100644
--- a/sickgear/search_propers.py
+++ b/sickgear/search_propers.py
@@ -16,24 +16,21 @@
from __future__ import with_statement
-import threading
import sickgear
+from .scheduler import Job
-class ProperSearcher(object):
+class ProperSearcher(Job):
def __init__(self):
- self.lock = threading.Lock()
- self.amActive = False
+ super(ProperSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
@staticmethod
def is_enabled():
# type: (...) -> bool
return sickgear.DOWNLOAD_PROPERS
- def run(self):
- self.amActive = True
+ @staticmethod
+ def job_run():
propersearch_queue_item = sickgear.search_queue.ProperSearchQueueItem()
sickgear.search_queue_scheduler.action.add_item(propersearch_queue_item)
-
- self.amActive = False
diff --git a/sickgear/search_recent.py b/sickgear/search_recent.py
index f559178f..5ad92515 100644
--- a/sickgear/search_recent.py
+++ b/sickgear/search_recent.py
@@ -16,19 +16,15 @@
from __future__ import with_statement
-import threading
import sickgear
+from .scheduler import Job
-class RecentSearcher(object):
+class RecentSearcher(Job):
def __init__(self):
- self.lock = threading.Lock()
- self.amActive = False
+ super(RecentSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
- def run(self):
- self.amActive = True
+ def job_run(self):
recentsearch_queue_item = sickgear.search_queue.RecentSearchQueueItem()
sickgear.search_queue_scheduler.action.add_item(recentsearch_queue_item)
-
- self.amActive = False
diff --git a/sickgear/show_name_helpers.py b/sickgear/show_name_helpers.py
index f688c1d5..926e9e5d 100644
--- a/sickgear/show_name_helpers.py
+++ b/sickgear/show_name_helpers.py
@@ -25,7 +25,7 @@ import sickgear
from . import common, db, logger
from .helpers import sanitize_scene_name
from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser
-from .scene_exceptions import get_scene_exceptions
+from .scene_exceptions import ReleaseMap
from sg_helpers import scantree
from _23 import quote_plus
@@ -384,10 +384,10 @@ def all_possible_show_names(show_obj, season=-1, force_anime=False):
:return: a list of all the possible show names
"""
- show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:]
- if not show_names: # if we don't have any season specific exceptions fallback to generic exceptions
+ show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid, season)[:]
+ if -1 != season and not show_names: # fallback to generic exceptions if no season specific exceptions
season = -1
- show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:]
+ show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid)[:]
if -1 == season:
show_names.append(show_obj.name)
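
The added -1 != season guard removes a redundant lookup: previously a generic (season -1) request that found nothing re-ran the same generic query. The intended flow, sketched with an illustrative season number:

    season = 3  # hypothetical season-specific request
    show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid, season)
    if -1 != season and not show_names:  # nothing recorded for season 3
        season = -1                      # fall back to the show-wide list
        show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid)
    if -1 == season:
        show_names.append(show_obj.name)  # generic list also includes the show's own name
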
diff --git a/sickgear/show_queue.py b/sickgear/show_queue.py
index e083bb40..8f90d4d2 100644
--- a/sickgear/show_queue.py
+++ b/sickgear/show_queue.py
@@ -70,7 +70,7 @@ class ShowQueue(generic_queue.GenericQueue):
def check_events(self):
if self.daily_update_running and \
- not (self.is_show_update_running() or sickgear.show_update_scheduler.action.amActive):
+ not (self.is_show_update_running() or sickgear.update_show_scheduler.is_running_job):
self.execute_events(DAILY_SHOW_UPDATE_FINISHED_EVENT)
self.daily_update_running = False
@@ -1139,7 +1139,7 @@ class QueueItemAdd(ShowQueueItem):
self.show_obj.tvid, self.show_obj.prodid)
# if "scene" numbering is disabled during add show, output availability to log
if None is not self.scene and not self.show_obj.scene and \
- self.show_obj.prodid in sickgear.scene_exceptions.xem_ids_list[self.show_obj.tvid]:
+ self.show_obj.prodid in sickgear.scene_exceptions.MEMCACHE['release_map_xem'][self.show_obj.tvid]:
logger.log('No scene number mappings found at TheXEM. Therefore, episode scene numbering disabled, '
'edit show and enable it to manually add custom numbers for search and media processing.')
try:
@@ -1179,7 +1179,7 @@ class QueueItemAdd(ShowQueueItem):
# if started with WANTED eps then run the backlog
if WANTED == self.default_status or items_wanted:
logger.log('Launching backlog for this show since episodes are WANTED')
- sickgear.backlog_search_scheduler.action.search_backlog([self.show_obj], prevent_same=True)
+ sickgear.search_backlog_scheduler.action.search_backlog([self.show_obj], prevent_same=True)
ui.notifications.message('Show added/search', 'Adding and searching for episodes of' + msg)
else:
ui.notifications.message('Show added', 'Adding' + msg)
@@ -1253,7 +1253,7 @@ class QueueItemRefresh(ShowQueueItem):
self.show_obj.populate_cache(self.force_image_cache)
# Load XEM data to DB for show
- if self.show_obj.prodid in sickgear.scene_exceptions.xem_ids_list[self.show_obj.tvid]:
+ if self.show_obj.prodid in sickgear.scene_exceptions.MEMCACHE['release_map_xem'][self.show_obj.tvid]:
sickgear.scene_numbering.xem_refresh(self.show_obj.tvid, self.show_obj.prodid)
if 'pausestatus_after' in self.kwargs and None is not self.kwargs['pausestatus_after']:
diff --git a/sickgear/show_updater.py b/sickgear/show_updater.py
index 144398a7..abf20fe6 100644
--- a/sickgear/show_updater.py
+++ b/sickgear/show_updater.py
@@ -23,6 +23,7 @@ from exceptions_helper import ex
import sickgear
from . import db, logger, network_timezones, properFinder, ui
+from .scheduler import Job
# noinspection PyUnreachableCode
if False:
@@ -54,13 +55,12 @@ def clean_ignore_require_words():
pass
-class ShowUpdater(object):
+class ShowUpdater(Job):
def __init__(self):
- self.amActive = False
+ super(ShowUpdater, self).__init__(self.job_run, kwargs={})
- def run(self):
-
- self.amActive = True
+ @staticmethod
+ def job_run():
try:
update_datetime = datetime.datetime.now()
@@ -89,14 +89,14 @@ class ShowUpdater(object):
# update xem id lists
try:
- sickgear.scene_exceptions.get_xem_ids()
+ sickgear.scene_exceptions.ReleaseMap().fetch_xem_ids()
except (BaseException, Exception):
logger.error('xem id list update error')
logger.error(traceback.format_exc())
# update scene exceptions
try:
- sickgear.scene_exceptions.retrieve_exceptions()
+ sickgear.scene_exceptions.ReleaseMap().fetch_exceptions()
except (BaseException, Exception):
logger.error('scene exceptions update error')
logger.error(traceback.format_exc())
@@ -147,7 +147,7 @@ class ShowUpdater(object):
import threading
try:
sickgear.background_mapping_task = threading.Thread(
- name='MAPPINGSUPDATER', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True})
+ name='MAPPINGUPDATES', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True})
sickgear.background_mapping_task.start()
except (BaseException, Exception):
logger.error('missing mapped ids update error')
@@ -224,8 +224,8 @@ class ShowUpdater(object):
logger.log('Added all shows to show queue for full update')
- finally:
- self.amActive = False
+        except (BaseException, Exception):
+ pass
def __del__(self):
pass
diff --git a/sickgear/subtitles.py b/sickgear/subtitles.py
index c8cda3a0..28570bdc 100644
--- a/sickgear/subtitles.py
+++ b/sickgear/subtitles.py
@@ -19,6 +19,7 @@ import datetime
from . import db, helpers, logger
from .common import *
+from .scheduler import Job
import sickgear
@@ -103,24 +104,22 @@ def subtitle_language_filter():
return [language for language in subliminal.language.LANGUAGES if language[2] != ""]
-class SubtitlesFinder(object):
+class SubtitlesFinder(Job):
"""
The SubtitlesFinder will be executed every hour but will not necessarily search
and download subtitles. Only if the defined rule is true
"""
def __init__(self):
- self.amActive = False
+ super(SubtitlesFinder, self).__init__(self.job_run, kwargs={}, thread_lock=True)
@staticmethod
def is_enabled():
return sickgear.USE_SUBTITLES
- def run(self):
+ def job_run(self):
if self.is_enabled():
- self.amActive = True
self._main()
- self.amActive = False
def _main(self):
if 1 > len(sickgear.subtitles.get_enabled_service_list()):
diff --git a/sickgear/version_checker.py b/sickgear/version_checker.py
index aee6ccd1..3f0ea6df 100644
--- a/sickgear/version_checker.py
+++ b/sickgear/version_checker.py
@@ -29,6 +29,7 @@ from exceptions_helper import ex
import sickgear
from . import logger, notifiers, ui
+from .scheduler import (Scheduler, Job)
from .piper import check_pip_outdated
from sg_helpers import cmdline_runner, get_url
@@ -41,12 +42,14 @@ if False:
from typing import Tuple
-class PackagesUpdater(object):
+class PackagesUpdater(Job):
def __init__(self):
+ super(PackagesUpdater, self).__init__(self.job_run, kwargs={})
+
self.install_type = 'Python package updates'
- def run(self, force=False):
+ def job_run(self, force=False):
if not sickgear.EXT_UPDATES \
and self.check_for_new_version(force) \
and sickgear.UPDATE_PACKAGES_AUTO:
@@ -64,6 +67,11 @@ class PackagesUpdater(object):
:returns: True when package install/updates are available
:rtype: bool
"""
+ response = Scheduler.blocking_jobs()
+ if response:
+ logger.log(f'Update skipped because {response}', logger.DEBUG)
+ return False
+
if force and not sickgear.UPDATE_PACKAGES_MENU:
logger.log('Checking not enabled from menu action for %s' % self.install_type)
return False
@@ -100,12 +108,14 @@ class PackagesUpdater(object):
return True
-class SoftwareUpdater(object):
+class SoftwareUpdater(Job):
"""
Version check class meant to run as a thread object with the sg scheduler.
"""
def __init__(self):
+ super(SoftwareUpdater, self).__init__(self.job_run, kwargs={})
+
self._min_python = (100, 0) # set default to absurdly high to prevent update
self.install_type = self.find_install_type()
@@ -150,7 +160,7 @@ class SoftwareUpdater(object):
except (BaseException, Exception):
pass
- def run(self, force=False):
+ def job_run(self, force=False):
# set current branch version
sickgear.BRANCH = self.get_branch()
@@ -219,6 +229,11 @@ class SoftwareUpdater(object):
# update branch with current config branch value
self.updater.branch = sickgear.BRANCH
+ response = Scheduler.blocking_jobs()
+ if response:
+ logger.log(f'Update skipped because {response}', logger.DEBUG)
+ return False
+
if not self.is_updatable:
self._log_cannot_update()
return False
diff --git a/sickgear/watchedstate.py b/sickgear/watchedstate.py
index 14454cbc..5c9fcc88 100644
--- a/sickgear/watchedstate.py
+++ b/sickgear/watchedstate.py
@@ -14,17 +14,15 @@
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see .
-import threading
-
import sickgear
+from .scheduler import Job
from sickgear import watchedstate_queue
-class WatchedStateUpdater(object):
+class WatchedStateUpdater(Job):
def __init__(self, name, queue_item):
+ super(WatchedStateUpdater, self).__init__(self.job_run, silent=True, kwargs={}, thread_lock=True)
- self.amActive = False
- self.lock = threading.Lock()
self.name = name
self.queue_item = queue_item
@@ -32,13 +30,15 @@ class WatchedStateUpdater(object):
def prevent_run(self):
return sickgear.watched_state_queue_scheduler.action.is_in_queue(self.queue_item)
- def run(self):
+ @staticmethod
+ def is_enabled():
+ return True
+
+ def job_run(self):
# noinspection PyUnresolvedReferences
if self.is_enabled():
- self.amActive = True
new_item = self.queue_item()
sickgear.watched_state_queue_scheduler.action.add_item(new_item)
- self.amActive = False
class EmbyWatchedStateUpdater(WatchedStateUpdater):
diff --git a/sickgear/webapi.py b/sickgear/webapi.py
index 908a32eb..cafec481 100644
--- a/sickgear/webapi.py
+++ b/sickgear/webapi.py
@@ -48,6 +48,7 @@ from .indexers import indexer_api, indexer_config
from .indexers.indexer_config import *
from lib.tvinfo_base.exceptions import *
from .scene_numbering import set_scene_numbering_helper
+from .scheduler import Scheduler
from .search_backlog import FORCED_BACKLOG
from .show_updater import clean_ignore_require_words
from .sgdatetime import SGDatetime
@@ -1915,9 +1916,9 @@ class CMD_SickGearPostProcess(ApiCall):
if not self.type:
self.type = 'manual'
- data = processTV.processDir(self.path, process_method=self.process_method, force=self.force_replace,
- force_replace=self.is_priority, failed=self.failed, pp_type=self.type,
- client=self.client)
+ data = processTV.process_dir(self.path, process_method=self.process_method, force=self.force_replace,
+ force_replace=self.is_priority, failed=self.failed, pp_type=self.type,
+ client=self.client)
if not self.return_data:
data = ""
@@ -2074,7 +2075,7 @@ class CMD_SickGearCheckScheduler(ApiCall):
backlogPaused = sickgear.search_queue_scheduler.action.is_backlog_paused()
backlogRunning = sickgear.search_queue_scheduler.action.is_backlog_in_progress()
- nextBacklog = sickgear.backlog_search_scheduler.next_run().strftime(dateFormat)
+ nextBacklog = sickgear.search_backlog_scheduler.next_run().strftime(dateFormat)
data = {"backlog_is_paused": int(backlogPaused), "backlog_is_running": int(backlogRunning),
"last_backlog": (0 < len(sql_result) and _ordinal_to_dateForm(sql_result[0]["last_backlog"])) or '',
@@ -2177,15 +2178,15 @@ class CMD_SickGearForceSearch(ApiCall):
""" force the specified search type to run """
result = None
if 'recent' == self.searchtype and not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress() \
- and not sickgear.recent_search_scheduler.action.amActive:
- result = sickgear.recent_search_scheduler.force_run()
+ and not sickgear.search_recent_scheduler.is_running_job:
+ result = sickgear.search_recent_scheduler.force_run()
elif 'backlog' == self.searchtype and not sickgear.search_queue_scheduler.action.is_backlog_in_progress() \
- and not sickgear.backlog_search_scheduler.action.amActive:
- sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG)
+ and not sickgear.search_backlog_scheduler.is_running_job:
+ sickgear.search_backlog_scheduler.force_search(force_type=FORCED_BACKLOG)
result = True
elif 'proper' == self.searchtype and not sickgear.search_queue_scheduler.action.is_propersearch_in_progress() \
- and not sickgear.proper_finder_scheduler.action.amActive:
- result = sickgear.proper_finder_scheduler.force_run()
+ and not sickgear.search_propers_scheduler.is_running_job:
+ result = sickgear.search_propers_scheduler.force_run()
if result:
return _responds(RESULT_SUCCESS, msg='%s search successfully forced' % self.searchtype)
return _responds(RESULT_FAILURE,
@@ -2499,6 +2500,13 @@ class CMD_SickGearRestart(ApiCall):
def run(self):
""" restart sickgear """
+
+ response = Scheduler.blocking_jobs()
+ if response:
+ msg = f'Restart aborted from API because {response.lower()}'
+ logger.log(msg, logger.DEBUG)
+ return _responds(RESULT_FAILURE, msg=msg)
+
sickgear.restart(soft=False)
return _responds(RESULT_SUCCESS, msg="SickGear is restarting...")
@@ -2817,6 +2825,13 @@ class CMD_SickGearShutdown(ApiCall):
def run(self):
""" shutdown sickgear """
+
+ response = Scheduler.blocking_jobs()
+ if response:
+ msg = f'Shutdown aborted from API because {response.lower()}'
+ logger.log(msg, logger.DEBUG)
+ return _responds(RESULT_FAILURE, msg=msg)
+
sickgear.events.put(sickgear.events.SystemEvent.SHUTDOWN)
return _responds(RESULT_SUCCESS, msg="SickGear is shutting down...")
@@ -4668,10 +4683,10 @@ class CMD_SickGearShowsForceUpdate(ApiCall):
def run(self):
""" force the daily show update now """
if sickgear.show_queue_scheduler.action.is_show_update_running() \
- or sickgear.show_update_scheduler.action.amActive:
+ or sickgear.update_show_scheduler.is_running_job:
return _responds(RESULT_FAILURE, msg="show update already running.")
- result = sickgear.show_update_scheduler.force_run()
+ result = sickgear.update_show_scheduler.force_run()
if result:
return _responds(RESULT_SUCCESS, msg="daily show update started")
return _responds(RESULT_FAILURE, msg="can't start show update currently")
diff --git a/sickgear/webserve.py b/sickgear/webserve.py
index 9ccafc64..409870fb 100644
--- a/sickgear/webserve.py
+++ b/sickgear/webserve.py
@@ -65,6 +65,7 @@ from .name_parser.parser import InvalidNameException, InvalidShowException, Name
from .providers import newznab, rsstorrent
from .scene_numbering import get_scene_absolute_numbering_for_show, get_scene_numbering_for_show, \
get_xem_absolute_numbering_for_show, get_xem_numbering_for_show, set_scene_numbering_helper
+from .scheduler import Scheduler
from .search_backlog import FORCED_BACKLOG
from .sgdatetime import SGDatetime
from .show_name_helpers import abbr_showname
@@ -1327,8 +1328,8 @@ class MainHandler(WebHandler):
now = datetime.datetime.now()
events = [
- ('recent', sickgear.recent_search_scheduler.time_left),
- ('backlog', sickgear.backlog_search_scheduler.next_backlog_timeleft),
+ ('recent', sickgear.search_recent_scheduler.time_left),
+ ('backlog', sickgear.search_backlog_scheduler.next_backlog_timeleft),
]
if sickgear.DOWNLOAD_PROPERS:
@@ -2070,6 +2071,9 @@ class Home(MainHandler):
if str(pid) != str(sickgear.PID):
return self.redirect('/home/')
+ if self.maybe_ignore('Shutdown'):
+ return
+
t = PageTemplate(web_handler=self, file='restart.tmpl')
t.shutdown = True
@@ -2082,6 +2086,9 @@ class Home(MainHandler):
if str(pid) != str(sickgear.PID):
return self.redirect('/home/')
+ if self.maybe_ignore('Restart'):
+ return
+
t = PageTemplate(web_handler=self, file='restart.tmpl')
t.shutdown = False
@@ -2089,6 +2096,17 @@ class Home(MainHandler):
return t.respond()
+ def maybe_ignore(self, task):
+ response = Scheduler.blocking_jobs()
+ if response:
+ task and logger.log('%s aborted because %s' % (task, response.lower()), logger.DEBUG)
+
+ self.redirect(self.request.headers['Referer'])
+ if task:
+            ui.notifications.message(u'Failed to %s because %s, please try again later' % (task.lower(), response.lower()))
+ return True
+ return False
+
def update(self, pid=None):
if str(pid) != str(sickgear.PID):
@@ -2326,15 +2344,15 @@ class Home(MainHandler):
t.season_min = ([], [1])[2 < t.latest_season] + [t.latest_season]
t.other_seasons = (list(set(all_seasons) - set(t.season_min)), [])[display_show_minimum]
t.seasons = []
- for x in all_seasons:
- t.seasons += [(x, [None] if x not in (t.season_min + t.other_seasons) else my_db.select(
+ for cur_season in all_seasons:
+ t.seasons += [(cur_season, [None] if cur_season not in (t.season_min + t.other_seasons) else my_db.select(
'SELECT *'
' FROM tv_episodes'
' WHERE indexer = ? AND showid = ?'
' AND season = ?'
' ORDER BY episode DESC',
- [show_obj.tvid, show_obj.prodid, x]
- ), scene_exceptions.has_season_exceptions(show_obj.tvid, show_obj.prodid, x))]
+ [show_obj.tvid, show_obj.prodid, cur_season]
+ ), scene_exceptions.ReleaseMap().has_season_exceptions(show_obj.tvid, show_obj.prodid, cur_season))]
for row in my_db.select('SELECT season, episode, status'
' FROM tv_episodes'
@@ -2423,7 +2441,7 @@ class Home(MainHandler):
t.clean_show_name = quote_plus(sickgear.indexermapper.clean_show_name(show_obj.name))
t.min_initial = Quality.get_quality_ui(min(Quality.split_quality(show_obj.quality)[0]))
- t.show_obj.exceptions = scene_exceptions.get_scene_exceptions(show_obj.tvid, show_obj.prodid)
+ t.show_obj.exceptions = scene_exceptions.ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid)
# noinspection PyUnresolvedReferences
t.all_scene_exceptions = show_obj.exceptions # normally Unresolved as not a class attribute, force set above
t.scene_numbering = get_scene_numbering_for_show(show_obj.tvid, show_obj.prodid)
@@ -2562,7 +2580,7 @@ class Home(MainHandler):
@staticmethod
def scene_exceptions(tvid_prodid, wanted_season=None):
- exceptions_list = sickgear.scene_exceptions.get_all_scene_exceptions(tvid_prodid)
+ exceptions_list = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
wanted_season = helpers.try_int(wanted_season, None)
wanted_not_found = None is not wanted_season and wanted_season not in exceptions_list
if not exceptions_list or wanted_not_found:
@@ -2754,7 +2772,7 @@ class Home(MainHandler):
return [err_string]
return self._generic_message('Error', err_string)
- show_obj.exceptions = scene_exceptions.get_all_scene_exceptions(tvid_prodid)
+ show_obj.exceptions = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
if None is not quality_preset and int(quality_preset):
best_qualities = []
@@ -2971,7 +2989,7 @@ class Home(MainHandler):
if do_update_exceptions:
try:
- scene_exceptions.update_scene_exceptions(show_obj.tvid, show_obj.prodid, exceptions_list)
+ scene_exceptions.ReleaseMap().update_exceptions(show_obj, exceptions_list)
helpers.cpu_sleep()
except exceptions_helper.CantUpdateException:
errors.append('Unable to force an update on scene exceptions of the show.')
@@ -3912,16 +3930,16 @@ class HomeProcessMedia(Home):
m = sickgear.NZBGET_MAP.split('=')
dir_name, not_used = helpers.path_mapper(m[0], m[1], dir_name)
- result = processTV.processDir(dir_name if dir_name else None,
- None if not nzb_name else decode_str(nzb_name),
- process_method=process_method, pp_type=process_type,
- cleanup=cleanup,
- force=force in ('on', '1'),
- force_replace=force_replace in ('on', '1'),
- failed='0' != failed,
- webhandler=None if '0' == stream else self.send_message,
- show_obj=show_obj, is_basedir=is_basedir in ('on', '1'),
- skip_failure_processing=skip_failure_processing, client=client)
+ result = processTV.process_dir(dir_name if dir_name else None,
+ None if not nzb_name else decode_str(nzb_name),
+ process_method=process_method, pp_type=process_type,
+ cleanup=cleanup,
+ force=force in ('on', '1'),
+ force_replace=force_replace in ('on', '1'),
+ failed='0' != failed,
+ webhandler=None if '0' == stream else self.send_message,
+ show_obj=show_obj, is_basedir=is_basedir in ('on', '1'),
+ skip_failure_processing=skip_failure_processing, client=client)
if '0' == stream:
             regexp = re.compile(r'(?i)<br\s*/>', flags=re.UNICODE)
@@ -4448,7 +4466,7 @@ class AddShows(Home):
t.blocklist = []
t.groups = []
- t.show_scene_maps = list(itervalues(sickgear.scene_exceptions.xem_ids_list))
+ t.show_scene_maps = list(itervalues(scene_exceptions.MEMCACHE['release_map_xem']))
has_shows = len(sickgear.showList)
t.try_id = [] # [dict try_tip: try_term]
@@ -6616,7 +6634,7 @@ class Manage(MainHandler):
show_obj = helpers.find_show_by_id(tvid_prodid)
if show_obj:
- sickgear.backlog_search_scheduler.action.search_backlog([show_obj])
+ sickgear.search_backlog_scheduler.action.search_backlog([show_obj])
self.redirect('/manage/backlog-overview/')
@@ -7116,11 +7134,11 @@ class ManageSearch(Manage):
def index(self):
t = PageTemplate(web_handler=self, file='manage_manageSearches.tmpl')
- # t.backlog_pi = sickgear.backlog_search_scheduler.action.get_progress_indicator()
+ # t.backlog_pi = sickgear.search_backlog_scheduler.action.get_progress_indicator()
t.backlog_paused = sickgear.search_queue_scheduler.action.is_backlog_paused()
t.scheduled_backlog_active_providers = sickgear.search_backlog.BacklogSearcher.providers_active(scheduled=True)
t.backlog_running = sickgear.search_queue_scheduler.action.is_backlog_in_progress()
- t.backlog_is_active = sickgear.backlog_search_scheduler.action.am_running()
+ t.backlog_is_active = sickgear.search_backlog_scheduler.action.am_running()
t.standard_backlog_running = sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress()
t.backlog_running_type = sickgear.search_queue_scheduler.action.type_of_backlog_in_progress()
t.recent_search_status = sickgear.search_queue_scheduler.action.is_recentsearch_in_progress()
@@ -7161,7 +7179,7 @@ class ManageSearch(Manage):
def force_backlog(self):
# force it to run the next time it looks
if not sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress():
- sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG)
+ sickgear.search_backlog_scheduler.force_search(force_type=FORCED_BACKLOG)
logger.log('Backlog search forced')
ui.notifications.message('Backlog search started')
@@ -7172,7 +7190,7 @@ class ManageSearch(Manage):
# force it to run the next time it looks
if not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress():
- result = sickgear.recent_search_scheduler.force_run()
+ result = sickgear.search_recent_scheduler.force_run()
if result:
logger.log('Recent search forced')
ui.notifications.message('Recent search started')
@@ -7183,7 +7201,7 @@ class ManageSearch(Manage):
def force_find_propers(self):
# force it to run the next time it looks
- result = sickgear.proper_finder_scheduler.force_run()
+ result = sickgear.search_propers_scheduler.force_run()
if result:
logger.log('Find propers search forced')
ui.notifications.message('Find propers search started')
@@ -7207,10 +7225,10 @@ class ShowTasks(Manage):
t = PageTemplate(web_handler=self, file='manage_showProcesses.tmpl')
t.queue_length = sickgear.show_queue_scheduler.action.queue_length()
t.people_queue = sickgear.people_queue_scheduler.action.queue_data()
- t.next_run = sickgear.show_update_scheduler.last_run.replace(
- hour=sickgear.show_update_scheduler.start_time.hour)
+ t.next_run = sickgear.update_show_scheduler.last_run.replace(
+ hour=sickgear.update_show_scheduler.start_time.hour)
t.show_update_running = sickgear.show_queue_scheduler.action.is_show_update_running() \
- or sickgear.show_update_scheduler.action.amActive
+ or sickgear.update_show_scheduler.is_running_job
my_db = db.DBConnection(row_type='dict')
sql_result = my_db.select('SELECT n.indexer || ? || n.indexer_id AS tvid_prodid,'
@@ -7293,7 +7311,7 @@ class ShowTasks(Manage):
def force_show_update(self):
- result = sickgear.show_update_scheduler.force_run()
+ result = sickgear.update_show_scheduler.force_run()
if result:
logger.log('Show Update forced')
ui.notifications.message('Forced Show Update started')
@@ -7982,7 +8000,7 @@ class ConfigGeneral(Config):
def update_alt():
""" Load scene exceptions """
- changed_exceptions, cnt_updated_numbers, min_remain_iv = scene_exceptions.retrieve_exceptions()
+ changed_exceptions, cnt_updated_numbers, min_remain_iv = scene_exceptions.ReleaseMap().fetch_exceptions()
return json_dumps(dict(names=int(changed_exceptions), numbers=cnt_updated_numbers, min_remain_iv=min_remain_iv))
@@ -7991,7 +8009,7 @@ class ConfigGeneral(Config):
""" Return alternative release names and numbering as json text"""
# alternative release names and numbers
- alt_names = scene_exceptions.get_all_scene_exceptions(tvid_prodid)
+ alt_names = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
alt_numbers = get_scene_numbering_for_show(*TVidProdid(tvid_prodid).tuple) # arbitrary order
ui_output = 'No alternative names or numbers to export'
@@ -8180,8 +8198,8 @@ class ConfigGeneral(Config):
sickgear.UPDATE_SHOWS_ON_START = config.checkbox_to_value(update_shows_on_start)
sickgear.SHOW_UPDATE_HOUR = config.minimax(show_update_hour, 3, 0, 23)
try:
- with sickgear.show_update_scheduler.lock:
- sickgear.show_update_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR)
+ with sickgear.update_show_scheduler.lock:
+ sickgear.update_show_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR)
except (BaseException, Exception) as e:
logger.error('Could not change Show Update Scheduler time: %s' % ex(e))
sickgear.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show)
diff --git a/tests/scene_helpers_tests.py b/tests/scene_helpers_tests.py
index 29f80deb..ff0d9bd3 100644
--- a/tests/scene_helpers_tests.py
+++ b/tests/scene_helpers_tests.py
@@ -74,14 +74,15 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
sickgear.showList.append(s)
sickgear.showDict[s.sid_int] = s
sickgear.webserve.Home.make_showlist_unique_names()
- scene_exceptions.retrieve_exceptions()
+ scene_exceptions.ReleaseMap().fetch_exceptions()
name_cache.build_name_cache()
def test_sceneExceptionsEmpty(self):
- self.assertEqual(scene_exceptions.get_scene_exceptions(0, 0), [])
+ self.assertEqual(scene_exceptions.ReleaseMap().get_alt_names(0, 0), [])
def test_sceneExceptionsBlack_Lagoon(self):
- self.assertEqual(sorted(scene_exceptions.get_scene_exceptions(1, 79604)), ['Black-Lagoon'])
+ self.assertEqual(sorted(
+ scene_exceptions.ReleaseMap().get_alt_names(1, 79604)), ['Black-Lagoon'])
def test_sceneExceptionByName(self):
self.assertEqual(scene_exceptions.get_scene_exception_by_name(
@@ -98,14 +99,18 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
s.anime = 1
sickgear.showList.append(s)
sickgear.showDict[s.sid_int] = s
- scene_exceptions.retrieve_exceptions()
+ scene_exceptions.ReleaseMap().fetch_exceptions()
name_cache.build_name_cache()
- self.assertEqual(scene_exceptions.get_scene_exception_by_name('ブラック・ラグーン'), [1, 79604, -1])
- self.assertEqual(scene_exceptions.get_scene_exception_by_name('Burakku Ragūn'), [1, 79604, -1])
- self.assertEqual(scene_exceptions.get_scene_exception_by_name('Rokka no Yuusha'), [1, 295243, -1])
+ self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+ 'ブラック・ラグーン'), [1, 79604, -1])
+ self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+ 'Burakku Ragūn'), [1, 79604, -1])
+ self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+ 'Rokka no Yuusha'), [1, 295243, -1])
def test_sceneExceptionByNameEmpty(self):
- self.assertEqual(scene_exceptions.get_scene_exception_by_name('nothing useful'), [None, None, None])
+ self.assertEqual(scene_exceptions.get_scene_exception_by_name(
+ 'nothing useful'), [None, None, None])
def test_sceneExceptionsResetNameCache(self):
# clear the exceptions
@@ -117,7 +122,7 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
name_cache.add_name_to_cache('Cached Name', prodid=0)
# updating should not clear the cache this time since our exceptions didn't change
- scene_exceptions.retrieve_exceptions()
+ scene_exceptions.ReleaseMap().fetch_exceptions()
self.assertEqual(name_cache.retrieve_name_from_cache('Cached Name'), (0, 0))
diff --git a/tests/webapi_tests.py b/tests/webapi_tests.py
index a7b1c9a9..816de55b 100644
--- a/tests/webapi_tests.py
+++ b/tests/webapi_tests.py
@@ -180,7 +180,7 @@ class WebAPICase(test.SickbeardTestDBCase):
search_queue.SearchQueue(),
cycle_time=datetime.timedelta(seconds=3),
thread_name='SEARCHQUEUE')
- sickgear.backlog_search_scheduler = search_backlog.BacklogSearchScheduler(
+ sickgear.search_backlog_scheduler = search_backlog.BacklogSearchScheduler(
search_backlog.BacklogSearcher(),
cycle_time=datetime.timedelta(minutes=60),
run_delay=datetime.timedelta(minutes=60),