Change add jobs to centralise scheduler activities.

Change refactor scene_exceptions.
This commit is contained in:
JackDandy 2023-03-16 04:19:03 +00:00
parent 97466179eb
commit 8e8568adb3
29 changed files with 929 additions and 796 deletions

View file

@ -10,6 +10,8 @@
* Update SimpleJSON 3.18.1 (c891b95) to 3.19.1 (aeb63ee)
* Update Tornado Web Server 6.3.0 (7186b86) to 6.3.1 (419838b)
* Update urllib3 1.26.14 (a06c05c) to 1.26.15 (25cca389)
* Change add jobs to centralise scheduler activities
* Change refactor scene_exceptions
### 3.28.0 (2023-04-12 13:05:00 UTC)

View file

@ -555,9 +555,9 @@ class SickGear(object):
name_cache.build_name_cache()
# load all ids from xem
sickgear.classes.loading_msg.message = 'Loading xem data'
startup_background_tasks = threading.Thread(name='XEMUPDATER', target=sickgear.scene_exceptions.get_xem_ids)
startup_background_tasks.start()
# sickgear.classes.loading_msg.message = 'Loading xem data'
# startup_background_tasks = threading.Thread(name='XEMUPDATER', target=sickgear.scene_exceptions.ReleaseMap().fetch_xem_ids)
# startup_background_tasks.start()
sickgear.classes.loading_msg.message = 'Checking history'
# check history snatched_proper update
@ -624,7 +624,7 @@ class SickGear(object):
if not switching and (self.force_update or sickgear.UPDATE_SHOWS_ON_START):
sickgear.classes.loading_msg.message = 'Starting a forced show update'
background_start_forced_show_update = threading.Thread(name='STARTUP-FORCE-SHOW-UPDATE',
target=sickgear.show_update_scheduler.action.run)
target=sickgear.update_show_scheduler.action.run)
background_start_forced_show_update.start()
sickgear.classes.loading_msg.message = 'Switching to default web server'

View file

@ -37,7 +37,7 @@ import zlib
from . import classes, db, helpers, image_cache, indexermapper, logger, metadata, naming, people_queue, providers, \
scene_exceptions, scene_numbering, scheduler, search_backlog, search_propers, search_queue, search_recent, \
show_queue, show_updater, subtitles, trakt_helpers, version_checker, watchedstate_queue
from . import auto_post_processer, properFinder # must come after the above imports
from . import auto_media_process, properFinder # must come after the above imports
from .common import SD, SKIPPED, USER_AGENT
from .config import check_section, check_setting_int, check_setting_str, ConfigMigrator, minimax
from .databases import cache_db, failed_db, mainDB
@ -61,7 +61,7 @@ import sg_helpers
# noinspection PyUnreachableCode
if False:
from typing import AnyStr, Dict, List
from typing import AnyStr, Dict, List, Optional
from adba import Connection
from .event_queue import Events
from .tv import TVShow
@ -88,23 +88,25 @@ DATA_DIR = ''
# noinspection PyTypeChecker
events = None # type: Events
recent_search_scheduler = None
backlog_search_scheduler = None
show_update_scheduler = None
people_queue_scheduler = None
update_software_scheduler = None
update_packages_scheduler = None
show_queue_scheduler = None
search_queue_scheduler = None
proper_finder_scheduler = None
media_process_scheduler = None
subtitles_finder_scheduler = None
# trakt_checker_scheduler = None
emby_watched_state_scheduler = None
plex_watched_state_scheduler = None
watched_state_queue_scheduler = None
show_queue_scheduler = None # type: Optional[scheduler.Scheduler]
search_queue_scheduler = None # type: Optional[scheduler.Scheduler]
people_queue_scheduler = None # type: Optional[scheduler.Scheduler]
watched_state_queue_scheduler = None # type: Optional[scheduler.Scheduler]
update_software_scheduler = None # type: Optional[scheduler.Scheduler]
update_packages_scheduler = None # type: Optional[scheduler.Scheduler]
update_show_scheduler = None # type: Optional[scheduler.Scheduler]
update_release_mappings_scheduler = None # type: Optional[scheduler.Scheduler]
search_recent_scheduler = None # type: Optional[scheduler.Scheduler]
search_backlog_scheduler = None # type: Optional[search_backlog.BacklogSearchScheduler]
search_propers_scheduler = None # type: Optional[scheduler.Scheduler]
search_subtitles_scheduler = None # type: Optional[scheduler.Scheduler]
emby_watched_state_scheduler = None # type: Optional[scheduler.Scheduler]
plex_watched_state_scheduler = None # type: Optional[scheduler.Scheduler]
process_media_scheduler = None # type: Optional[scheduler.Scheduler]
# noinspection PyTypeChecker
background_mapping_task = None # type: threading.Thread
# deprecated
# trakt_checker_scheduler = None
provider_ping_thread_pool = {}
@ -624,9 +626,11 @@ __INITIALIZED__ = False
__INIT_STAGE__ = 0
# don't reassign MEMCACHE var without reassigning sg_helpers.MEMCACHE
# and scene_exceptions.MEMCACHE
# as long as the pointer is the same (dict only modified) all is fine
MEMCACHE = {}
sg_helpers.MEMCACHE = MEMCACHE
scene_exceptions.MEMCACHE = MEMCACHE
MEMCACHE_FLAG_IMAGES = {}
@ -1518,11 +1522,14 @@ def init_stage_2():
global __INITIALIZED__, MEMCACHE, MEMCACHE_FLAG_IMAGES, RECENTSEARCH_STARTUP
# Schedulers
# global trakt_checker_scheduler
global recent_search_scheduler, backlog_search_scheduler, people_queue_scheduler, show_update_scheduler, \
update_software_scheduler, update_packages_scheduler, show_queue_scheduler, search_queue_scheduler, \
proper_finder_scheduler, media_process_scheduler, subtitles_finder_scheduler, \
background_mapping_task, \
watched_state_queue_scheduler, emby_watched_state_scheduler, plex_watched_state_scheduler
global update_software_scheduler, update_packages_scheduler, \
update_show_scheduler, update_release_mappings_scheduler, \
search_backlog_scheduler, search_propers_scheduler, \
search_recent_scheduler, search_subtitles_scheduler, \
search_queue_scheduler, show_queue_scheduler, people_queue_scheduler, \
watched_state_queue_scheduler, emby_watched_state_scheduler, plex_watched_state_scheduler, \
process_media_scheduler, background_mapping_task
# Gen Config/Misc
global SHOW_UPDATE_HOUR, UPDATE_INTERVAL, UPDATE_PACKAGES_INTERVAL
# Search Settings/Episode
@ -1570,32 +1577,17 @@ def init_stage_2():
metadata_provider_dict[tmp_provider.name] = tmp_provider
# initialize schedulers
# updaters
update_now = datetime.timedelta(minutes=0)
update_software_scheduler = scheduler.Scheduler(
version_checker.SoftwareUpdater(),
cycle_time=datetime.timedelta(hours=UPDATE_INTERVAL),
thread_name='SOFTWAREUPDATER',
silent=False)
update_packages_scheduler = scheduler.Scheduler(
version_checker.PackagesUpdater(),
cycle_time=datetime.timedelta(hours=UPDATE_PACKAGES_INTERVAL),
# run_delay=datetime.timedelta(minutes=2),
thread_name='PACKAGESUPDATER',
silent=False)
# /
# queues must be first
show_queue_scheduler = scheduler.Scheduler(
show_queue.ShowQueue(),
cycle_time=datetime.timedelta(seconds=3),
thread_name='SHOWQUEUE')
show_update_scheduler = scheduler.Scheduler(
show_updater.ShowUpdater(),
cycle_time=datetime.timedelta(hours=1),
start_time=datetime.time(hour=SHOW_UPDATE_HOUR),
thread_name='SHOWUPDATER',
prevent_cycle_run=show_queue_scheduler.action.is_show_update_running) # 3AM
search_queue_scheduler = scheduler.Scheduler(
search_queue.SearchQueue(),
cycle_time=datetime.timedelta(seconds=3),
thread_name='SEARCHQUEUE')
people_queue_scheduler = scheduler.Scheduler(
people_queue.PeopleQueue(),
@ -1603,21 +1595,52 @@ def init_stage_2():
thread_name='PEOPLEQUEUE'
)
# searchers
search_queue_scheduler = scheduler.Scheduler(
search_queue.SearchQueue(),
watched_state_queue_scheduler = scheduler.Scheduler(
watchedstate_queue.WatchedStateQueue(),
cycle_time=datetime.timedelta(seconds=3),
thread_name='SEARCHQUEUE')
thread_name='WATCHEDSTATEQUEUE')
# /
# updaters
update_software_scheduler = scheduler.Scheduler(
version_checker.SoftwareUpdater(),
cycle_time=datetime.timedelta(hours=UPDATE_INTERVAL),
thread_name='SOFTWAREUPDATE',
silent=False)
update_packages_scheduler = scheduler.Scheduler(
version_checker.PackagesUpdater(),
cycle_time=datetime.timedelta(hours=UPDATE_PACKAGES_INTERVAL),
# run_delay=datetime.timedelta(minutes=2),
thread_name='PACKAGESUPDATE',
silent=False)
update_show_scheduler = scheduler.Scheduler(
show_updater.ShowUpdater(),
cycle_time=datetime.timedelta(hours=1),
start_time=datetime.time(hour=SHOW_UPDATE_HOUR),
thread_name='SHOWDATAUPDATE',
prevent_cycle_run=show_queue_scheduler.action.is_show_update_running) # 3AM
classes.loading_msg.message = 'Loading show maps'
update_release_mappings_scheduler = scheduler.Scheduler(
scene_exceptions.ReleaseMap(),
cycle_time=datetime.timedelta(hours=2),
thread_name='SHOWMAPSUPDATE',
silent=False)
# /
# searchers
init_search_delay = int(os.environ.get('INIT_SEARCH_DELAY', 0))
# enter 4499 (was 4489) for experimental internal provider intervals
update_interval = datetime.timedelta(minutes=(RECENTSEARCH_INTERVAL, 1)[4499 == RECENTSEARCH_INTERVAL])
recent_search_scheduler = scheduler.Scheduler(
update_now = datetime.timedelta(minutes=0)
search_recent_scheduler = scheduler.Scheduler(
search_recent.RecentSearcher(),
cycle_time=update_interval,
run_delay=update_now if RECENTSEARCH_STARTUP else datetime.timedelta(minutes=init_search_delay or 5),
thread_name='RECENTSEARCHER',
thread_name='RECENTSEARCH',
prevent_cycle_run=search_queue_scheduler.action.is_recentsearch_in_progress)
if [x for x in providers.sorted_sources()
@ -1635,14 +1658,13 @@ def init_stage_2():
backlogdelay = helpers.try_int((time_diff.total_seconds() / 60) + 10, 10)
else:
backlogdelay = 10
backlog_search_scheduler = search_backlog.BacklogSearchScheduler(
search_backlog_scheduler = search_backlog.BacklogSearchScheduler(
search_backlog.BacklogSearcher(),
cycle_time=datetime.timedelta(minutes=get_backlog_cycle_time()),
run_delay=datetime.timedelta(minutes=init_search_delay or backlogdelay),
thread_name='BACKLOG',
thread_name='BACKLOGSEARCH',
prevent_cycle_run=search_queue_scheduler.action.is_standard_backlog_in_progress)
propers_searcher = search_propers.ProperSearcher()
last_proper_search = datetime.datetime.fromtimestamp(properFinder.get_last_proper_search())
time_diff = datetime.timedelta(days=1) - (datetime.datetime.now() - last_proper_search)
if time_diff < datetime.timedelta(seconds=0):
@ -1650,34 +1672,21 @@ def init_stage_2():
else:
properdelay = helpers.try_int((time_diff.total_seconds() / 60) + 5, 20)
proper_finder_scheduler = scheduler.Scheduler(
propers_searcher,
search_propers_scheduler = scheduler.Scheduler(
search_propers.ProperSearcher(),
cycle_time=datetime.timedelta(days=1),
run_delay=datetime.timedelta(minutes=init_search_delay or properdelay),
thread_name='FINDPROPERS',
thread_name='PROPERSSEARCH',
prevent_cycle_run=search_queue_scheduler.action.is_propersearch_in_progress)
# processors
media_process_scheduler = scheduler.Scheduler(
auto_post_processer.PostProcesser(),
cycle_time=datetime.timedelta(minutes=MEDIAPROCESS_INTERVAL),
thread_name='POSTPROCESSER',
silent=not PROCESS_AUTOMATICALLY)
subtitles_finder_scheduler = scheduler.Scheduler(
search_subtitles_scheduler = scheduler.Scheduler(
subtitles.SubtitlesFinder(),
cycle_time=datetime.timedelta(hours=SUBTITLES_FINDER_INTERVAL),
thread_name='FINDSUBTITLES',
thread_name='SUBTITLESEARCH',
silent=not USE_SUBTITLES)
background_mapping_task = threading.Thread(name='MAPPINGSUPDATER', target=indexermapper.load_mapped_ids,
kwargs={'load_all': True})
watched_state_queue_scheduler = scheduler.Scheduler(
watchedstate_queue.WatchedStateQueue(),
cycle_time=datetime.timedelta(seconds=3),
thread_name='WATCHEDSTATEQUEUE')
# /
# others
emby_watched_state_scheduler = scheduler.Scheduler(
EmbyWatchedStateUpdater(),
cycle_time=datetime.timedelta(minutes=EMBY_WATCHEDSTATE_INTERVAL),
@ -1690,6 +1699,15 @@ def init_stage_2():
run_delay=datetime.timedelta(minutes=5),
thread_name='PLEXWATCHEDSTATE')
process_media_scheduler = scheduler.Scheduler(
auto_media_process.MediaProcess(),
cycle_time=datetime.timedelta(minutes=MEDIAPROCESS_INTERVAL),
thread_name='PROCESSMEDIA',
silent=not PROCESS_AUTOMATICALLY)
background_mapping_task = threading.Thread(name='MAPPINGUPDATES', target=indexermapper.load_mapped_ids,
kwargs={'load_all': True})
MEMCACHE['history_tab_limit'] = 11
MEMCACHE['history_tab'] = History.menu_tab(MEMCACHE['history_tab_limit'])
@ -1707,11 +1725,15 @@ def init_stage_2():
def enabled_schedulers(is_init=False):
# ([], [trakt_checker_scheduler])[USE_TRAKT] + \
return ([], [events])[is_init] \
+ ([], [recent_search_scheduler, backlog_search_scheduler, show_update_scheduler, people_queue_scheduler,
update_software_scheduler, update_packages_scheduler,
show_queue_scheduler, search_queue_scheduler, proper_finder_scheduler,
media_process_scheduler, subtitles_finder_scheduler,
emby_watched_state_scheduler, plex_watched_state_scheduler, watched_state_queue_scheduler]
+ ([], [update_software_scheduler, update_packages_scheduler,
update_show_scheduler, update_release_mappings_scheduler,
search_recent_scheduler, search_backlog_scheduler,
search_propers_scheduler, search_subtitles_scheduler,
show_queue_scheduler, search_queue_scheduler,
people_queue_scheduler, watched_state_queue_scheduler,
emby_watched_state_scheduler, plex_watched_state_scheduler,
process_media_scheduler
]
)[not MEMCACHE.get('update_restart')] \
+ ([events], [])[is_init]

View file

@ -18,32 +18,31 @@ import os.path
import sickgear
from . import logger, processTV
from .scheduler import Job
class PostProcesser(object):
class MediaProcess(Job):
def __init__(self):
self.amActive = False
super(MediaProcess, self).__init__(self.job_run, kwargs={})
@staticmethod
def is_enabled():
return sickgear.PROCESS_AUTOMATICALLY
def run(self):
def job_run(self):
if self.is_enabled():
self.amActive = True
self._main()
self.amActive = False
@staticmethod
def _main():
if not os.path.isdir(sickgear.TV_DOWNLOAD_DIR):
logger.error('Automatic post-processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR)
logger.error('Automatic media processing attempted but dir %s doesn\'t exist' % sickgear.TV_DOWNLOAD_DIR)
return
if not os.path.isabs(sickgear.TV_DOWNLOAD_DIR):
logger.error('Automatic post-processing attempted but dir %s is relative '
logger.error('Automatic media processing attempted but dir %s is relative '
'(and probably not what you really want to process)' % sickgear.TV_DOWNLOAD_DIR)
return
processTV.processDir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True)
processTV.process_dir(sickgear.TV_DOWNLOAD_DIR, is_basedir=True)

View file

@ -152,8 +152,8 @@ def schedule_mediaprocess(iv):
if sickgear.MEDIAPROCESS_INTERVAL < sickgear.MIN_MEDIAPROCESS_INTERVAL:
sickgear.MEDIAPROCESS_INTERVAL = sickgear.MIN_MEDIAPROCESS_INTERVAL
sickgear.media_process_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL)
sickgear.media_process_scheduler.set_paused_state()
sickgear.process_media_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.MEDIAPROCESS_INTERVAL)
sickgear.process_media_scheduler.set_paused_state()
def schedule_recentsearch(iv):
@ -162,14 +162,14 @@ def schedule_recentsearch(iv):
if sickgear.RECENTSEARCH_INTERVAL < sickgear.MIN_RECENTSEARCH_INTERVAL:
sickgear.RECENTSEARCH_INTERVAL = sickgear.MIN_RECENTSEARCH_INTERVAL
sickgear.recent_search_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL)
sickgear.search_recent_scheduler.cycle_time = datetime.timedelta(minutes=sickgear.RECENTSEARCH_INTERVAL)
def schedule_backlog(iv):
sickgear.BACKLOG_PERIOD = minimax(iv, sickgear.DEFAULT_BACKLOG_PERIOD,
sickgear.MIN_BACKLOG_PERIOD, sickgear.MAX_BACKLOG_PERIOD)
sickgear.backlog_search_scheduler.action.cycle_time = sickgear.BACKLOG_PERIOD
sickgear.search_backlog_scheduler.action.cycle_time = sickgear.BACKLOG_PERIOD
def schedule_update_software(iv):
@ -220,7 +220,7 @@ def schedule_update_packages_notify(update_packages_notify):
def schedule_download_propers(download_propers):
if sickgear.DOWNLOAD_PROPERS != download_propers:
sickgear.DOWNLOAD_PROPERS = download_propers
sickgear.proper_finder_scheduler.set_paused_state()
sickgear.search_propers_scheduler.set_paused_state()
def schedule_trakt(use_trakt):
@ -233,7 +233,7 @@ def schedule_trakt(use_trakt):
def schedule_subtitles(use_subtitles):
if sickgear.USE_SUBTITLES != use_subtitles:
sickgear.USE_SUBTITLES = use_subtitles
sickgear.subtitles_finder_scheduler.set_paused_state()
sickgear.search_subtitles_scheduler.set_paused_state()
def schedule_emby_watched(emby_watched_interval):

View file

@ -1,5 +1,4 @@
from lib.six import moves
import queue
import threading
@ -15,7 +14,7 @@ class Event(object):
class Events(threading.Thread):
def __init__(self, callback):
super(Events, self).__init__()
self.queue = moves.queue.Queue()
self.queue = queue.Queue()
self.daemon = True
self.callback = callback
self.name = 'EVENT-QUEUE'
@ -31,24 +30,24 @@ class Events(threading.Thread):
while not self._stopper.is_set():
try:
# get event type
etype = self.queue.get(True, 1)
except moves.queue.Empty:
etype = 'Empty'
ev_type = self.queue.get(True, 1)
except queue.Empty:
ev_type = 'Empty'
except(BaseException, Exception):
etype = None
if etype in (self.SystemEvent.RESTART, self.SystemEvent.SHUTDOWN, None, 'Empty'):
if etype in ('Empty',):
ev_type = None
if ev_type in (self.SystemEvent.RESTART, self.SystemEvent.SHUTDOWN, None, 'Empty'):
if ev_type in ('Empty',):
continue
from sickgear import logger
logger.debug(f'Callback {self.callback.__name__}(event type:{etype})')
logger.debug(f'Callback {self.callback.__name__}(event type:{ev_type})')
try:
# perform callback if we got an event type
self.callback(etype)
self.callback(ev_type)
# event completed
self.queue.task_done()
except moves.queue.Empty:
except queue.Empty:
pass
# exiting thread

View file

@ -19,6 +19,7 @@ import datetime
import threading
from . import db, logger
from .scheduler import Job
from exceptions_helper import ex
from six import integer_types
@ -37,9 +38,10 @@ class QueuePriorities(object):
VERYHIGH = 40
class GenericQueue(object):
class GenericQueue(Job):
def __init__(self, cache_db_tables=None, main_db_tables=None):
# type: (List[AnyStr], List[AnyStr]) -> None
super(GenericQueue, self).__init__(self.job_run, silent=True, kwargs={}, reentrant_lock=True)
self.currentItem = None # type: QueueItem or None
@ -51,13 +53,41 @@ class GenericQueue(object):
self.events = {} # type: Dict[int, List[Callable]]
self.lock = threading.RLock()
self.cache_db_tables = cache_db_tables or [] # type: List[AnyStr]
self.main_db_tables = main_db_tables or [] # type: List[AnyStr]
self._id_counter = self._load_init_id() # type: integer_types
def job_run(self):
# only start a new task if one isn't already going
with self.lock:
if None is self.currentItem or not self.currentItem.is_alive():
# if the thread is dead then the current item should be finished
if self.currentItem:
self.currentItem.finish()
try:
self.delete_item(self.currentItem, finished_run=True)
except (BaseException, Exception):
pass
self.currentItem = None
# if there's something in the queue then run it in a thread and take it out of the queue
if 0 < len(self.queue):
self.queue.sort(key=lambda y: (-y.priority, y.added))
if self.queue[0].priority < self.min_priority:
return
# launch the queue item in a thread
self.currentItem = self.queue.pop(0)
if 'SEARCHQUEUE' != self.queue_name:
self.currentItem.name = self.queue_name + '-' + self.currentItem.name
self.currentItem.start()
self.check_events()
def _load_init_id(self):
# type: (...) -> integer_types
"""
@ -216,7 +246,7 @@ class GenericQueue(object):
self.min_priority = 999999999999
def unpause(self):
logger.log('Unpausing queue')
logger.log('Un-pausing queue')
with self.lock:
self.min_priority = 0
@ -269,36 +299,6 @@ class GenericQueue(object):
except (BaseException, Exception) as e:
logger.error('Error executing Event: %s' % ex(e))
def run(self):
# only start a new task if one isn't already going
with self.lock:
if None is self.currentItem or not self.currentItem.is_alive():
# if the thread is dead then the current item should be finished
if self.currentItem:
self.currentItem.finish()
try:
self.delete_item(self.currentItem, finished_run=True)
except (BaseException, Exception):
pass
self.currentItem = None
# if there's something in the queue then run it in a thread and take it out of the queue
if 0 < len(self.queue):
self.queue.sort(key=lambda y: (-y.priority, y.added))
if self.queue[0].priority < self.min_priority:
return
# launch the queue item in a thread
self.currentItem = self.queue.pop(0)
if 'SEARCHQUEUE' != self.queue_name:
self.currentItem.name = self.queue_name + '-' + self.currentItem.name
self.currentItem.start()
self.check_events()
class QueueItem(threading.Thread):
def __init__(self, name, action_id=0, uid=None):

View file

@ -14,6 +14,7 @@
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
import threading
import sickgear
@ -21,6 +22,7 @@ from . import db
from .helpers import full_sanitize_scene_name, try_int
from six import iteritems
from _23 import map_consume
# noinspection PyUnreachableCode
if False:
@ -85,18 +87,19 @@ def build_name_cache(show_obj=None, update_only_scene=False):
if show_obj:
# search for only the requested show id and flush old show entries from namecache
show_ids = {show_obj.tvid: [show_obj.prodid]}
nameCache = dict([(k, v) for k, v in iteritems(nameCache)
if not (v[0] == show_obj.tvid and v[1] == show_obj.prodid)])
sceneNameCache = dict([(k, v) for k, v in iteritems(sceneNameCache)
if not (v[0] == show_obj.tvid and v[1] == show_obj.prodid)])
# add standard indexer name to namecache
nameCache[full_sanitize_scene_name(show_obj.unique_name or show_obj.name)] = [show_obj.tvid, show_obj.prodid, -1]
nameCache[full_sanitize_scene_name(show_obj.unique_name or show_obj.name)] = \
[show_obj.tvid, show_obj.prodid, -1]
else:
# generate list of production ids to look up in cache.db
show_ids = {}
for cur_show_obj in sickgear.showList:
show_ids.setdefault(cur_show_obj.tvid, []).append(cur_show_obj.prodid)
show_ids = defaultdict(list)
map_consume(lambda _so: show_ids[_so.tvid].append(_so.prodid), sickgear.showList)
# add all standard show indexer names to namecache
nameCache = dict(
@ -104,33 +107,32 @@ def build_name_cache(show_obj=None, update_only_scene=False):
for cur_so in sickgear.showList if cur_so])
sceneNameCache = {}
cache_db = db.DBConnection()
cache_results = []
if update_only_scene:
# generate list of production ids to look up in cache.db
show_ids = {}
for cur_show_obj in sickgear.showList:
show_ids.setdefault(cur_show_obj.tvid, []).append(cur_show_obj.prodid)
tmp_scene_name_cache = {}
else:
tmp_scene_name_cache = sceneNameCache.copy()
for t, s in iteritems(show_ids):
else:
# generate list of production ids to look up in cache.db
show_ids = defaultdict(list)
map_consume(lambda _so: show_ids[_so.tvid].append(_so.prodid), sickgear.showList)
tmp_scene_name_cache = {}
cache_results = []
cache_db = db.DBConnection()
for cur_tvid, cur_prodid_list in iteritems(show_ids):
cache_results += cache_db.select(
'SELECT show_name, indexer AS tv_id, indexer_id AS prod_id, season'
' FROM scene_exceptions'
' WHERE indexer = %s AND indexer_id IN (%s)' % (t, ','.join(['%s' % i for i in s])))
f'SELECT show_name, indexer AS tv_id, indexer_id AS prod_id, season'
f' FROM scene_exceptions'
f' WHERE indexer = {cur_tvid} AND indexer_id IN ({",".join(map(str, cur_prodid_list))})')
if cache_results:
for cache_result in cache_results:
tvid = int(cache_result['tv_id'])
prodid = int(cache_result['prod_id'])
season = try_int(cache_result['season'], -1)
name = full_sanitize_scene_name(cache_result['show_name'])
for cur_result in cache_results:
tvid = int(cur_result['tv_id'])
prodid = int(cur_result['prod_id'])
season = try_int(cur_result['season'], -1)
name = full_sanitize_scene_name(cur_result['show_name'])
tmp_scene_name_cache[name] = [tvid, prodid, season]
sceneNameCache = tmp_scene_name_cache
sceneNameCache = tmp_scene_name_cache.copy()
def remove_from_namecache(tvid, prodid):

View file

@ -407,7 +407,8 @@ class NameParser(object):
new_season_numbers.append(s)
elif show_obj.is_anime and len(best_result.ab_episode_numbers) and not self.testing:
scene_season = scene_exceptions.get_scene_exception_by_name(best_result.series_name)[2]
scene_season = scene_exceptions.get_scene_exception_by_name(
best_result.series_name)[2]
for epAbsNo in best_result.ab_episode_numbers:
a = epAbsNo

View file

@ -1141,10 +1141,9 @@ class ProcessTVShow(object):
self._buffer(processor.log.strip('\n'))
# backward compatibility prevents the case of this function name from being updated to PEP8
def processDir(dir_name, nzb_name=None, process_method=None, force=False, force_replace=None,
failed=False, pp_type='auto', cleanup=False, webhandler=None, show_obj=None, is_basedir=True,
skip_failure_processing=False, client=None):
def process_dir(dir_name, nzb_name=None, process_method=None, force=False, force_replace=None,
failed=False, pp_type='auto', cleanup=False, webhandler=None, show_obj=None, is_basedir=True,
skip_failure_processing=False, client=None):
"""
:param dir_name: dir name
@ -1182,6 +1181,10 @@ def processDir(dir_name, nzb_name=None, process_method=None, force=False, force_
pp_type, cleanup, show_obj)
# backward compatibility
processDir = process_dir
def process_minimal(nzb_name, show_obj, failed, webhandler):
# type: (AnyStr, TVShow, bool, Any) -> None
ProcessTVShow(webhandler).process_minimal(nzb_name, show_obj, failed, webhandler)

View file

@ -71,7 +71,7 @@ def search_propers(provider_proper_obj=None):
if None is provider_proper_obj:
_set_last_proper_search(datetime.datetime.now())
proper_sch = sickgear.proper_finder_scheduler
proper_sch = sickgear.search_propers_scheduler
if None is proper_sch.start_time:
run_in = proper_sch.last_run + proper_sch.cycle_time - datetime.datetime.now()
run_at = ', next check '
@ -696,7 +696,7 @@ def _set_last_proper_search(when):
def next_proper_timeleft():
return sickgear.proper_finder_scheduler.time_left()
return sickgear.search_propers_scheduler.time_left()
def get_last_proper_search():

View file

@ -37,7 +37,7 @@ from ..classes import NZBSearchResult, TorrentSearchResult, SearchResult
from ..common import Quality, MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
from ..helpers import maybe_plural, remove_file_perm
from ..name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from ..scene_exceptions import has_season_exceptions
from ..scene_exceptions import ReleaseMap
from ..show_name_helpers import get_show_names_all_possible
from ..sgdatetime import SGDatetime
from ..tv import TVEpisode, TVShow
@ -1743,7 +1743,8 @@ class TorrentProvider(GenericProvider):
return []
show_obj = ep_obj.show_obj
season = (-1, ep_obj.season)[has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
season = (-1, ep_obj.season)[ReleaseMap().has_season_exceptions(
ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
ep_dict = self._ep_dict(ep_obj)
sp_detail = (show_obj.air_by_date or show_obj.is_sports) and str(ep_obj.airdate).split('-')[0] or \
(show_obj.is_anime and ep_obj.scene_absolute_number or
@ -1779,7 +1780,8 @@ class TorrentProvider(GenericProvider):
return []
show_obj = ep_obj.show_obj
season = (-1, ep_obj.season)[has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
season = (-1, ep_obj.season)[ReleaseMap().has_season_exceptions(
ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season)]
if show_obj.air_by_date or show_obj.is_sports:
ep_detail = [str(ep_obj.airdate).replace('-', sep_date)]\
if 'date_detail' not in kwargs else kwargs['date_detail'](ep_obj.airdate)

View file

@ -34,7 +34,7 @@ from ..network_timezones import SG_TIMEZONE
from ..sgdatetime import SGDatetime
from ..search import get_aired_in_season, get_wanted_qualities
from ..show_name_helpers import get_show_names
from ..scene_exceptions import has_season_exceptions
from ..scene_exceptions import ReleaseMap
from ..tv import TVEpisode, TVShow
from lib.dateutil import parser
@ -470,7 +470,7 @@ class NewznabProvider(generic.NZBProvider):
# id search
params = base_params.copy()
use_id = False
if not has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
if not ReleaseMap().has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
for i in sickgear.TVInfoAPI().all_sources:
if i in ep_obj.show_obj.ids and 0 < ep_obj.show_obj.ids[i]['id'] and i in self.caps:
params[self.caps[i]] = ep_obj.show_obj.ids[i]['id']
@ -528,7 +528,7 @@ class NewznabProvider(generic.NZBProvider):
# id search
params = base_params.copy()
use_id = False
if not has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
if not ReleaseMap().has_season_exceptions(ep_obj.show_obj.tvid, ep_obj.show_obj.prodid, ep_obj.season):
for i in sickgear.TVInfoAPI().all_sources:
if i in ep_obj.show_obj.ids and 0 < ep_obj.show_obj.ids[i]['id'] and i in self.caps:
params[self.caps[i]] = ep_obj.show_obj.ids[i]['id']

File diff suppressed because it is too large Load diff

View file

@ -19,17 +19,14 @@
# @copyright: Dermot Buckley
#
import datetime
import traceback
from sqlite3 import Row
from exceptions_helper import ex
import sickgear
from . import db, logger
from .helpers import try_int
from .scene_exceptions import xem_ids_list
from .sgdatetime import SGDatetime
# noinspection PyUnreachableCode
@ -774,7 +771,8 @@ def xem_refresh(tvid, prodid, force=False):
tvid, prodid = int(tvid), int(prodid)
tvinfo = sickgear.TVInfoAPI(tvid)
if 'xem_origin' not in tvinfo.config or prodid not in xem_ids_list.get(tvid, []):
if 'xem_origin' not in tvinfo.config \
or prodid not in sickgear.scene_exceptions.MEMCACHE['release_map_xem'].get(tvid, []):
return
xem_origin = tvinfo.config['xem_origin']

View file

@ -24,10 +24,12 @@ import traceback
from . import logger
from exceptions_helper import ex
import sickgear
class Scheduler(threading.Thread):
def __init__(self, action, cycle_time=datetime.timedelta(minutes=10), run_delay=datetime.timedelta(minutes=0),
start_time=None, thread_name="ScheduledThread", silent=True, prevent_cycle_run=None, paused=False):
start_time=None, thread_name='ScheduledThread', silent=True, prevent_cycle_run=None, paused=False):
super(Scheduler, self).__init__()
self.last_run = datetime.datetime.now() + run_delay - cycle_time
@ -41,10 +43,18 @@ class Scheduler(threading.Thread):
self._stopper = threading.Event()
self._unpause = threading.Event()
if not paused:
self._unpause.set()
self.unpause()
self.lock = threading.Lock()
self.force = False
@property
def is_running_job(self):
# type: (...) -> bool
"""
Return running state of the scheduled action
"""
return self.action.amActive
def pause(self):
self._unpause.clear()
@ -69,7 +79,7 @@ class Scheduler(threading.Thread):
return self.cycle_time - (datetime.datetime.now() - self.last_run)
def force_run(self):
if not self.action.amActive:
if not self.is_running_job:
self.force = True
return True
return False
@ -139,3 +149,72 @@ class Scheduler(threading.Thread):
# exiting thread
self._stopper.clear()
self._unpause.clear()
@staticmethod
def blocking_jobs():
# type: (...) -> bool
"""
Return description of running jobs, or False if none are running
These jobs should prevent a restart/shutdown while running.
"""
job_report = []
if sickgear.process_media_scheduler.is_running_job:
job_report.append('Media processing')
if sickgear.update_show_scheduler.is_running_job:
job_report.append(f'{("U", "u")[len(job_report)]}pdating shows data')
# this idea is not ready for production. issues identified...
# 1. many are just the queue filling process, so this doesn't actually prevent restart/shutdown during those
# 2. in case something goes wrong or there is a bad bug in the code, the restart would be prevented
# (meaning no auto- / manual update via ui, user is forced to kill the process, manually update and restart)
# 3. just by bad timing the autoupdate process may be at the same time as another blocking thread = updates but
# never restarts
# therefore, with these issues, the next two lines cannot allow this feature to be brought into service :(
# if len(job_report):
# return '%s %s running' % (' and '.join(job_report), ('are', 'is')[1 == len(job_report)])
return False
class Job(object):
    """
    Centralise a schedulable task together with its running state.

    Wraps a callable and its call arguments; `run` maintains the public
    `amActive` flag around the invocation and optionally exposes a lock
    for callers that require one.
    """

    def __init__(self, func, silent=False, thread_lock=False, reentrant_lock=False, args=(), kwargs=None):
        # public "job is running" flag read by the schedulers
        self.amActive = False
        self._func = func
        self._silent = silent
        self._args = args
        self._kwargs = {} if kwargs is None else kwargs
        # create a lock only when requested; a plain Lock takes precedence
        if thread_lock:
            self.lock = threading.Lock()
        elif reentrant_lock:
            self.lock = threading.RLock()

    def run(self):
        """Invoke the wrapped callable, toggling `amActive` around the call."""
        # these two job types must never be restarted while still active
        if self.amActive and self.__class__.__name__ in ('BacklogSearcher', 'MediaProcess'):
            logger.log(u'%s is still running, not starting it again' % self.__class__.__name__)
            return
        if not self._func:
            return
        outcome, caught = None, False
        try:
            self.amActive = True
            outcome = self._func(*self._args, **self._kwargs)
        except (BaseException, Exception) as err:
            # defer the re-raise so amActive is cleared (and completion logged) first
            caught = err
        finally:
            self.amActive = False
            if not self._silent:
                logger.log(u'%s(%s) completed' % (self.__class__.__name__, self._func.__name__))
        if caught:
            raise caught
        return outcome

View file

@ -17,13 +17,13 @@
from __future__ import with_statement, division
import datetime
import threading
from math import ceil
import sickgear
from . import db, logger, scheduler, search_queue, ui
from .helpers import find_show_by_id
from .providers.generic import GenericProvider
from .scheduler import Job
from .search import wanted_episodes
from .sgdatetime import SGDatetime
from .tv import TVidProdid, TVEpisode, TVShow
@ -74,13 +74,12 @@ class BacklogSearchScheduler(scheduler.Scheduler):
return self.action.nextBacklog - now if self.action.nextBacklog > now else datetime.timedelta(seconds=0)
class BacklogSearcher(object):
class BacklogSearcher(Job):
def __init__(self):
super(BacklogSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
self.last_backlog = self._get_last_backlog()
self.cycle_time = sickgear.BACKLOG_PERIOD
self.lock = threading.Lock()
self.amActive = False # type: bool
self.amPaused = False # type: bool
self.amWaiting = False # type: bool
self.forcetype = NORMAL_BACKLOG # type: int
@ -196,9 +195,6 @@ class BacklogSearcher(object):
:return: nothing
:rtype: None
"""
if self.amActive and not which_shows:
logger.debug('Backlog is still running, not starting it again')
return
if which_shows:
show_list = which_shows
@ -225,7 +221,6 @@ class BacklogSearcher(object):
return
self._get_last_backlog()
self.amActive = True
self.amPaused = False
cur_date = datetime.date.today().toordinal()
@ -328,7 +323,6 @@ class BacklogSearcher(object):
if standard_backlog and not any_torrent_enabled:
self._set_last_runtime(now)
self.amActive = False
self._reset_progress_indicator()
@staticmethod
@ -401,7 +395,7 @@ class BacklogSearcher(object):
# noinspection SqlConstantCondition
my_db.action('UPDATE info SET last_backlog=%s WHERE 1=1' % when)
def run(self):
def job_run(self):
try:
force_type = self.forcetype
force = self.force
@ -409,5 +403,4 @@ class BacklogSearcher(object):
self.force = False
self.search_backlog(force_type=force_type, force=force)
except (BaseException, Exception):
self.amActive = False
raise

View file

@ -16,24 +16,21 @@
from __future__ import with_statement
import threading
import sickgear
from .scheduler import Job
class ProperSearcher(object):
class ProperSearcher(Job):
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
super(ProperSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
@staticmethod
def is_enabled():
# type: (...) -> bool
return sickgear.DOWNLOAD_PROPERS
def run(self):
self.amActive = True
@staticmethod
def job_run():
propersearch_queue_item = sickgear.search_queue.ProperSearchQueueItem()
sickgear.search_queue_scheduler.action.add_item(propersearch_queue_item)
self.amActive = False

View file

@ -16,19 +16,15 @@
from __future__ import with_statement
import threading
import sickgear
from .scheduler import Job
class RecentSearcher(object):
class RecentSearcher(Job):
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
super(RecentSearcher, self).__init__(self.job_run, kwargs={}, thread_lock=True)
def run(self):
self.amActive = True
def job_run(self):
recentsearch_queue_item = sickgear.search_queue.RecentSearchQueueItem()
sickgear.search_queue_scheduler.action.add_item(recentsearch_queue_item)
self.amActive = False

View file

@ -25,7 +25,7 @@ import sickgear
from . import common, db, logger
from .helpers import sanitize_scene_name
from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from .scene_exceptions import get_scene_exceptions
from .scene_exceptions import ReleaseMap
from sg_helpers import scantree
from _23 import quote_plus
@ -384,10 +384,10 @@ def all_possible_show_names(show_obj, season=-1, force_anime=False):
:return: a list of all the possible show names
"""
show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:]
if not show_names: # if we don't have any season specific exceptions fallback to generic exceptions
show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid, season)[:]
if -1 != season and not show_names: # fallback to generic exceptions if no season specific exceptions
season = -1
show_names = get_scene_exceptions(show_obj.tvid, show_obj.prodid, season=season)[:]
show_names = ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid)[:]
if -1 == season:
show_names.append(show_obj.name)

View file

@ -70,7 +70,7 @@ class ShowQueue(generic_queue.GenericQueue):
def check_events(self):
if self.daily_update_running and \
not (self.is_show_update_running() or sickgear.show_update_scheduler.action.amActive):
not (self.is_show_update_running() or sickgear.update_show_scheduler.is_running_job):
self.execute_events(DAILY_SHOW_UPDATE_FINISHED_EVENT)
self.daily_update_running = False
@ -1139,7 +1139,7 @@ class QueueItemAdd(ShowQueueItem):
self.show_obj.tvid, self.show_obj.prodid)
# if "scene" numbering is disabled during add show, output availability to log
if None is not self.scene and not self.show_obj.scene and \
self.show_obj.prodid in sickgear.scene_exceptions.xem_ids_list[self.show_obj.tvid]:
self.show_obj.prodid in sickgear.scene_exceptions.MEMCACHE['release_map_xem'][self.show_obj.tvid]:
logger.log('No scene number mappings found at TheXEM. Therefore, episode scene numbering disabled, '
'edit show and enable it to manually add custom numbers for search and media processing.')
try:
@ -1179,7 +1179,7 @@ class QueueItemAdd(ShowQueueItem):
# if started with WANTED eps then run the backlog
if WANTED == self.default_status or items_wanted:
logger.log('Launching backlog for this show since episodes are WANTED')
sickgear.backlog_search_scheduler.action.search_backlog([self.show_obj], prevent_same=True)
sickgear.search_backlog_scheduler.action.search_backlog([self.show_obj], prevent_same=True)
ui.notifications.message('Show added/search', 'Adding and searching for episodes of' + msg)
else:
ui.notifications.message('Show added', 'Adding' + msg)
@ -1253,7 +1253,7 @@ class QueueItemRefresh(ShowQueueItem):
self.show_obj.populate_cache(self.force_image_cache)
# Load XEM data to DB for show
if self.show_obj.prodid in sickgear.scene_exceptions.xem_ids_list[self.show_obj.tvid]:
if self.show_obj.prodid in sickgear.scene_exceptions.MEMCACHE['release_map_xem'][self.show_obj.tvid]:
sickgear.scene_numbering.xem_refresh(self.show_obj.tvid, self.show_obj.prodid)
if 'pausestatus_after' in self.kwargs and None is not self.kwargs['pausestatus_after']:

View file

@ -23,6 +23,7 @@ from exceptions_helper import ex
import sickgear
from . import db, logger, network_timezones, properFinder, ui
from .scheduler import Job
# noinspection PyUnreachableCode
if False:
@ -54,13 +55,12 @@ def clean_ignore_require_words():
pass
class ShowUpdater(object):
class ShowUpdater(Job):
def __init__(self):
self.amActive = False
super(ShowUpdater, self).__init__(self.job_run, kwargs={})
def run(self):
self.amActive = True
@staticmethod
def job_run():
try:
update_datetime = datetime.datetime.now()
@ -89,14 +89,14 @@ class ShowUpdater(object):
# update xem id lists
try:
sickgear.scene_exceptions.get_xem_ids()
sickgear.scene_exceptions.ReleaseMap().fetch_xem_ids()
except (BaseException, Exception):
logger.error('xem id list update error')
logger.error(traceback.format_exc())
# update scene exceptions
try:
sickgear.scene_exceptions.retrieve_exceptions()
sickgear.scene_exceptions.ReleaseMap().fetch_exceptions()
except (BaseException, Exception):
logger.error('scene exceptions update error')
logger.error(traceback.format_exc())
@ -147,7 +147,7 @@ class ShowUpdater(object):
import threading
try:
sickgear.background_mapping_task = threading.Thread(
name='MAPPINGSUPDATER', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True})
name='MAPPINGUPDATES', target=sickgear.indexermapper.load_mapped_ids, kwargs={'update': True})
sickgear.background_mapping_task.start()
except (BaseException, Exception):
logger.error('missing mapped ids update error')
@ -224,8 +224,8 @@ class ShowUpdater(object):
logger.log('Added all shows to show queue for full update')
finally:
self.amActive = False
except(BaseException, Exception):
pass
def __del__(self):
pass

View file

@ -19,6 +19,7 @@ import datetime
from . import db, helpers, logger
from .common import *
from .scheduler import Job
import sickgear
@ -103,24 +104,22 @@ def subtitle_language_filter():
return [language for language in subliminal.language.LANGUAGES if language[2] != ""]
class SubtitlesFinder(object):
class SubtitlesFinder(Job):
"""
The SubtitlesFinder will be executed every hour but will not necessarily search
and download subtitles. Only if the defined rule is true
"""
def __init__(self):
self.amActive = False
super(SubtitlesFinder, self).__init__(self.job_run, kwargs={}, thread_lock=True)
@staticmethod
def is_enabled():
return sickgear.USE_SUBTITLES
def run(self):
def job_run(self):
if self.is_enabled():
self.amActive = True
self._main()
self.amActive = False
def _main(self):
if 1 > len(sickgear.subtitles.get_enabled_service_list()):

View file

@ -29,6 +29,7 @@ from exceptions_helper import ex
import sickgear
from . import logger, notifiers, ui
from .scheduler import (Scheduler, Job)
from .piper import check_pip_outdated
from sg_helpers import cmdline_runner, get_url
@ -41,12 +42,14 @@ if False:
from typing import Tuple
class PackagesUpdater(object):
class PackagesUpdater(Job):
def __init__(self):
super(PackagesUpdater, self).__init__(self.job_run, kwargs={})
self.install_type = 'Python package updates'
def run(self, force=False):
def job_run(self, force=False):
if not sickgear.EXT_UPDATES \
and self.check_for_new_version(force) \
and sickgear.UPDATE_PACKAGES_AUTO:
@ -64,6 +67,11 @@ class PackagesUpdater(object):
:returns: True when package install/updates are available
:rtype: bool
"""
response = Scheduler.blocking_jobs()
if response:
logger.log(f'Update skipped because {response}', logger.DEBUG)
return False
if force and not sickgear.UPDATE_PACKAGES_MENU:
logger.log('Checking not enabled from menu action for %s' % self.install_type)
return False
@ -100,12 +108,14 @@ class PackagesUpdater(object):
return True
class SoftwareUpdater(object):
class SoftwareUpdater(Job):
"""
Version check class meant to run as a thread object with the sg scheduler.
"""
def __init__(self):
super(SoftwareUpdater, self).__init__(self.job_run, kwargs={})
self._min_python = (100, 0) # set default to absurdly high to prevent update
self.install_type = self.find_install_type()
@ -150,7 +160,7 @@ class SoftwareUpdater(object):
except (BaseException, Exception):
pass
def run(self, force=False):
def job_run(self, force=False):
# set current branch version
sickgear.BRANCH = self.get_branch()
@ -219,6 +229,11 @@ class SoftwareUpdater(object):
# update branch with current config branch value
self.updater.branch = sickgear.BRANCH
response = Scheduler.blocking_jobs()
if response:
logger.log(f'Update skipped because {response}', logger.DEBUG)
return False
if not self.is_updatable:
self._log_cannot_update()
return False

View file

@ -14,17 +14,15 @@
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import threading
import sickgear
from .scheduler import Job
from sickgear import watchedstate_queue
class WatchedStateUpdater(object):
class WatchedStateUpdater(Job):
def __init__(self, name, queue_item):
super(WatchedStateUpdater, self).__init__(self.job_run, silent=True, kwargs={}, thread_lock=True)
self.amActive = False
self.lock = threading.Lock()
self.name = name
self.queue_item = queue_item
@ -32,13 +30,15 @@ class WatchedStateUpdater(object):
def prevent_run(self):
return sickgear.watched_state_queue_scheduler.action.is_in_queue(self.queue_item)
def run(self):
@staticmethod
def is_enabled():
return True
def job_run(self):
# noinspection PyUnresolvedReferences
if self.is_enabled():
self.amActive = True
new_item = self.queue_item()
sickgear.watched_state_queue_scheduler.action.add_item(new_item)
self.amActive = False
class EmbyWatchedStateUpdater(WatchedStateUpdater):

View file

@ -48,6 +48,7 @@ from .indexers import indexer_api, indexer_config
from .indexers.indexer_config import *
from lib.tvinfo_base.exceptions import *
from .scene_numbering import set_scene_numbering_helper
from .scheduler import Scheduler
from .search_backlog import FORCED_BACKLOG
from .show_updater import clean_ignore_require_words
from .sgdatetime import SGDatetime
@ -1915,9 +1916,9 @@ class CMD_SickGearPostProcess(ApiCall):
if not self.type:
self.type = 'manual'
data = processTV.processDir(self.path, process_method=self.process_method, force=self.force_replace,
force_replace=self.is_priority, failed=self.failed, pp_type=self.type,
client=self.client)
data = processTV.process_dir(self.path, process_method=self.process_method, force=self.force_replace,
force_replace=self.is_priority, failed=self.failed, pp_type=self.type,
client=self.client)
if not self.return_data:
data = ""
@ -2074,7 +2075,7 @@ class CMD_SickGearCheckScheduler(ApiCall):
backlogPaused = sickgear.search_queue_scheduler.action.is_backlog_paused()
backlogRunning = sickgear.search_queue_scheduler.action.is_backlog_in_progress()
nextBacklog = sickgear.backlog_search_scheduler.next_run().strftime(dateFormat)
nextBacklog = sickgear.search_backlog_scheduler.next_run().strftime(dateFormat)
data = {"backlog_is_paused": int(backlogPaused), "backlog_is_running": int(backlogRunning),
"last_backlog": (0 < len(sql_result) and _ordinal_to_dateForm(sql_result[0]["last_backlog"])) or '',
@ -2177,15 +2178,15 @@ class CMD_SickGearForceSearch(ApiCall):
""" force the specified search type to run """
result = None
if 'recent' == self.searchtype and not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress() \
and not sickgear.recent_search_scheduler.action.amActive:
result = sickgear.recent_search_scheduler.force_run()
and not sickgear.search_recent_scheduler.is_running_job:
result = sickgear.search_recent_scheduler.force_run()
elif 'backlog' == self.searchtype and not sickgear.search_queue_scheduler.action.is_backlog_in_progress() \
and not sickgear.backlog_search_scheduler.action.amActive:
sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG)
and not sickgear.search_backlog_scheduler.is_running_job:
sickgear.search_backlog_scheduler.force_search(force_type=FORCED_BACKLOG)
result = True
elif 'proper' == self.searchtype and not sickgear.search_queue_scheduler.action.is_propersearch_in_progress() \
and not sickgear.proper_finder_scheduler.action.amActive:
result = sickgear.proper_finder_scheduler.force_run()
and not sickgear.search_propers_scheduler.is_running_job:
result = sickgear.search_propers_scheduler.force_run()
if result:
return _responds(RESULT_SUCCESS, msg='%s search successfully forced' % self.searchtype)
return _responds(RESULT_FAILURE,
@ -2499,6 +2500,13 @@ class CMD_SickGearRestart(ApiCall):
def run(self):
""" restart sickgear """
response = Scheduler.blocking_jobs()
if response:
msg = f'Restart aborted from API because {response.lower()}'
logger.log(msg, logger.DEBUG)
return _responds(RESULT_FAILURE, msg=msg)
sickgear.restart(soft=False)
return _responds(RESULT_SUCCESS, msg="SickGear is restarting...")
@ -2817,6 +2825,13 @@ class CMD_SickGearShutdown(ApiCall):
def run(self):
""" shutdown sickgear """
response = Scheduler.blocking_jobs()
if response:
msg = f'Shutdown aborted from API because {response.lower()}'
logger.log(msg, logger.DEBUG)
return _responds(RESULT_FAILURE, msg=msg)
sickgear.events.put(sickgear.events.SystemEvent.SHUTDOWN)
return _responds(RESULT_SUCCESS, msg="SickGear is shutting down...")
@ -4668,10 +4683,10 @@ class CMD_SickGearShowsForceUpdate(ApiCall):
def run(self):
""" force the daily show update now """
if sickgear.show_queue_scheduler.action.is_show_update_running() \
or sickgear.show_update_scheduler.action.amActive:
or sickgear.update_show_scheduler.is_running_job:
return _responds(RESULT_FAILURE, msg="show update already running.")
result = sickgear.show_update_scheduler.force_run()
result = sickgear.update_show_scheduler.force_run()
if result:
return _responds(RESULT_SUCCESS, msg="daily show update started")
return _responds(RESULT_FAILURE, msg="can't start show update currently")

View file

@ -65,6 +65,7 @@ from .name_parser.parser import InvalidNameException, InvalidShowException, Name
from .providers import newznab, rsstorrent
from .scene_numbering import get_scene_absolute_numbering_for_show, get_scene_numbering_for_show, \
get_xem_absolute_numbering_for_show, get_xem_numbering_for_show, set_scene_numbering_helper
from .scheduler import Scheduler
from .search_backlog import FORCED_BACKLOG
from .sgdatetime import SGDatetime
from .show_name_helpers import abbr_showname
@ -1327,8 +1328,8 @@ class MainHandler(WebHandler):
now = datetime.datetime.now()
events = [
('recent', sickgear.recent_search_scheduler.time_left),
('backlog', sickgear.backlog_search_scheduler.next_backlog_timeleft),
('recent', sickgear.search_recent_scheduler.time_left),
('backlog', sickgear.search_backlog_scheduler.next_backlog_timeleft),
]
if sickgear.DOWNLOAD_PROPERS:
@ -2070,6 +2071,9 @@ class Home(MainHandler):
if str(pid) != str(sickgear.PID):
return self.redirect('/home/')
if self.maybe_ignore('Shutdown'):
return
t = PageTemplate(web_handler=self, file='restart.tmpl')
t.shutdown = True
@ -2082,6 +2086,9 @@ class Home(MainHandler):
if str(pid) != str(sickgear.PID):
return self.redirect('/home/')
if self.maybe_ignore('Restart'):
return
t = PageTemplate(web_handler=self, file='restart.tmpl')
t.shutdown = False
@ -2089,6 +2096,17 @@ class Home(MainHandler):
return t.respond()
def maybe_ignore(self, task):
    """
    Abort a restart/shutdown request while a blocking scheduler job is running.

    :param task: action name used in logs/notifications (e.g. 'Restart',
        'Shutdown'); a falsy value suppresses the log and notification
    :return: True when the action was aborted (caller should return early),
        otherwise False
    """
    response = Scheduler.blocking_jobs()
    if response:
        task and logger.log('%s aborted because %s' % (task, response.lower()), logger.DEBUG)
        # bounce the user back to the page the request came from
        self.redirect(self.request.headers['Referer'])
        if task:
            ui.notifications.message(u'Fail %s because %s, please try later' % (task.lower(), response.lower()))
        return True
    return False
def update(self, pid=None):
if str(pid) != str(sickgear.PID):
@ -2326,15 +2344,15 @@ class Home(MainHandler):
t.season_min = ([], [1])[2 < t.latest_season] + [t.latest_season]
t.other_seasons = (list(set(all_seasons) - set(t.season_min)), [])[display_show_minimum]
t.seasons = []
for x in all_seasons:
t.seasons += [(x, [None] if x not in (t.season_min + t.other_seasons) else my_db.select(
for cur_season in all_seasons:
t.seasons += [(cur_season, [None] if cur_season not in (t.season_min + t.other_seasons) else my_db.select(
'SELECT *'
' FROM tv_episodes'
' WHERE indexer = ? AND showid = ?'
' AND season = ?'
' ORDER BY episode DESC',
[show_obj.tvid, show_obj.prodid, x]
), scene_exceptions.has_season_exceptions(show_obj.tvid, show_obj.prodid, x))]
[show_obj.tvid, show_obj.prodid, cur_season]
), scene_exceptions.ReleaseMap().has_season_exceptions(show_obj.tvid, show_obj.prodid, cur_season))]
for row in my_db.select('SELECT season, episode, status'
' FROM tv_episodes'
@ -2423,7 +2441,7 @@ class Home(MainHandler):
t.clean_show_name = quote_plus(sickgear.indexermapper.clean_show_name(show_obj.name))
t.min_initial = Quality.get_quality_ui(min(Quality.split_quality(show_obj.quality)[0]))
t.show_obj.exceptions = scene_exceptions.get_scene_exceptions(show_obj.tvid, show_obj.prodid)
t.show_obj.exceptions = scene_exceptions.ReleaseMap().get_alt_names(show_obj.tvid, show_obj.prodid)
# noinspection PyUnresolvedReferences
t.all_scene_exceptions = show_obj.exceptions # normally Unresolved as not a class attribute, force set above
t.scene_numbering = get_scene_numbering_for_show(show_obj.tvid, show_obj.prodid)
@ -2562,7 +2580,7 @@ class Home(MainHandler):
@staticmethod
def scene_exceptions(tvid_prodid, wanted_season=None):
exceptions_list = sickgear.scene_exceptions.get_all_scene_exceptions(tvid_prodid)
exceptions_list = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
wanted_season = helpers.try_int(wanted_season, None)
wanted_not_found = None is not wanted_season and wanted_season not in exceptions_list
if not exceptions_list or wanted_not_found:
@ -2754,7 +2772,7 @@ class Home(MainHandler):
return [err_string]
return self._generic_message('Error', err_string)
show_obj.exceptions = scene_exceptions.get_all_scene_exceptions(tvid_prodid)
show_obj.exceptions = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
if None is not quality_preset and int(quality_preset):
best_qualities = []
@ -2971,7 +2989,7 @@ class Home(MainHandler):
if do_update_exceptions:
try:
scene_exceptions.update_scene_exceptions(show_obj.tvid, show_obj.prodid, exceptions_list)
scene_exceptions.ReleaseMap().update_exceptions(show_obj, exceptions_list)
helpers.cpu_sleep()
except exceptions_helper.CantUpdateException:
errors.append('Unable to force an update on scene exceptions of the show.')
@ -3912,16 +3930,16 @@ class HomeProcessMedia(Home):
m = sickgear.NZBGET_MAP.split('=')
dir_name, not_used = helpers.path_mapper(m[0], m[1], dir_name)
result = processTV.processDir(dir_name if dir_name else None,
None if not nzb_name else decode_str(nzb_name),
process_method=process_method, pp_type=process_type,
cleanup=cleanup,
force=force in ('on', '1'),
force_replace=force_replace in ('on', '1'),
failed='0' != failed,
webhandler=None if '0' == stream else self.send_message,
show_obj=show_obj, is_basedir=is_basedir in ('on', '1'),
skip_failure_processing=skip_failure_processing, client=client)
result = processTV.process_dir(dir_name if dir_name else None,
None if not nzb_name else decode_str(nzb_name),
process_method=process_method, pp_type=process_type,
cleanup=cleanup,
force=force in ('on', '1'),
force_replace=force_replace in ('on', '1'),
failed='0' != failed,
webhandler=None if '0' == stream else self.send_message,
show_obj=show_obj, is_basedir=is_basedir in ('on', '1'),
skip_failure_processing=skip_failure_processing, client=client)
if '0' == stream:
regexp = re.compile(r'(?i)<br[\s/]+>', flags=re.UNICODE)
@ -4448,7 +4466,7 @@ class AddShows(Home):
t.blocklist = []
t.groups = []
t.show_scene_maps = list(itervalues(sickgear.scene_exceptions.xem_ids_list))
t.show_scene_maps = list(itervalues(scene_exceptions.MEMCACHE['release_map_xem']))
has_shows = len(sickgear.showList)
t.try_id = [] # [dict try_tip: try_term]
@ -6616,7 +6634,7 @@ class Manage(MainHandler):
show_obj = helpers.find_show_by_id(tvid_prodid)
if show_obj:
sickgear.backlog_search_scheduler.action.search_backlog([show_obj])
sickgear.search_backlog_scheduler.action.search_backlog([show_obj])
self.redirect('/manage/backlog-overview/')
@ -7116,11 +7134,11 @@ class ManageSearch(Manage):
def index(self):
t = PageTemplate(web_handler=self, file='manage_manageSearches.tmpl')
# t.backlog_pi = sickgear.backlog_search_scheduler.action.get_progress_indicator()
# t.backlog_pi = sickgear.search_backlog_scheduler.action.get_progress_indicator()
t.backlog_paused = sickgear.search_queue_scheduler.action.is_backlog_paused()
t.scheduled_backlog_active_providers = sickgear.search_backlog.BacklogSearcher.providers_active(scheduled=True)
t.backlog_running = sickgear.search_queue_scheduler.action.is_backlog_in_progress()
t.backlog_is_active = sickgear.backlog_search_scheduler.action.am_running()
t.backlog_is_active = sickgear.search_backlog_scheduler.action.am_running()
t.standard_backlog_running = sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress()
t.backlog_running_type = sickgear.search_queue_scheduler.action.type_of_backlog_in_progress()
t.recent_search_status = sickgear.search_queue_scheduler.action.is_recentsearch_in_progress()
@ -7161,7 +7179,7 @@ class ManageSearch(Manage):
def force_backlog(self):
# force it to run the next time it looks
if not sickgear.search_queue_scheduler.action.is_standard_backlog_in_progress():
sickgear.backlog_search_scheduler.force_search(force_type=FORCED_BACKLOG)
sickgear.search_backlog_scheduler.force_search(force_type=FORCED_BACKLOG)
logger.log('Backlog search forced')
ui.notifications.message('Backlog search started')
@ -7172,7 +7190,7 @@ class ManageSearch(Manage):
# force it to run the next time it looks
if not sickgear.search_queue_scheduler.action.is_recentsearch_in_progress():
result = sickgear.recent_search_scheduler.force_run()
result = sickgear.search_recent_scheduler.force_run()
if result:
logger.log('Recent search forced')
ui.notifications.message('Recent search started')
@ -7183,7 +7201,7 @@ class ManageSearch(Manage):
def force_find_propers(self):
# force it to run the next time it looks
result = sickgear.proper_finder_scheduler.force_run()
result = sickgear.search_propers_scheduler.force_run()
if result:
logger.log('Find propers search forced')
ui.notifications.message('Find propers search started')
@ -7207,10 +7225,10 @@ class ShowTasks(Manage):
t = PageTemplate(web_handler=self, file='manage_showProcesses.tmpl')
t.queue_length = sickgear.show_queue_scheduler.action.queue_length()
t.people_queue = sickgear.people_queue_scheduler.action.queue_data()
t.next_run = sickgear.show_update_scheduler.last_run.replace(
hour=sickgear.show_update_scheduler.start_time.hour)
t.next_run = sickgear.update_show_scheduler.last_run.replace(
hour=sickgear.update_show_scheduler.start_time.hour)
t.show_update_running = sickgear.show_queue_scheduler.action.is_show_update_running() \
or sickgear.show_update_scheduler.action.amActive
or sickgear.update_show_scheduler.is_running_job
my_db = db.DBConnection(row_type='dict')
sql_result = my_db.select('SELECT n.indexer || ? || n.indexer_id AS tvid_prodid,'
@ -7293,7 +7311,7 @@ class ShowTasks(Manage):
def force_show_update(self):
result = sickgear.show_update_scheduler.force_run()
result = sickgear.update_show_scheduler.force_run()
if result:
logger.log('Show Update forced')
ui.notifications.message('Forced Show Update started')
@ -7982,7 +8000,7 @@ class ConfigGeneral(Config):
def update_alt():
""" Load scene exceptions """
changed_exceptions, cnt_updated_numbers, min_remain_iv = scene_exceptions.retrieve_exceptions()
changed_exceptions, cnt_updated_numbers, min_remain_iv = scene_exceptions.ReleaseMap().fetch_exceptions()
return json_dumps(dict(names=int(changed_exceptions), numbers=cnt_updated_numbers, min_remain_iv=min_remain_iv))
@ -7991,7 +8009,7 @@ class ConfigGeneral(Config):
""" Return alternative release names and numbering as json text"""
# alternative release names and numbers
alt_names = scene_exceptions.get_all_scene_exceptions(tvid_prodid)
alt_names = scene_exceptions.ReleaseMap().get_show_exceptions(tvid_prodid)
alt_numbers = get_scene_numbering_for_show(*TVidProdid(tvid_prodid).tuple) # arbitrary order
ui_output = 'No alternative names or numbers to export'
@ -8180,8 +8198,8 @@ class ConfigGeneral(Config):
sickgear.UPDATE_SHOWS_ON_START = config.checkbox_to_value(update_shows_on_start)
sickgear.SHOW_UPDATE_HOUR = config.minimax(show_update_hour, 3, 0, 23)
try:
with sickgear.show_update_scheduler.lock:
sickgear.show_update_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR)
with sickgear.update_show_scheduler.lock:
sickgear.update_show_scheduler.start_time = datetime.time(hour=sickgear.SHOW_UPDATE_HOUR)
except (BaseException, Exception) as e:
logger.error('Could not change Show Update Scheduler time: %s' % ex(e))
sickgear.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show)

View file

@ -74,14 +74,15 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
sickgear.showList.append(s)
sickgear.showDict[s.sid_int] = s
sickgear.webserve.Home.make_showlist_unique_names()
scene_exceptions.retrieve_exceptions()
scene_exceptions.ReleaseMap().fetch_exceptions()
name_cache.build_name_cache()
def test_sceneExceptionsEmpty(self):
self.assertEqual(scene_exceptions.get_scene_exceptions(0, 0), [])
self.assertEqual(scene_exceptions.ReleaseMap().get_alt_names(0, 0), [])
def test_sceneExceptionsBlack_Lagoon(self):
self.assertEqual(sorted(scene_exceptions.get_scene_exceptions(1, 79604)), ['Black-Lagoon'])
self.assertEqual(sorted(
scene_exceptions.ReleaseMap().get_alt_names(1, 79604)), ['Black-Lagoon'])
def test_sceneExceptionByName(self):
self.assertEqual(scene_exceptions.get_scene_exception_by_name(
@ -98,14 +99,18 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
s.anime = 1
sickgear.showList.append(s)
sickgear.showDict[s.sid_int] = s
scene_exceptions.retrieve_exceptions()
scene_exceptions.ReleaseMap().fetch_exceptions()
name_cache.build_name_cache()
self.assertEqual(scene_exceptions.get_scene_exception_by_name('ブラック・ラグーン'), [1, 79604, -1])
self.assertEqual(scene_exceptions.get_scene_exception_by_name('Burakku Ragūn'), [1, 79604, -1])
self.assertEqual(scene_exceptions.get_scene_exception_by_name('Rokka no Yuusha'), [1, 295243, -1])
self.assertEqual(scene_exceptions.get_scene_exception_by_name(
'ブラック・ラグーン'), [1, 79604, -1])
self.assertEqual(scene_exceptions.get_scene_exception_by_name(
'Burakku Ragūn'), [1, 79604, -1])
self.assertEqual(scene_exceptions.get_scene_exception_by_name(
'Rokka no Yuusha'), [1, 295243, -1])
def test_sceneExceptionByNameEmpty(self):
self.assertEqual(scene_exceptions.get_scene_exception_by_name('nothing useful'), [None, None, None])
self.assertEqual(scene_exceptions.get_scene_exception_by_name(
'nothing useful'), [None, None, None])
def test_sceneExceptionsResetNameCache(self):
# clear the exceptions
@ -117,7 +122,7 @@ class SceneExceptionTestCase(test.SickbeardTestDBCase):
name_cache.add_name_to_cache('Cached Name', prodid=0)
# updating should not clear the cache this time since our exceptions didn't change
scene_exceptions.retrieve_exceptions()
scene_exceptions.ReleaseMap().fetch_exceptions()
self.assertEqual(name_cache.retrieve_name_from_cache('Cached Name'), (0, 0))

View file

@ -180,7 +180,7 @@ class WebAPICase(test.SickbeardTestDBCase):
search_queue.SearchQueue(),
cycle_time=datetime.timedelta(seconds=3),
thread_name='SEARCHQUEUE')
sickgear.backlog_search_scheduler = search_backlog.BacklogSearchScheduler(
sickgear.search_backlog_scheduler = search_backlog.BacklogSearchScheduler(
search_backlog.BacklogSearcher(),
cycle_time=datetime.timedelta(minutes=60),
run_delay=datetime.timedelta(minutes=60),